code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
# coding: utf8
"""
Contains the Python package for Algorithm used
"""
# Import numpy
import numpy as np
# Import control for plotting option
import control as cn
# Import bokeh for plotting
import bokeh.plotting as bk
# TUBS colors: red, yellow, orange, light green, green, dark green, light blue, blue, dark blue, violet
TUBScolorscale=[(190,30,60,0.9),
(255,200,42,0.9),
(225,109,0,0.9),
(172,193,58,0.9),
(109,131,0,0.9),
(0,83,74,0.9),
(102,180,211,0.9),
(0,112,155,0.9),
(0,63,87,0.9),
(138,48,127,0.9)]
# Step Information of SISO system
def Step_Info(y,t, p=0.02, yr = 1):
"""
Returns the Rise Time, Overshoot, Settling Time and Steady State
of a given signal. Requires y, yr, t and optional a percentage for settling time
"""
# Check for Steady State
# Does series converge?
if np.abs(y[-1]-yr) < 1e-2 :
yss = y[-1]
# If not return error
else:
return np.NaN, np.NaN, np.NaN, np.NaN
    # Rise time: time from the first crossing of 10% of the steady state
    # value to the first crossing of 90% of the steady state value
    index1 = np.where(y >= 0.1*yss)
    index2 = np.where(y >= 0.9*yss)
    # Check if either crossing is missing
    if index1[0].size == 0:
        t_rise = np.NaN
    elif index2[0].size == 0:
        t_rise = np.NaN
    else:
        t_rise = t[index2[0][0]] - t[index1[0][0]]
# Overshoot for values above steady state
# Get all values
mp = np.abs(y[np.where(abs(y)>abs(yss))])
# Check if empty
if mp.size == 0:
mp = np.NaN
else:
mp = np.abs(np.max(mp)-np.abs(yss))
    # Settling time: first time the signal enters a band of +/- p around the steady state value
    index = np.where(np.logical_and(abs(y)<(1+p)*abs(yss), abs(y)>(1-p)*abs(yss)))
    # Check if empty
if index[0].size ==0:
t_settle = np.NaN
else:
t_settle = t[index[0][0]]
return t_rise,mp,t_settle,yss
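# Example (added sketch, not part of the original module): Step_Info applied to a
# synthetic first order step response. All signal values below are illustrative only.
def _example_step_info():
    """Minimal usage sketch for Step_Info on a simulated unit step response."""
    t_demo = np.linspace(0, 100, 1001)
    # First order response with unit steady state gain and time constant 10
    y_demo = 1.0 - np.exp(-t_demo / 10.0)
    t_rise, overshoot, t_settle, yss = Step_Info(y_demo, t_demo)
    return t_rise, overshoot, t_settle, yss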
# Integral Identification of first order time delay
def Integral_Identification(y,u,t, graphics = "off"):
"""Returns a FOTD Model from the given data.
y - array of outputs
u - array of inputs
t - array of time values
"""
    # If the output is zero, return an empty model
if np.max(abs(y)) == 0:
return 0,0,0
# Truncate for Maximum value of abs
i_end = np.argmax(abs(y),axis=0)
    # If the maximum is at the very first sample, keep at least one sample
    if i_end <= 0:
        i_end = 1
yp = y[0:i_end]
up = u[0:i_end]
tp = t[0:i_end]
# Get Gain
KM = (yp[-1]-yp[0])/(up[-1])
# Get the Residence Time
Tar = 1/KM * np.trapz(yp[-1]-yp,tp)
# Time Constant
T = np.exp(1)/KM*np.trapz(yp[np.where(tp<=Tar)],tp[np.where(tp<=Tar)])
# Delay
L = Tar-T
# Check if all arguments are valid
if (T < 0):
print("Error - Negative lag - Using 20 instead")
T = 20
    if (L < 1e-2):
        print("Error - Small delay - Using 0 instead")
        L = 0
#Plotting
if graphics == 'on':
# Pade Polynomial
num,den = cn.pade(L,10)
# Make system model
G = cn.tf(KM,[T,1])*cn.tf(num,den)
# Show the system model
ym,tm = cn.step(G, T=t )
fig = bk.figure(title="FOTD Approximation")
fig.line(t,y,color=TUBScolorscale[6],legend='Original System')
fig.line(tm,ym,color=TUBScolorscale[2], legend='Model System')
fig.legend.location = "bottom_right"
return KM,T,L
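# Example (added sketch, not part of the original module): identifying an FOTD model
# from a synthetic first order step response. Gain and time constant are arbitrary
# illustration values; the call prints a warning if the estimated delay is tiny.
def _example_integral_identification():
    """Minimal usage sketch for Integral_Identification with graphics off."""
    t_demo = np.linspace(0, 200, 2001)
    u_demo = np.ones_like(t_demo)
    y_demo = 2.0 * (1.0 - np.exp(-t_demo / 15.0))
    KM, T, L = Integral_Identification(y_demo, u_demo, t_demo)
    return KM, T, L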
# Algorithm for computing the gain of a first order time delay system
def FOTD_Gain(K,T,L,w=0):
"""Computes the gain of a first order time delay system at a given frequency"""
# Check if all dimensions match
if (K.shape != T.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
return np.NaN
# Steady State
if w==0:
return K
# System Dimension
if K.ndim == 1:
# Using system Identity by multiplying with the complex conjugate
G = 1/(T**2 * w**2 +1)*(K-1j*T*w)*(np.cos(-L*w)+1j*np.sin(-L*w))
else:
outputs,inputs = K.shape
# Create a system within the complex numbers
G = np.zeros_like(K, dtype=complex)
for i in range(0,inputs):
for o in range(0,outputs):
# Using system Identity by multiplying with the complex conjugate
G[o][i] = 1 /(T[o][i]**2 * w**2 +1) * ( K[o][i] - 1j*T[o][i]*w) *(np.cos(-L[o][i]*w)+1j*np.sin(-L[o][i]*w))
return np.real(G)
# Algorithm for computing the RGA
def RGA(K,T,L,w=0):
"""Takes a FOTD System and computes the RGA of the system"""
if (K.shape != T.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
# Compute the System
G = np.absolute(FOTD_Gain(K,T,L,w))
# Calculate the RGA
RGA = np.multiply(G, np.transpose(np.linalg.inv(G)))
return RGA
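# Example (added sketch, not part of the original module): steady state RGA of a
# hypothetical 2x2 FOTD system. The parameter values are illustrative only.
def _example_rga():
    """Minimal usage sketch for RGA at steady state (w = 0)."""
    K = np.array([[2.0, 0.5], [0.3, 1.5]])
    T = np.array([[10.0, 5.0], [8.0, 12.0]])
    L = np.array([[1.0, 2.0], [1.5, 1.0]])
    return RGA(K, T, L, w=0)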
# Algorithm for AMIGO Tuning
def AMIGO_Tune(K,T,L, structure = 'PI'):
"""Computes the PI(D) controller parameter based on AMIGO algorithm;
Parameter are returned as parallel notation KP,KI,KD and set point;
Needs first order time delay parameter as input
"""
# Check for small delay
if L < 0.3*T:
if 0.3*T < 1e-2:
L_P = 1e-2
else:
L_P = 0.3*T
else:
L_P = L
# PI Controller
if structure == 'PI':
        # Parameter as defined in Åström et al., Advanced PID Control, p. 229
KP = 0.15/K + (0.35 - L_P*T /(L_P+T)**2)*T/(K*L_P)
TI = 0.35*L_P+(13*L_P*T**2)/(T**2+12*L_P*T+7*L_P**2)
TD = 0.0
# Set Point Weight, Derived from Fig. 7.2, p. 230
if L/(T+L) < 0.2:
b = 0.0
elif L/(T+L) > 0.3:
b = 1.0
else:
# Approximate as Linear Function
b = 0.0 + (1.0 - 0.0)/(0.3-0.2)*(L/(T+L)-0.2)
elif structure == 'PID':
KP = 1/K*(0.2+0.45*T/L_P)
TI = (0.4*L_P + 0.8*T)/(L_P+0.1*T)*L
TD = (0.5*L_P*T)/(0.3*L_P+T)
        # Set Point Weight, as given on p. 235
        # TODO: verify this
if L/(T+L) > 0.5:
b = 1
else:
b = 0.0
else:
print("Undefined controller Structure")
return np.NaN
KI = KP/TI
KD = KP*TD
return [KP,KI,KD],b
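# Example (added sketch, not part of the original module): AMIGO PI tuning for a
# single hypothetical FOTD process with K = 2, T = 10 and L = 2.
def _example_amigo_tune():
    """Minimal usage sketch for AMIGO_Tune with the default PI structure."""
    [KP, KI, KD], b = AMIGO_Tune(2.0, 10.0, 2.0)
    return KP, KI, KD, b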
# Algorithm for AMIGO detuning
def AMIGO_DETUNE(K,T,L,params,KP, MS = 1.4, structure = 'PI'):
"""Detunes the AMIGO parameter according to Astrom"""
# Check for small delay
if L < 1e-1:
L = 1e-1
# Calculate normalized Time
tau = L/(L+T)
# Needed Parameter
alpha_D = (MS-1)/MS # See p.255 Eq. 7.19
beta_D = MS*(MS+np.sqrt(MS**2-1))/2# See p.257 Eq. 7.24
# Define old set of parameter
KP0 = params[0]
KI0 = params[1]
KD0 = params[2]
if structure=='PI':
# Use normalized time to determine Process as explained on p.255 f.
if tau > 0.1:
KI = KI0*(K*KP+alpha_D)/(K*KP0+alpha_D)
else:
            # Needed constraint for the switch case, see p. 258 Eq. 7.27
c = KP*K - KP0*K*(L+T)/(beta_D*(alpha_D+KP*K)) - alpha_D
if c < 0:
KI = beta_D*(alpha_D+KP*K)**2/(K*(L+T))
else:
KI = KI0*(alpha_D+KP*K)/(alpha_D+KP0*K)
return [KP,KP/KI,0.0]
if structure == 'PID':
print("Not implemented")
return np.NaN
else:
print("Undefined controller Structure")
return np.NaN
# Algorithm for computing a decentralized controller based on RGA
def Control_Decentral(K,T,L, w = 0, b=np.empty, structure = 'PI'):
""" Computes decentralised controller with AMIGO algorithm based on RGA pairing"""
# Compute SISO Case
if K.ndim <= 1:
        # Tune the single loop directly with AMIGO
params, b0 = AMIGO_Tune(K,T,L)
# If b is not given, use b from AMIGO
if b == np.empty:
B = b0
#Kr = [b0*params[0], params[1], params[2]]
Ky = params
else:
B = b
Ky = params
D = 1
# Compute general MIMO Case
else:
# Systems dimensions
outputs,inputs = K.shape
# Create an empty controller
Ky = np.zeros([outputs,inputs,3])
B = np.zeros([outputs,inputs])
D = np.eye(outputs,inputs)
# Compute RGA -> Checks for Shape
LG = RGA(K,T,L,w)
# Get Pairing as an array for every column
Pairing = np.argmax(LG, axis=0)
# Iterate through the pairing
for o in range(0,outputs):
# Best Pairing
i = Pairing[o]
# Compute controller via recursion
Ky[o][i],B[o][i],d = Control_Decentral(K[o][i],T[o][i],L[o][i],b)
return Ky, B, D
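# Example (added sketch, not part of the original module): decentralised PI design for
# a hypothetical 2x2 FOTD plant; all numbers are illustrative only.
def _example_control_decentral():
    """Minimal usage sketch for Control_Decentral on a 2x2 system."""
    K = np.array([[2.0, 0.5], [0.3, 1.5]])
    T = np.array([[10.0, 5.0], [8.0, 12.0]])
    L = np.array([[1.0, 2.0], [1.5, 1.0]])
    Ky, B, D = Control_Decentral(K, T, L)
    return Ky, B, D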
# Algorithm for computing a decoupling control based on Aström
def Control_Astrom(K,T,L,H, MS= None, w = 0, b=np.empty, structure = 'PI', graphics = "off"):
"""Computes a Decoupling Controller via Aström Algortihm based on FOTD"""
# Check Input for Maximum Sensitivity
if MS is None:
MS = 1.4*np.eye(K.shape[0],K.shape[1])
# Compute Determinant of Maximum Sensitivity
ms = np.linalg.det(MS)
# Compute SISO Case
if K.ndim <= 1:
return Control_Decentral(K,T,L,w,b,structure)
# Compute General MIMO Case
else:
# Systems dimensions
outputs,inputs = K.shape
# Check dimensions
if (K.shape != T.shape) or (K.shape != H.shape) or (K.shape != MS.shape) or (K.shape != L.shape) or (L.shape != T.shape):
print("Shapes of parameter array are not equal!")
return np.NaN
# Create an empty controller
Ky = np.empty([outputs,inputs,3])
B = np.empty([outputs,inputs])
# Compute the decoupler
D = np.linalg.inv(K)
        # Compute the interaction indices
        # Since d/ds(Q*K) = d/ds(Q)*K = d/ds(G) we can write the Taylor coefficient
Gamma = np.abs(np.dot(np.multiply(-K,T+L),D))
# Set main diagonal to zero
np.fill_diagonal(Gamma,0)
# Get the maximum of each row
GMax = np.argmax(Gamma,axis=1)
# Get the new System
Tt = np.dot(np.multiply(K,np.add(T,L)),D)-np.diag(np.max(L,axis=1))#np.dot(K,np.dot(np.transpose(np.add(T,L)),D))-np.diag(np.max(L,axis=1))
Lt = np.diag(np.max(np.transpose(L),axis=0))
Kt = np.eye(K.shape[0],K.shape[1])
# Plotting
# When on, simulation of closed loop is not working correctly!
# WHY!?
if graphics == 'on':
# Open Figure
fig = bk.figure(title = "Approximation of FOTD Sum")
# Loop over the outputs
for o in range(0,outputs):
# Make system model
system = cn.tf(0,1)
# Loop over inputs
for i in range(0,inputs):
# Make the system
numerator,denominator = cn.pade(L[o][i],10)
system = system + D[i][o] * cn.tf(K[o][i],[T[o][i],1])*cn.tf(numerator,denominator)
# System
output_system,sim_time = cn.step(system)
# System Model
numerator,denominator = cn.pade(Lt[o][o],10)
system_model = cn.tf(Kt[o][o],[Tt[o][o],1])*cn.tf(numerator,denominator)
output_system_model, sim_time = cn.step(system_model,sim_time)
fig.line(sim_time,output_system,color=TUBScolorscale[6],legend='Original System '+str(outputs))
fig.line(sim_time,output_system_model,color=TUBScolorscale[2], legend='Model System '+str(outputs))
# Legend of the system
fig.legend.location="bottom_right"
# Show the plot
bk.show(fig)
# Delete variables
del numerator,denominator,system_model,system,output_system,output_system_model, fig, sim_time
# Iterate through the outputs
for o in range(0,outputs):
# Estimate the new system parameter
# Get the maximal delay
#l = np.max(L[o][:])
l = Lt[o][o]
# Add the systems gain -> scaled to 1 because of inversion
k = Kt[o][o]
# Get the array of gains
# Get the system time constant as weighted sum
t = Tt[o][o]
# Calculate the detuning frequency
R = 0.8
wc_min = 2.0/R * (t+l)/((t+l)**2 + t**2)
print(wc_min)
# Design a controller based on estimated system
ky, b0, d = Control_Decentral(k,t,l,w,b,structure)
# Test for Interaction
# We detune the controller of the n-th output in such a way that the maximum of the n-th row is sufficiently small
# Current maximum interaction
gmax = Gamma[o][GMax[o]]
# Check for set point weight, either given
if b == np.empty:
# Or computed from AMIGO_TUNE
b = b0
# Check for structure
if structure == 'PI':
# Set counter for while
counter=0
# Set shrinking rate
shrink_rate = 0.9
# Check if decoupling is needed
while (np.abs(H[o][o]/(ms*gmax)) - np.sqrt( (b*ky[0]*wc_min)**2 + ky[1]**2 ) < 0):
if counter > 1e6:
#print('Maximal Iteration for detuning reached! Abort')
break
# Detune the controller with the shrinking rate
ky = AMIGO_DETUNE(k,t,l,ky,shrink_rate*ky[0])
# Increment counter
counter += 1
print("Aström Detuning Iterations:" +str(counter))
# Get the controller parameter
Ky[o][o][:] = ky
B[o][o] = b
return Ky,B,D
# Modified Detuning
def Control_Decoupled(K,T,L,H, MS= None, w = 0, b=np.empty, structure = 'PI'):
# Check Input for Maximum Sensitivity
if MS is None:
MS = 1.4*np.eye(K.shape[0],K.shape[1])
# Compute Determinant of Maximum Sensitivity
ms = np.linalg.det(MS)
# Compute SISO Case
if K.ndim <= 1:
return Control_Decentral(K,T,L,w,b,structure)
# Compute General MIMO Case
else:
# Compute a decentralized control structure based on RGA
Ky, B, D = Control_Decentral(K,T,L, w , b, structure)
# Calculate the Pairing
# Compute RGA -> Checks for Shape
LG = RGA(K,T,L,w)
# Get Pairing as an array for every column
Pairing = np.argmax(LG, axis=0)
# Compute the Taylor Series
Gamma = np.multiply(-K,T+L)
# Initialize
# Gain
KD = np.zeros_like(Gamma)
# Interaction
GD = np.zeros_like(Gamma)
# Get the Diagonal entries for decoupling
for outputs in range(0,K.shape[0]):
inputs = Pairing[outputs]
KD[outputs][inputs] = K[outputs][inputs]
GD[outputs][inputs] = Gamma[outputs][inputs]
# Get the Antidiagonal
# Gain
KA = K-KD
# Interaction
GA = Gamma-GD
# Define the splitter
S = -np.dot(np.linalg.inv(KD),KA)
# Get the interaction relative to the gain
#GammaA = np.abs(GA + np.dot(GD,S))
# Interaction relative to the dynamic of the interaction
GammaA = np.abs(np.dot(np.linalg.inv(GD),GA) + S)
# Interaction relative to the gain
#GammaA = np.abs(np.dot(np.linalg.inv(KD),GA + np.dot(GD,S)))
print(GammaA)
# Get the maximum of each row
GMax = np.argmax(GammaA,axis=1)
#Iterate through the outputs
for outputs in range(0,K.shape[0]):
inputs = Pairing[outputs]
# Test the current controller for interaction
# Every controller has the dimension 3 for kp, ki, kd
ky = Ky[outputs][inputs]
#kr = Kr[outputs][inputs]
# Get the current parameter
k = K[outputs][inputs]
t = T[outputs][inputs]
l = L[outputs][inputs]
# Calculate the detuning frequency
R = 0.8
wc_min = 2.0/R * (t+l)/((t+l)**2 + t**2)
print(wc_min)
# Check for set point weight, either given
if b == np.empty:
# Or computed from AMIGO_TUNE
b = B[outputs][inputs]
gmax = GammaA[outputs][GMax[outputs]]
print(gmax, H[outputs][outputs])
# Check for PI Structure
if structure == 'PI':
# Define the counter
counter = 0
# Set shrinking rate
shrink_rate = 0.9
while (np.abs(H[outputs][outputs]/(ms*gmax)) - np.sqrt( (b*ky[0]/wc_min)**2 + ky[1]**2 ) < 0):
if counter > 1e6:
#print('Maximal Iteration for detuning reached! Abort')
break
# Detune the controller with the shrinking rate
ky = AMIGO_DETUNE(k,t,l,ky,shrink_rate*ky[0])
# Increment counter
counter += 1
print("Modified Detuning Iterationts "+str(counter))
# Get the controller parameter
Ky[outputs][inputs][:] = ky
#Kr[outputs][inputs][:] = [b*ky[0], ky[1], ky[2]]
# Return the controller with splitter
return Ky,B,np.eye(K.shape[0],K.shape[1])+S | AlCap23/Thesis | Python/Algorithms_Graphic.py | Python | gpl-3.0 | 17,966 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_static_route
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage static IP routes on Vyatta VyOS network devices
description:
- This module provides declarative management of static
IP routes on Vyatta VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
prefix:
description:
- Network prefix of the static route.
C(mask) param should be ignored if C(prefix) is provided
with C(mask) value C(prefix/mask).
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: configure static route
vyos_static_route:
prefix: 192.168.2.0
mask: 24
next_hop: 10.0.0.1
- name: configure static route prefix/mask
vyos_static_route:
prefix: 192.168.2.0/16
next_hop: 10.0.0.1
- name: remove configuration
vyos_static_route:
prefix: 192.168.2.0
mask: 16
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
vyos_static_route:
aggregate:
- { prefix: 192.168.2.0, mask: 24, next_hop: 10.0.0.1 }
- { prefix: 192.168.3.0, mask: 16, next_hop: 10.0.2.1 }
- { prefix: 192.168.3.0/16, next_hop: 10.0.2.1 }
- name: Remove static route collections
vyos_static_route:
aggregate:
- { prefix: 172.24.1.0/24, next_hop: 192.168.42.64 }
- { prefix: 172.24.3.0/24, next_hop: 192.168.42.64 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set protocols static route 192.168.2.0/16 next-hop 10.0.0.1
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('delete protocols static route %s/%s' % (prefix, mask))
elif state == 'present' and w not in have:
cmd = 'set protocols static route %s/%s next-hop %s' % (prefix, mask, next_hop)
if admin_distance != 'None':
cmd += ' distance %s' % (admin_distance)
commands.append(cmd)
return commands
def config_to_dict(module):
data = get_config(module)
obj = []
for line in data.split('\n'):
if line.startswith('set protocols static route'):
match = re.search(r'static route (\S+)', line, re.M)
prefix = match.group(1).split('/')[0]
mask = match.group(1).split('/')[1]
if 'next-hop' in line:
match_hop = re.search(r'next-hop (\S+)', line, re.M)
next_hop = match_hop.group(1).strip("'")
match_distance = re.search(r'distance (\S+)', line, re.M)
if match_distance is not None:
admin_distance = match_distance.group(1)[1:-1]
else:
admin_distance = None
if admin_distance is not None:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
else:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': 'None'})
return obj
def map_params_to_obj(module, required_together=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_together(required_together, item)
d = item.copy()
if '/' in d['prefix']:
d['mask'] = d['prefix'].split('/')[1]
d['prefix'] = d['prefix'].split('/')[0]
if 'admin_distance' in d:
d['admin_distance'] = str(d['admin_distance'])
obj.append(d)
else:
prefix = module.params['prefix'].strip()
if '/' in prefix:
mask = prefix.split('/')[1]
prefix = prefix.split('/')[0]
else:
mask = module.params['mask'].strip()
next_hop = module.params['next_hop'].strip()
admin_distance = str(module.params['admin_distance'])
state = module.params['state']
obj.append({
'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance,
'state': state
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_together=required_together)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| sgerhart/ansible | lib/ansible/modules/network/vyos/vyos_static_route.py | Python | mit | 8,147 |
# -*- coding: utf-8 -*-
"""
"""
# ------------------------------------------------------------------------------
# entity state
# ------------------------------------------------------------------------------
ENTITY_STATE_UNKNOW = -1
ENTITY_STATE_FREE = 0
ENTITY_STATE_DEAD = 1
ENTITY_STATE_REST = 2
ENTITY_STATE_FIGHT = 3
ENTITY_STATE_MAX = 4
# sub state
ENTITY_SUB_STATE_NORMAL = 0
ENTITY_SUB_STATE_RANDOM_STROLL = 1
ENTITY_SUB_STATE_GO_BACK = 2
ENTITY_SUB_STATE_CHASE_TARGET = 3
ENTITY_SUB_STATE_FLEE = 4
# Flags that forbid certain entity actions
FORBID_NO = 0x00000000
FORBID_MOTION = 0x00000001
FORBID_CHAT = 0x00000002
FORBID_SPELL = 0x00000004
FORBID_TRADE = 0x00000008
FORBID_EQUIP = 0x00000010
FORBID_INTONATE = 0x00000020
FORBID_ATTACK_PHY_NEAR = 0x00000040
FORBID_ATTACK_PHY_FAR = 0x00000080
FORBID_ATTACK_MAGIC = 0x00000080
FORBID_YAW = 0x00008000
FORBID_ATTACK_PHY = FORBID_ATTACK_PHY_NEAR | FORBID_ATTACK_PHY_FAR
FORBID_ATTACK_MAG = FORBID_ATTACK_MAGIC
FORBID_ATTACK = FORBID_ATTACK_PHY | FORBID_ATTACK_MAG
FORBID_MOTION_YAW = FORBID_MOTION | FORBID_YAW
FORBID_ALL = [
FORBID_MOTION,
FORBID_YAW,
FORBID_CHAT,
FORBID_ATTACK,
FORBID_SPELL,
FORBID_TRADE,
FORBID_EQUIP,
FORBID_INTONATE,
FORBID_ATTACK_PHY_NEAR,
FORBID_ATTACK_PHY_FAR,
FORBID_ATTACK_MAGIC,
]
FORBID_ACTIONS = {
ENTITY_STATE_UNKNOW : 0,
ENTITY_STATE_FREE : FORBID_NO,
ENTITY_STATE_DEAD : FORBID_MOTION_YAW | FORBID_TRADE | FORBID_ATTACK | FORBID_SPELL | FORBID_EQUIP,
ENTITY_STATE_REST : FORBID_MOTION_YAW | FORBID_TRADE | FORBID_ATTACK | FORBID_SPELL | FORBID_EQUIP,
ENTITY_STATE_FIGHT : FORBID_EQUIP | FORBID_TRADE,
}
for f in FORBID_ALL: FORBID_ACTIONS[ENTITY_STATE_UNKNOW] |= f
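# Example helper (added sketch, not part of the original definitions): checking whether
# a given action is forbidden in a given entity state using the bit masks above, e.g.
# is_action_forbidden(ENTITY_STATE_DEAD, FORBID_MOTION) is True, while
# is_action_forbidden(ENTITY_STATE_FREE, FORBID_MOTION) is False.
def is_action_forbidden(state, forbid_flag):
    return (FORBID_ACTIONS.get(state, 0) & forbid_flag) != 0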
# ------------------------------------------------------------------------------
# Dialog-related definitions
# ------------------------------------------------------------------------------
DIALOG_TYPE_NORMAL = 0 # Normal dialog
DIALOG_TYPE_QUEST = 1 # Quest dialog
# ------------------------------------------------------------------------------
# Skill-related definitions
# ------------------------------------------------------------------------------
# Skill target object types
SKILL_OBJECT_TYPE_UNKNOWN = 0
SKILL_OBJECT_TYPE_ENTITY = 1
SKILL_OBJECT_TYPE_POSITION = 2
| daaoling/KBEngine-LearnNote | kbengine_demos_assets/scripts/common/GlobalDefine.py | Python | gpl-2.0 | 2,439 |
import xlwt
import datetime
# Set the table cell style
def set_style(name, height, bold=False):
style = xlwt.XFStyle()
font = xlwt.Font()
font.name = name
font.bold = bold
font.color_index = 4
font.height = height
style.font = font
return style
def set_date_style(name, height, num_format_str, bold=False):
style = set_style(name, height, bold)
style.num_format_str = num_format_str
return style
# Write the Excel file
def write_excel():
f = xlwt.Workbook()
sheet1 = f.add_sheet('学生', cell_overwrite_ok=True)
row0 = ["姓名", "年龄", "出生日期", "爱好"]
colum0 = ["张三", "李四", "恋习Python", "小明", "小红", "无名"]
    # Write the first row
for i in range(0, len(row0)):
sheet1.write(0, i, row0[i], set_style('Times New Roman', 220, True))
    # Write the first column
for i in range(0, len(colum0)):
sheet1.write(i + 1, 0, colum0[i], set_style('Times New Roman', 220, True))
sheet1.write(1, 2, datetime.datetime(2018, 10, 10), set_date_style('Times New Roman', 220, 'yyyy-mm-dd'))
    sheet1.write_merge(6, 6, 1, 3, '未知')  # Merge cells across columns (within one row)
    sheet1.write_merge(1, 2, 3, 3, '打游戏')  # Merge cells across rows (within one column)
    sheet1.write_merge(4, 5, 3, 3, '打篮球')
f.add_sheet('年级', cell_overwrite_ok=True)
f.save('D:\\test.xls')
print('excel done')
if __name__ == '__main__':
write_excel()
| hhj0325/pystock | com/hhj/excel/write_excel.py | Python | apache-2.0 | 1,404 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class MatrixBandPartTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
def Test(self):
mat = np.ones(shape_).astype(dtype_)
batch_mat = np.tile(mat, batch_shape_ + (1, 1))
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
band_np = mat
if lower >= 0:
band_np = np.triu(band_np, -lower)
if upper >= 0:
band_np = np.tril(band_np, upper)
if batch_shape_ is not ():
band_np = np.tile(band_np, batch_shape_ + (1, 1))
for index_dtype in [dtypes_lib.int32, dtypes_lib.int64]:
with self.cached_session(use_gpu=False):
band = array_ops.matrix_band_part(
batch_mat,
constant_op.constant(lower, index_dtype),
constant_op.constant(upper, index_dtype))
self.assertAllEqual(band_np, self.evaluate(band))
return Test
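# Reference sketch (added for illustration, not part of the original tests): the band
# part operation exercised above can be reproduced with plain numpy by combining
# np.triu and np.tril, mirroring the logic inside _GetMatrixBandPartTest.
def _numpy_band_part(mat, lower, upper):
  """Keeps `lower` subdiagonals and `upper` superdiagonals of `mat`; a negative
  value keeps the whole corresponding triangle, as in array_ops.matrix_band_part."""
  band = np.array(mat)
  if lower >= 0:
    band = np.triu(band, -lower)
  if upper >= 0:
    band = np.tril(band, upper)
  return band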
class MatrixBandPartGradTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
def Test(self):
shape = batch_shape_ + shape_
x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)
with self.session(use_gpu=False):
for lower in -1, 0, 1, shape_[-2] - 1:
for upper in -1, 0, 1, shape_[-1] - 1:
y = array_ops.matrix_band_part(x, lower, upper)
error = gradient_checker.compute_gradient_error(
x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
return Test
class MatrixBandPartBenchmark(test_lib.Benchmark):
shapes = [
(10, 16, 16),
(10, 101, 101),
(10, 256, 256),
(10, 1000, 1000),
(10, 1024, 1024),
(10, 2048, 2048),
(10, 10, 4, 4),
(10, 10, 10, 10),
(10, 10, 16, 16),
(10, 10, 101, 101),
(10, 10, 256, 256),
(10, 10, 1000, 1000),
(10, 10, 1024, 1024),
(10, 10, 2048, 2048),
]
def benchmarkMatrixBandPartOp(self):
for shape_ in self.shapes:
for limits in (-1, -1), (-1, 0), (0, -1), (2, 2):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_cpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if test_lib.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
min_iters=10,
name="matrix_band_part_gpu_{shape}_{limits}".format(
shape=shape_, limits=limits))
if __name__ == "__main__":
dtypes = (np.bool, np.int32, np.int64, np.float16,
dtypes_lib.bfloat16.as_numpy_dtype, np.float32, np.float64,
np.complex64, np.complex128)
for dtype in dtypes:
for batch_shape in ((), (2,), (1, 3, 2)):
for rows in 1, 2, 7, 23:
for cols in 1, 2, 7, 23:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartTest, "MatrixBandPart", name,
_GetMatrixBandPartTest(dtype, batch_shape, shape))
for dtype in (np.float32, np.float64):
for batch_shape in ((), (2,)):
for rows in 1, 2, 7:
for cols in 1, 2, 7:
shape = (rows, cols)
name = "%s_%s" % (dtype.__name__,
"_".join(map(str, batch_shape + shape)))
_AddTest(MatrixBandPartGradTest, "MatrixBandPartGrad", name,
_GetMatrixBandPartGradTest(dtype, batch_shape, shape))
test_lib.main()
| hehongliang/tensorflow | tensorflow/python/kernel_tests/matrix_band_part_op_test.py | Python | apache-2.0 | 5,978 |
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip_other_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "17", "format": "counter"}
:param entry_match_drop: {"optional": true, "size": "8", "type": "number", "oid": "6", "format": "counter"}
:param ip_port_block_free: {"optional": true, "size": "8", "type": "number", "oid": "15", "format": "counter"}
:param ip_node_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "13", "format": "counter"}
:param entry_list_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "10", "format": "counter"}
:param ip_node_alloc: {"optional": true, "size": "8", "type": "number", "oid": "11", "format": "counter"}
:param entry_added_shadow: {"optional": true, "size": "8", "type": "number", "oid": "20", "format": "counter"}
:param ip_port_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "16", "format": "counter"}
:param ip_other_block_alloc_failure: {"optional": true, "size": "8", "type": "number", "oid": "19", "format": "counter"}
:param entry_removed_from_hw: {"optional": true, "size": "8", "type": "number", "oid": "4", "format": "counter"}
:param entry_deleted: {"optional": true, "size": "8", "type": "number", "oid": "2", "format": "counter"}
:param entry_list_alloc: {"optional": true, "size": "8", "type": "number", "oid": "8", "format": "counter"}
:param entry_list_free: {"optional": true, "size": "8", "type": "number", "oid": "9", "format": "counter"}
:param entry_added_to_hw: {"optional": true, "size": "8", "type": "number", "oid": "3", "format": "counter"}
:param ip_node_free: {"optional": true, "size": "8", "type": "number", "oid": "12", "format": "counter"}
:param entry_added: {"optional": true, "size": "8", "type": "number", "oid": "1", "format": "counter"}
:param ip_other_block_free: {"optional": true, "size": "8", "type": "number", "oid": "18", "format": "counter"}
:param entry_invalidated: {"optional": true, "size": "8", "type": "number", "oid": "21", "format": "counter"}
:param ip_port_block_alloc: {"optional": true, "size": "8", "type": "number", "oid": "14", "format": "counter"}
:param entry_match_drop_hw: {"optional": true, "size": "8", "type": "number", "oid": "7", "format": "counter"}
:param hw_out_of_entries: {"optional": true, "size": "8", "type": "number", "oid": "5", "format": "counter"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.ip_other_block_alloc = ""
self.entry_match_drop = ""
self.ip_port_block_free = ""
self.ip_node_alloc_failure = ""
self.entry_list_alloc_failure = ""
self.ip_node_alloc = ""
self.entry_added_shadow = ""
self.ip_port_block_alloc_failure = ""
self.ip_other_block_alloc_failure = ""
self.entry_removed_from_hw = ""
self.entry_deleted = ""
self.entry_list_alloc = ""
self.entry_list_free = ""
self.entry_added_to_hw = ""
self.ip_node_free = ""
self.entry_added = ""
self.ip_other_block_free = ""
self.entry_invalidated = ""
self.ip_port_block_alloc = ""
self.entry_match_drop_hw = ""
self.hw_out_of_entries = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class DdosProtection(A10BaseClass):
"""Class Description::
Statistics for the object ddos-protection.
Class ddos-protection supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ddos-protection/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ddos-protection"
self.a10_url="/axapi/v3/cgnv6/ddos-protection/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
| amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_ddos_protection_stats.py | Python | apache-2.0 | 4,533 |
from bs4 import BeautifulSoup
import requests
import random
import urlparse
class BufferOverflow():
def __init__(self, url, detail):
self.url = url
self.detail = detail
self.input_pairs = []
self.tag_num = []
self.status = [0,0,0,0,0]
self.input_tag_num = {}
def randomizer(self):
str2 = ""
str1 = "QAa0bcLdUK2eHfJgTP8XhiFj61DOklNm9nBoI5pGqYVrs3CtSuMZvwWx4yE7zR"
for i in range(300):
r = random.randint(0, 61)
str2 = str2 + str1[r]
return str2
def parseInput(self):
response = requests.get(self.url) # Todo: identify which to use, GET or POST ?
if response.status_code != 200:
# Todo
pass
self.status[response.status_code/100-1] += 1
html = response.content
soup = BeautifulSoup(html)
forms = soup.select("form")
# print "forms # : " + forms.len(forms)
test_tag = ["form", "input", "div", "section","article","main","aside","header","footer","nav","figure","figcaption","template","video","audio","embed","mark","embed","mark","progress","meter","time","ruby","rt","rp","bdi","wbr","canvas","datalist","keygen","output"]
input_tag = ['tel','search','url','email','date','time','number','range','color','text','hidden','password','radio','checkbox','submit']
for tag in test_tag:
self.tag_num.append([tag,len(soup.select(tag))])
for tag in input_tag:
self.input_tag_num[tag] = 0
self.input_pairs = []
for form in forms:
action = form.get("action", "")
if action == "":
continue
#print 'action:',action,'\n'
inputs = form.find_all("input")
#print 'input:',inputs,'\n'
form_content = {}
form_content["action"] = form["action"]
form_content["payload"] = {}
for _input in inputs:
types = _input.get("type","")
if types == "":
continue
#if "type" not in _input:
# continue
elif _input["type"].lower() == "text" or _input["type"].lower() == "hidden" or _input["type"].lower() == "password":
s = self.randomizer()
if "value" in _input:
s = _input["value"] + s
form_content['payload'][_input["name"]] = s
elif _input["type"].lower() == "radio":
form_content['payload'][_input["name"]] = "checked"
elif _input["type"].lower() == "checkbox":
form_content['payload'][_input["name"]] = "checked"
self.input_tag_num[_input['type'].lower()] += 1
self.input_pairs.append(form_content)
print self.input_pairs
self.sendBack()
return
def sendBack(self):
for form in self.input_pairs:
if not form["action"].startswith("http"):
urlinfo = urlparse.urlparse(self.url)
#print urlinfo,'1',form['action']
url = urlinfo.scheme + '://' + urlinfo.netloc + '/' + form["action"]
else:
url = form["action"]
#print 'payload:',form['payload']
response = requests.post(url, data = form['payload'])
if response.status_code != 200:
# Todo
pass
print response
return
def analyzeBufferOverflow(self):
print '###################################################################################'
print '<---BUFFER OVERFLOW ANALYSIS--->'
for entry in self.tag_num:
print 'Total # of %s tag:%d'%(entry[0],entry[1])
print '###################################################################################'
print 'Input tag type:'
print 'Total # of %s tag:%d'%(self.tag_num[1][0],self.tag_num[1][1])
print 'Detail:'
for k,v in self.input_tag_num.iteritems():
print 'Total # of %s tag in input tags:%d'%(k,v)
print '###################################################################################'
print '<<-Categorizing the available data on basis of HTTP Status Codes->>'
print "Informational Codes 1xx Series:",self.status[0]
print "Successful Client Interaction related 2xx Series:",self.status[1]
print "Redirection related 3xx Series:",self.status[2]
print "Client Error related 4xx Series:",self.status[3]
print "Server Error related 5xx Series:",self.status[4]
print '###################################################################################'
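# Example driver (added sketch, not part of the original module); the URL below is a
# placeholder for the site under assessment.
def example_run(target_url='http://example.com', detail=False):
    '''Minimal usage sketch: crawl the forms, replay them and print the analysis.'''
    scanner = BufferOverflow(target_url, detail)
    scanner.parseInput() # collect input fields and POST the randomized payloads
    scanner.analyzeBufferOverflow() # print the tag and status-code summary
    return scanner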
| FakerKimg/sulley | Firefuzzer/BufferOverflow.py | Python | gpl-2.0 | 4,836 |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google.cloud.security.enforcer.enforcer."""
import copy
import json
import unittest
import httplib2
import mock
import testing_constants as constants
from tests.unittest_utils import ForsetiTestCase
from google.protobuf import text_format
from tests.unittest_utils import get_datafile_path
from google.cloud.security.enforcer import enforcer_log_pb2
from google.cloud.security.enforcer import enforcer
# Used anywhere a real timestamp could be generated to ensure consistent
# comparisons in tests
MOCK_TIMESTAMP = 1234567890
class EnforcerTest(ForsetiTestCase):
"""Extended unit tests for BatchFirewallEnforcer class."""
def setUp(self):
"""Set up."""
self.mock_compute = mock.patch.object(enforcer.batch_enforcer.compute,
'ComputeClient').start()
self.gce_service = self.mock_compute().service
self.gce_service.networks().list().execute.return_value = (
constants.SAMPLE_TEST_NETWORK_SELFLINK)
self.project = constants.TEST_PROJECT
self.mock_time = mock.patch.object(enforcer.batch_enforcer.datelib,
'Timestamp').start()
self.mock_time.now().AsMicroTimestamp.return_value = MOCK_TIMESTAMP
self.mock_time.now().AsSecondsSinceEpoch.return_value = MOCK_TIMESTAMP
self.enforcer = enforcer.initialize_batch_enforcer(
{},
concurrent_threads=1,
max_write_threads=1,
max_running_operations=0,
dry_run=True)
self.expected_summary = (
enforcer_log_pb2.BatchResult(
batch_id=MOCK_TIMESTAMP,
timestamp_start_msec=MOCK_TIMESTAMP,
timestamp_end_msec=MOCK_TIMESTAMP))
self.addCleanup(mock.patch.stopall)
def test_enforce_single_project(self):
"""Verifies enforce_single_project returns the correct results.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, and the expected new firewall rules on the
second call.
* Load a mock policy file.
* Create a temporary directory for writing the dremel recordio table out
to.
* Send the policy and project to EnforceSingleProject.
Expected Results:
* The results proto returned matches the expected results.
"""
self.gce_service.firewalls().list().execute.side_effect = [
constants.DEFAULT_FIREWALL_API_RESPONSE,
constants.EXPECTED_FIREWALL_API_RESPONSE]
policy_filename = get_datafile_path(__file__, 'sample_policy.json')
results = enforcer.enforce_single_project(self.enforcer, self.project,
policy_filename)
self.expected_summary.projects_total = 1
self.expected_summary.projects_success = 1
self.expected_summary.projects_changed = 1
self.expected_summary.projects_unchanged = 0
self.assertEqual(self.expected_summary, results.summary)
expected_results = enforcer_log_pb2.ProjectResult()
text_format.Merge(constants.SAMPLE_ENFORCER_PROJECTRESULTS_ASCIIPB,
expected_results)
expected_results.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT
expected_results.gce_firewall_enforcement.policy_path = policy_filename
project_result = results.results[0]
self.assertEqual(expected_results, project_result)
def test_enforcer_raises_exception_with_invalid_json_policy(self):
"""Verifies json parsed correct as a list of dictionaries.
Setup:
* Load an invalid json file (no list).
* Give it to enforcer to parse and load
Expected Results:
* Enforcer should raise InvalidParsedPolicyFileError
"""
policy_filename = get_datafile_path(__file__, 'invalid_sample_policy.json')
with self.assertRaises(enforcer.InvalidParsedPolicyFileError) as r:
enforcer.enforce_single_project(
self.enforcer, self.project, policy_filename)
if __name__ == '__main__':
unittest.main()
| cschnei3/forseti-security | tests/enforcer/enforcer_test.py | Python | apache-2.0 | 4,766 |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""The fogbench exceptions module contains Exception subclasses
"""
# nothing to import yet
__author__ = "Praveen Garg"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
class FogbenchError(Exception):
"""
All errors specific to fogbench will be
subclassed from FogbenchError which is subclassed from Exception.
"""
pass
class InvalidTemplateFormat(FogbenchError):
pass
class InvalidSensorValueObjectTemplateFormat(InvalidTemplateFormat):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "{!s}".format(self.msg)
| foglamp/FogLAMP | extras/python/fogbench/exceptions.py | Python | apache-2.0 | 735 |
# coding: utf-8
from app import db
class Content(db.Model):
__tablename__ = "contents"
session = db.session
data = db.Column(db.Text())
_type = db.Column(db.String(80), primary_key=True)
def __init__(self, data, type):
self.data = data
self._type = type
def to_dict(self):
return{
"data": self.data,
"_type": self._type,
}
@classmethod
def make_commit(cls):
cls.session.commit()
@classmethod
def get_contents(cls):
return cls.session.query(cls).all()
@classmethod
def get_content_by_type(cls, _type):
return cls.session.query(cls).get(_type)
@classmethod
def save_content(cls, content):
cls.session.add(content)
cls.make_commit()
return content._type
@classmethod
def delete_content(cls, _type):
cls.session.query(cls).filter(cls._type == _type).delete()
cls.make_commit()
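# Example (added sketch, not part of the original module): typical CRUD usage of the
# Content model. It assumes a Flask application context and an initialised database,
# neither of which is shown here; the data and type values are placeholders.
def example_content_usage():
    """Minimal usage sketch: create, fetch and delete a Content row."""
    _type = Content.save_content(Content(data="<p>hello</p>", type="about"))
    content = Content.get_content_by_type(_type)
    as_dict = content.to_dict() if content is not None else None
    Content.delete_content(_type)
    return as_dict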
| pygamebrasil/pygame-site | src/app/contents/models.py | Python | mit | 969 |
'''
model without galaxy assembly bias
'''
import numpy as np
import os.path as path
from Corrfunc import _countpairs
from Corrfunc.utils import read_catalog
from astropy.table import Table
from halotools.mock_observables import FoFGroups
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import HodModelFactory
from halotools.empirical_models import TrivialPhaseSpace, AssembiasZheng07Cens
from halotools.empirical_models import NFWPhaseSpace, AssembiasZheng07Sats
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from halotools.mock_observables.catalog_analysis_helpers import return_xyz_formatted_array
from halotools.empirical_models import enforce_periodicity_of_box
def single_model(Mr):
model = PrebuiltHodModelFactory("zheng07" , threshold = -1.*Mr)
return model
def richness(group_id):
gals = Table()
gals['groupid'] = group_id
gals['dummy'] = 1
grouped_table = gals.group_by('groupid')
grp_richness = grouped_table['dummy'].groups.aggregate(np.sum)
return grp_richness
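# Example (added sketch, not part of the original module): richness simply counts how
# many galaxies share each FoF group id.
def _example_richness():
    """Minimal usage sketch: three groups with 3, 2 and 1 members, giving [3, 2, 1]."""
    group_id = np.array([0, 0, 0, 1, 1, 2])
    return richness(group_id)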
class MCMC_model(object):
def __init__(self, Mr):
self.Mr = Mr
self.model = single_model(Mr)
self.halocat = CachedHaloCatalog(simname = 'bolplanck', redshift = 0, halo_finder = 'rockstar')
#GMF binning settings
self.boxsize = self.halocat.Lbox
if self.Mr == 18:
gmf_cat = np.loadtxt("../dat/gmf_mr18.0.dat")
if self.Mr == 19:
gmf_cat = np.loadtxt("../dat/gmf_mr19.0.dat")
if self.Mr == 20:
gmf_cat = np.loadtxt("../dat/gmf_mr20.0.dat")
self.data_gmf_bin = np.hstack([gmf_cat[:,0],gmf_cat[-1,1]])
self.data_gmf_bin_width=(self.data_gmf_bin[1:]-self.data_gmf_bin[:-1])
def __call__(self, theta, prior_range):
return self._sum_stat(theta, prior_range=prior_range)
def _sum_stat(self, theta, prior_range=None):
self.model.param_dict['logM0'] = theta[0]
self.model.param_dict['sigma_logM'] = theta[1]
self.model.param_dict['logMmin'] = theta[2]
self.model.param_dict['alpha'] = theta[3]
self.model.param_dict['logM1'] = theta[4]
gmff = []
for i in xrange(1):
self.model.populate_mock(self.halocat)
x = self.model.mock.galaxy_table['x']
y = self.model.mock.galaxy_table['y']
z = self.model.mock.galaxy_table['z']
vz = self.model.mock.galaxy_table['vz']
# applying RSD
pos = return_xyz_formatted_array(x, y, z, velocity = vz, velocity_distortion_dimension = 'z')
# enforcing PBC
pos = enforce_periodicity_of_box(pos, self.boxsize)
bperp = 0.14
bpar = 0.75
Lbox = np.array([self.boxsize, self.boxsize, self.boxsize])
period = Lbox
groups = FoFGroups(pos, b_perp=bperp, b_para=bpar, Lbox = Lbox , period=Lbox)
group_ids = groups.group_ids
group_richness = richness(group_ids)
gmff.append(np.histogram(np.array(group_richness) ,self.data_gmf_bin)[0] / (self.data_gmf_bin_width * self.boxsize**3.))
gmf = np.mean(np.array(gmff) , axis = 0)
nbar = 1.*len(pos)/(self.boxsize)**3.
return nbar , gmf
| mjvakili/gambly | code/hod_group.py | Python | mit | 3,355 |
"""
EarthBound Patcher - An easy-to-use EarthBound ROM patcher.
Copyright (C) 2013 Lyrositor <[email protected]>
This file is part of EarthBound Patcher.
EarthBound Patcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
EarthBound Patcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with EarthBound Patcher. If not, see <http://www.gnu.org/licenses/>.
"""
# EBPPatch
# Handles the import of EBP (EarthBound Patch) patches.
import json
from IPSPatch import *
class EBPPatch(IPSPatch):
"""The new EarthBound patcher format for patching, based on IPS."""
def __init__(self, patchPath, new=False):
"""Creates a new patch or loads an existing one."""
super().__init__(patchPath, new)
if not new:
self.info = self.loadMetadata()
else:
self.patchPath = patchPath
def loadMetadata(self):
"""Loads the metadata from the patch."""
# Check to see if it contains metadata.
try:
info = json.loads(self.read().decode("utf-8"))
assert info["patcher"] == "EBPatcher"
print("EBPPatch.loadMetadata(): Metadata loaded.\n"
"\tTitle: {}\n\tAuthor: {}\n\tDescription: {}".format(
info["title"], info["author"], info["description"]))
except:
info = None
print("EBPPatch.loadMetadata(): Failed to load metadata.")
return info
def createFromSource(self, sourceROM, targetROM, metadata):
"""Creates an EBP patch from the source and target ROMs."""
# Create the records.
i = None
records = {}
sourceROM.seek(0)
targetROM.seek(0)
s = sourceROM.read(1)
t = targetROM.read(1)
while t:
if t == s and i is not None:
i = None
elif t != s:
if i is not None:
# Check that the record's size can fit in 2 bytes.
if targetROM.tell() - 1 - i == 0xFFFF:
i = None
continue
records[i] += t
else:
i = targetROM.tell() - 1
# Check that the offset isn't EOF. If it is, go back one
# byte to work around this IPS limitation.
if i.to_bytes(3, "big") != b"EOF":
records[i] = t
else:
i -= 1
records[i] = targetROM.getvalue()[i]
s = sourceROM.read(1)
t = targetROM.read(1)
# Write the patch.
self.seek(0)
self.write(b"PATCH")
for r in sorted(records):
self.write(r.to_bytes(3, "big"))
self.write(len(records[r]).to_bytes(2, "big"))
self.write(records[r])
self.write(b"EOF")
self.write(bytes(metadata, "utf-8"))
# Write the patch to a file.
f = open(self.patchPath, "wb")
f.write(self.getvalue())
f.close()
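# Example (added sketch, not part of the original module): building a patch from two
# ROM images read into memory. File names and metadata values are placeholders, and
# the EBPPatch constructor behaviour for new patches is assumed from its use above.
def _example_create_patch(source_path, target_path, patch_path):
    """Minimal usage sketch: diff two ROMs into an EBP patch with JSON metadata."""
    import io
    with open(source_path, "rb") as source_file, open(target_path, "rb") as target_file:
        source_rom = io.BytesIO(source_file.read())
        target_rom = io.BytesIO(target_file.read())
    metadata = json.dumps({"patcher": "EBPatcher",
                           "title": "Example Hack",
                           "author": "Unknown",
                           "description": "Placeholder metadata."})
    patch = EBPPatch(patch_path, new=True)
    patch.createFromSource(source_rom, target_rom, metadata)
    return patch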
| Lyrositor/EBPatcher | EBPPatch.py | Python | gpl-3.0 | 3,532 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# django-bbcode: templatetags/bbcode.py
##
from django import template
from bbcode import util as bbcode
register = template.Library()
def render_bbcode(value):
return bbcode.to_html(value)
register.filter('render_bbcode', render_bbcode)
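##
# Example usage (added note, not part of the original tag library): with the app in
# INSTALLED_APPS, the filter is applied from a template as follows, where
# `story.body` is only a placeholder variable:
#
#   {% load bbcode %}
#   {{ story.body|render_bbcode }}
#
# This simply passes the raw BBCode text through bbcode.util.to_html().
##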
##
# End of File
##
| stitzelj/Ficlatte | django-bbcode-master/bbcode/templatetags/bbcode.py | Python | agpl-3.0 | 317 |
#!/usr/bin/python2.7
#coding:utf-8
import requests
import re
from dummy import *
info = {
'NAME':'WordPress cp multi view calendar <= 1.1.4 - SQL Injection',
'AUTHOR':'WJK,yangbh,lkz',
'TIME':'20150320',
'WEB':'https://www.yascanner.com/#!/n/120',
'DESCRIPTION':''
}
opts = {
'url':'http://testasp.vulnweb.com', #'target ip'
}
# opts = [
# ['url','http://testasp.vulnweb.com','target url']
# ]
def Assign(services):
if services.has_key('url') and services.has_key('cms') and services['cms']=='Wordpress':
return True
return False
def Audit(services):
url = services['url'] + '/?action=data_management&cpmvc_do_action=mvparse&f=edit&id=1%20UNION%20ALL%20SELECT%20NULL,NULL,NULL,NULL,CONCAT%280x7167676a71,0x4d7059554473416c6d79,0x7170777871%29,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL#'
try:
rq = requests.get(url)
res= rq.text
m = re.search("qggjqMpYUDsAlmyqpwxq",res)
if m:
security_hole(url+'WordPress cp-multi-view-calendar <= 1.1.4 - SQL Injection')
except:
pass
# ----------------------------------------------------------------------------------------------------
# not tested yet
# ----------------------------------------------------------------------------------------------------
if __name__=='__main__':
services = {'url':'http://www.eguan.cn'}
pprint(Audit(services))
pprint(services)
| xujun10110/Hammer | plugins/Web_Applications/WordPress_cp_multi_view_calendar1_1_4_SQL_Injection.py | Python | gpl-2.0 | 1,333 |
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import models
from . import wizards
| OCA/carrier-delivery | stock_picking_delivery_rate/__init__.py | Python | agpl-3.0 | 167 |
#!/usr/bin/env python
#Useful utilities for automate.py
import os
import time
import datetime
import csv
import xml.etree.ElementTree as ET
import hgapi
import re
from sumatra.projects import Project
from sumatra.projects import load_project
from sumatra.parameters import build_parameters
def check_file_exists(input_file, output_file):
'''Checks if file exists'''
if not os.path.exists(output_file):
return True, "Missing file %s for %s" % (output_file, input_file)
else:
return False, "File %s exists for %s" % (output_file, input_file)
def spawn_job(jobname, SAMPLE, LOG_PATH, RESULTS_EMAIL, SCHEDULER, walltime, queue, nodes, ppn, memory, script, args_list):
'''Spawns a job on a cluster (HPC or AWS Star Cluster) using DRMAA'''
import drmaa
s = drmaa.Session()
s.initialize()
print 'Creating job template for ' + jobname
jt = s.createJobTemplate()
print 'Job template created'
jt.jobName = jobname + "_" + str(SAMPLE)
print "job name is " + jt.jobName
jt.outputPath = LOG_PATH
#print "Error path is" + jt.outputPath
jt.errorPath = LOG_PATH
jt.email = RESULTS_EMAIL
#print "email is" + str(jt.email)
if SCHEDULER == "PBS":
jt.hardWallclockTimeLimit = walltime
jt.softWallClockTimeLimit = walltime
jt.hardRunDurationLimit = walltime
jt.softRunDurationLimit = walltime
jt.nativeSpecification = "-q " + queue + " -l " + "nodes=" + str(nodes) + ":ppn=" + str(ppn) + " -l mem=" + memory
print "native specification is" + jt.nativeSpecification
elif SCHEDULER == "LSF":
print "LSF currently unsupported. Please contact us to request this feature."
elif SCHEDULER == "Slurm":
print "Slurm currently unsupported. Please contact us to request this feature."
elif SCHEDULER == "SGE":
jt.nativeSpecification = "-V -cwd -pe local " + str(ppn) + " -l h_vmem=" + re.sub("gb","G",memory)
print "native specification is " + jt.nativeSpecification
else:
print "Scheduler unsupported. Please make sure you have a parameter in your parameter file SCHEDULER with the options PBS, SGE, LSF, StarCluster or Slurm"
jt.remoteCommand = os.getcwd() + script
#print "remote command is" + jt.remoteCommand #THIS PRINTS THEN HANGS
jt.args = args_list
print "ArgsList is " + str(jt.args)
jt.joinFiles = True
jobid = s.runJob(jt)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
print "Date/Time: " + date
print "Job has been submitted with id" + jobid + " at Date/Time: " + date
retval = s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")
print "Job: " + str(retval.jobId) + ' finished with status: ' + str(retval.hasExited) + ' and exit status: ' + str(retval.exitStatus) + " at Date/Time: " + date
print "Date/Time: " + date
print 'Cleaning up'
s.deleteJobTemplate(jt)
s.exit()
return
def job_status(jobname, resultspath, SAMPLE, outputfilename, FLAG_PATH):
'''Checks to see if a job has successfully finished by checking if the specified output file exists.'''
stage = jobname
try:
size = os.path.getsize(resultspath + "/" + outputfilename)
except OSError:
print("Looking for file: " + resultspath + "/" + outputfilename)
print("%s failed to produce any output files" % stage)
if 'size' in locals():
if size == 0:
print "Job Failed!"
print('%s produced an empty output file' % stage)
else:
print("%s Finished and Successfully produced an output file of size %s" % (stage,size))
flag_file = "%s/%s_%s_completed.flag" % (FLAG_PATH, stage, SAMPLE)
open(flag_file, 'w').close()
else:
print("%s failed to produce any output files" % stage)
return
def job_status_nfsc(jobname, resultspath, SAMPLE, outputfilename, FLAG_PATH):
    '''Checks to see if a job has successfully finished by checking if the specified output file exists.'''
stage = jobname
try:
size = os.path.getsize(resultspath + "/" + outputfilename)
except OSError:
print("Looking for file: " + resultspath + "/" + outputfilename)
print("%s failed to produce any output files" % stage)
if 'size' in locals():
print("%s Finished and Successfully produced an output file of size %s" % (stage,size))
flag_file = "%s/%s_%s_completed.flag" % (FLAG_PATH, stage, SAMPLE)
open(flag_file, 'w').close()
else:
print("%s failed to produce any output files" % stage)
return
def decompress(file_directory, compression_type):
'''Unzips (bzip or gzip) files'''
if compression_type == "bzip":
os.chdir(file_directory)
os.system("bunzip2 *.bz2")
print "Bzip files successfully uncompressed"
elif compression_type == "gzip":
os.chdir(file_directory)
os.system("gunzip *.gz")
print "Gzip files successfully uncompressed"
else:
print "Working with uncompressed files"
return
def compress(file_directory, compression_type):
'''Compresses (bzip or gzip) files'''
if compression_type == "bzip":
os.chdir(file_directory)
os.system("bzip2 *.fastq")
print "Bzip files successfully compressed"
elif compression_type == "gzip":
os.chdir(file_directory)
os.system("gzip *.fastq")
print "Gzip files successfully compressed"
else:
print "Working with uncompressed files"
return
class Bunch(object):
'''Bunches parameters into a dictionary'''
def __init__(self, adict):
self.__dict__.update(adict)
def check_create_dir(directory):
'''Creates a directory if it does not exist'''
if not os.path.exists(directory):
os.makedirs(directory)
return
def parse_xml(file):
'''Parses XML file to extract sample names from TCGA XML manifest'''
tree = ET.parse(file)
root = tree.getroot()
for child in root:
tree = ET.ElementTree(child)
for id in tree.findall('analysis_id'):
print id.text
name = "/gpfs/home/kfisch/" + id.text + ".xml"
tree.write(name)
return
def get_TCGA_ID(file):
'''Gets TCGA ID from TCGA XML manifest and creates a sample list'''
tree = ET.parse(file)
root = tree.getroot()
sample_list = []
for id in root.findall('Result'):
analysis_id = id.find('analysis_id').text
analysis_id_string = "TCGA_" + str(analysis_id)
sample_list.append(analysis_id_string)
print sample_list
return sample_list
def make_params(step, sample_list, flag_path):
'''Creates parameter input lists for each step in pipeline'''
vars()['inputList_' + step] = []
for sample in sample_list:
vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (flag_path, step, sample)])
#print vars()['inputList_' + step]
return vars()['inputList_' + step]
def get_samples_from_txt_file(file):
'''Creates sample list from text file of samples'''
global SAMPLE_LIST
sample_file = open(file, 'r')
reader = csv.reader(sample_file)
    SAMPLE_LIST = [row for row in reader]
    return SAMPLE_LIST
def sumatra_start(repository, sumatra_db_path, results_path, working_dir, hg_username, sumatra_run_name, parameters):
'''Clones the Omics Pipe repository from Bitbucket, creates a Sumatra project, and creates a Sumatra record for the current run'''
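    # Call sketch (all argument values below are hypothetical placeholders):
    #   record, project = sumatra_start("https://bitbucket.org/example/omics_pipe",
    #                                   "/data/sumatra", "/data/results", "/data/work",
    #                                   "username", "run1", "parameters.yaml")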
print "sumatra_db_path is " + sumatra_db_path
print type(sumatra_db_path)
check_create_dir(sumatra_db_path)
os.chdir(sumatra_db_path)
repo1 = hgapi.Repo(repository)
repo_path = sumatra_db_path +"/omics_pipe"
repo= {"url":repo_path,
"type":"sumatra.versioncontrol._mercurial.MercurialRepository",
"upstream":repository}
executable= {"path":"",
"version": "",
"type":"sumatra.programs.PythonExecutable",
"options":"",
"name": "Python"}
sumatra_launch_mode = {"working_directory": working_dir, "type": "sumatra.launch.SerialLaunchMode"}
data_store1 = {"root":results_path, "type": "sumatra.datastore.filesystem.FileSystemDataStore"}
database_path = sumatra_db_path + "/records/recordstore.db"
record_store1 = {"db_file": database_path, "type": "sumatra.recordstore.django_store.DjangoRecordStore"}
input_datastore1 = {"root": results_path, "type": "sumatra.datastore.filesystem.FileSystemDataStore"}
while True:
try:
repo1.hg_clone(url = repository, path=repo_path)
with open(repo_path + "/.hg/hgrc", "a") as myfile:
myfile.write("[ui]\nusername= " + hg_username)
print "Omics pipe repository cloned to : " + repo_path
break
except hgapi.hgapi.HgException:
print "Omics pipe repository already exists."
break
while True:
try:
Project(sumatra_run_name, default_repository=repo, default_executable=executable,
default_launch_mode = sumatra_launch_mode, on_changed='store-diff',
data_store=data_store1, record_store=record_store1, input_datastore=input_datastore1)
print "Sumatra project created: " + sumatra_run_name + " in directory: " + sumatra_db_path
break
except Exception:
print "Sumatra project already exists, loading project: " + sumatra_run_name
break
project = load_project(path=sumatra_db_path)
print project
sumatra_params = build_parameters(parameters)
print sumatra_params
os.chdir(repo_path)
repo_main = "omics_pipe/main.py"
record = project.new_record(parameters=sumatra_params, main_file=repo_main)
print record
return record,project
def sumatra_end(start_time, record, project):
'''Saves the Sumatra project to the database'''
#file1 = open("/gpfs/home/kfisch/test/test.txt", "w")
#file1.write("test")
#file1.close()
record.duration = time.time() - start_time
print record.duration
record.output_data = record.datastore.find_new_data(record.timestamp)
print record.output_data
project.add_record(record)
print project
project.save()
return
| adammaikai/OmicsPipe2.0 | omics_pipe/utils.py | Python | mit | 10,719 |
"""
Stackoverflow OAuth support.
This contribution adds support for the Stackoverflow OAuth service. The settings
STACKOVERFLOW_CLIENT_ID, STACKOVERFLOW_CLIENT_SECRET and
STACKOVERFLOW_KEY must be defined with the values given by the
Stackoverflow application registration process.
Extended permissions are supported by defining
STACKOVERFLOW_EXTENDED_PERMISSIONS setting, it must be a list of values
to request.
By default account id and token expiration time are stored in extra_data
field, check OAuthBackend class for details on how to extend it.
"""
from urllib.parse import urlencode
from urllib.request import Request
from urllib.error import HTTPError
from urllib.parse import parse_qsl
from gzip import GzipFile
from io import BytesIO
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from django.conf import settings
from social_auth.utils import dsa_urlopen
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.exceptions import AuthUnknownError, AuthCanceled
# Stackoverflow configuration
STACKOVERFLOW_AUTHORIZATION_URL = 'https://stackexchange.com/oauth'
STACKOVERFLOW_ACCESS_TOKEN_URL = 'https://stackexchange.com/oauth/access_token'
STACKOVERFLOW_USER_DATA_URL = 'https://api.stackexchange.com/2.1/me'
STACKOVERFLOW_SERVER = 'stackexchange.com'
class StackoverflowBackend(OAuthBackend):
"""Stackoverflow OAuth authentication backend"""
name = 'stackoverflow'
ID_KEY = 'user_id'
# Default extra data to store
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Stackoverflow account"""
return {'username': response.get('link').split('/')[-1],
'full_name': response.get('display_name')}
class StackoverflowAuth(BaseOAuth2):
"""Stackoverflow OAuth2 mechanism"""
AUTHORIZATION_URL = STACKOVERFLOW_AUTHORIZATION_URL
ACCESS_TOKEN_URL = STACKOVERFLOW_ACCESS_TOKEN_URL
AUTH_BACKEND = StackoverflowBackend
SETTINGS_KEY_NAME = 'STACKOVERFLOW_CLIENT_ID'
SETTINGS_SECRET_NAME = 'STACKOVERFLOW_CLIENT_SECRET'
SCOPE_SEPARATOR = ','
# See: https://api.stackexchange.com/docs/authentication#scope
SCOPE_VAR_NAME = 'STACKOVERFLOW_EXTENDED_PERMISSIONS'
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
self.process_error(self.data)
params = self.auth_complete_params(self.validate_state())
        request = Request(self.ACCESS_TOKEN_URL,
                          data=urlencode(params).encode('utf-8'),
                          headers=self.auth_headers())
        try:
            response = dict(parse_qsl(dsa_urlopen(request).read().decode('utf-8')))
except HTTPError as e:
if e.code == 400:
raise AuthCanceled(self)
else:
raise
except (ValueError, KeyError):
raise AuthUnknownError(self)
self.process_error(response)
return self.do_auth(response['access_token'], response=response,
*args, **kwargs)
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
url = STACKOVERFLOW_USER_DATA_URL + '?' + urlencode({
'site': 'stackoverflow',
'access_token': access_token,
'key': getattr(settings, 'STACKOVERFLOW_KEY')})
opener = dsa_urlopen(url)
if opener.headers.get('content-encoding') == 'gzip':
            # Stack Overflow gzips the response even if gzip was not requested.
            gzip = GzipFile(fileobj=BytesIO(opener.read()), mode='r')
response = gzip.read()
else:
response = opener.read()
try:
data = simplejson.loads(response)
return data.get('items')[0]
except (ValueError, TypeError):
return None
# Backend definition
BACKENDS = {
'stackoverflow': StackoverflowAuth,
}
| limdauto/django-social-auth | social_auth/backends/contrib/stackoverflow.py | Python | bsd-3-clause | 4,043 |
# -*- coding: utf-8 -*-
"""
This program measures the equivalent isotropic radiated power (EIRP) of an equipment under test (more precisely, the maximal spectral density).
If the polarization of the EUT is known beyond doubt (e.g. the EUT has an external linear antenna), one polarization (along the antenna) can be enough.
3 cutting planes, 1 polarization
"""
from __future__ import division
import time
import visa
import scipy
import os
from numpy import *
import matplotlib.pyplot as plt
#Instruments modules
import Spectrum
import TurnTable
nom=raw_input('Enter the name of the equipment?')
if (os.path.isdir('Results_'+nom)==False):
os.mkdir('Results_'+nom)
#Calibration files
Correction_H=loadtxt('SynthCal_Pol_H.txt')
Correction_V=loadtxt('SynthCal_Pol_V.txt')
os.chdir('Results_'+nom)
f=Correction_H[:,0]
###############################################
########## Testing parameters ##############
###############################################
fstart=f[0] #Start frequency
fstop=f[-1] #Stop frequency
fcenter=0.5*(fstart+fstop) #Center frequency
fspan=fstop-fstart #Span
RBW=1e6 #RBW size in Hz
VBW=100e3 #VBW size in Hz
SwpPt=len(f) #Number of points
N=19 #Number of incident angles
Angles=linspace(0,360,N)
Pol=2 #Number of polarizations
Exp=3 #Number of cutting planes
Tmes=0.05 #dwell time
###Stop criterion
###channels center frequencies (european wifi)
##f0=2.412e9
##fn=2.472e9
##n=13 #number of channels
##fc=linspace(f0,fn,n)
###channel center frequencies indexes
##peaksindx=zeros(len(fc),dtype=int)
##for i in range(len(fc)):
## a=int(argmin(abs(f-fc[i])))
## peaksindx[i]=a
Level_criterion=-35
print '___________________________\n Instruments initializations\n'
print '\nSpectrum analyzer:'
Spectre=Spectrum.FSV30()
Spectre.reset()
Spectre.startFreq(fstart)
Spectre.stopFreq(fstop)
Spectre.RBW(RBW)
Spectre.SweepPoint(SwpPt)
Spectre.MaxHold()
Spectre.UnitDBM()
print '\nTurn table:'
TTable=TurnTable.PlateauCA()
TTable.reset()
print '____________________\nMeasurement\n'
Measurement=empty([Pol,Exp,N,2])
Raw_Traces=empty([Pol,Exp,N,2,SwpPt])
for k in range(Exp): # Loop over the cutting planes (exposures) of the object under test
print ("Cutting plane %s " %k)
if k==0:
l=0
if k==1:
l=1
if k==2:
l=1
if l==0:
print 'Polarization: V'
Polarization='V'
else:
print 'Polarization: H'
Polarization='H'
raw_input("\n Antenna polarization : %s, Cutting plane : %i \n Press Enter to continue...\n" %(Polarization,k))
for j in range(0,len(Angles)):
#print ("Go to %s deg" %(Angles [j]))
TTable.setPosition(Angles [j])
Spectre.readwrite()
Spectre.MaxHold()
time.sleep(Tmes)
#raw_input("\n Press Enter to validate the measurement\n")
Level = Spectre.getTrace(SwpPt)
if Polarization=='V':
cLevel=Level+Correction_V[:,1]
else:
cLevel=Level+Correction_H[:,1]
#criterion automatic stop
#while (min(cLevel[peaksindx])<Level_criterion): #every channel
#while (min(cLevel[peaksindx])<Level_criterion): #one channel
#while mean(Level[peaksindx]>Level_criterion)<p/n: #p channels among n
# Level = Spectre.getTrace(SwpPt)
# if Polarization=='V':
# cLevel=Level+Correction_V[:,1]
# else:
# cLevel=Level+Correction_H[:,1]
# time.sleep(0.5)
Trace=Level
MaxLevel=max(cLevel)
MaxIdx =cLevel.argmax()
Measurement[l,k,j,:]=array([f[MaxIdx],MaxLevel])
Raw_Traces[l,k,j,:]=Trace
print ' %s deg Max EIRP = %2.2f mW/MHz' %((Angles [j]),10**(MaxLevel/10))
print ("\n\nBack to 0 deg.")
TTable.setPosition(0)
r=(10**((Measurement[l,k,:,1])/10))
plt.clf()
plt.polar((Angles*pi/180),r)
Graphlin= 'Graph_Pol_%s_Exp%s' %(Polarization,k)
plt.ylabel('Puissance max mW')
plt.title("Diagramme de rayonnement en mW")
plt.savefig(Graphlin+'.pdf',bbox='tight')
plt.savefig(Graphlin+'.png',bbox='tight')
plt.clf()
plt.plot(Angles,Measurement[l,k,:,1])
plt.ylabel('Puissance max en dBm')
plt.xlabel("Angles en degres")
plt.title("Diagramme de rayonnement en dBm")
plt.xlim(0,360)
plt.grid(True)
GraphdBm= 'Graph_lin_%s_Exp%s' %(Polarization,k)
plt.savefig(GraphdBm+'.pdf',bbox='tight')
plt.savefig(GraphdBm+'.png',bbox='tight')
plt.clf()
fname = ( '%s_Exp%s.txt') %(Polarization,k)
savetxt(fname,Measurement[l,k,:])
savez('Bin_Results.npz',Measurement=Measurement,Raw_Traces=Raw_Traces,f=f)
| manuamador/PIRE | EIRP_light.py | Python | agpl-3.0 | 5,124 |
# -*- coding: utf-8 -*-
# Copyright(C) 2009-2011 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .accounts_list import AccountsList, TitreDetails
from .login import LoginPage, StopPage
from .transfer import TransferPage, TransferConfirmPage
from .bills import BillsPage
from .titre import TitrePage, TitreHistory
class AccountPrelevement(AccountsList):
pass
__all__ = ['AccountsList', 'LoginPage', 'TitreDetails',
'AccountPrelevement', 'TransferPage', 'TransferConfirmPage',
'BillsPage', 'StopPage', 'TitrePage', 'TitreHistory']
| frankrousseau/weboob | modules/ing/pages/__init__.py | Python | agpl-3.0 | 1,220 |
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.faster_rcnn import FasterRCNNTrainChain
from chainercv.utils import generate_random_bbox
from tests.links_tests.model_tests.faster_rcnn_tests.dummy_faster_rcnn \
import DummyFasterRCNN
def _random_array(shape):
return np.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class TestFasterRCNNTrainChain(unittest.TestCase):
def setUp(self):
self.n_anchor_base = 6
self.feat_stride = 4
self.n_fg_class = 3
self.n_roi = 24
self.n_bbox = 3
self.link = FasterRCNNTrainChain(DummyFasterRCNN(
n_anchor_base=self.n_anchor_base,
feat_stride=self.feat_stride,
n_fg_class=self.n_fg_class,
n_roi=self.n_roi,
min_size=600,
max_size=800,
))
self.bboxes = chainer.Variable(
generate_random_bbox(self.n_bbox, (600, 800), 16, 350)[np.newaxis])
_labels = np.random.randint(
0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
self.labels = chainer.Variable(_labels)
self.imgs = chainer.Variable(_random_array((1, 3, 600, 800)))
self.scales = chainer.Variable(np.array([1.]))
def check_call(self):
loss = self.link(self.imgs, self.bboxes, self.labels, self.scales)
self.assertEqual(loss.shape, ())
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self.imgs.to_gpu()
self.bboxes.to_gpu()
self.labels.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
| chainer/chainercv | tests/links_tests/model_tests/faster_rcnn_tests/test_faster_rcnn_train_chain.py | Python | mit | 1,758 |
#!/usr/bin/python3
#coding:utf-8
import numpy as np
a = np.array([x*2 for x in range(6)], dtype=float)
print(a)
b = np.array([y*3 for y in range(6)], dtype=np.float64)
print(b)
print([key for key, value in np.typeDict.items() if value is np.float64])
print(set(np.typeDict.values()))
c = a.astype(np.int32)
print(c)
n1 = 3.14
n2 = np.float64(n1)
| qrsforever/workspace | python/learn/numpy/l1/type.py | Python | mit | 352 |
from collections import Iterable
import py
from .utils import TIME_UNITS
from .utils import slugify
try:
from pygal.graph.box import Box
from pygal.style import DefaultStyle
except ImportError as exc:
raise ImportError(exc.args, "Please install pygal and pygaljs or pytest-benchmark[histogram]")
class CustomBox(Box):
def _box_points(self, serie, _):
return serie, [serie[0], serie[6]]
def _value_format(self, x):
return "Min: {0[0]:.4f}\n" \
"Q1-1.5IQR: {0[1]:.4f}\n" \
"Q1: {0[2]:.4f}\nMedian: {0[3]:.4f}\nQ3: {0[4]:.4f}\n" \
"Q3+1.5IQR: {0[5]:.4f}\n" \
"Max: {0[6]:.4f}".format(x[:7])
def _format(self, x, *args):
sup = super(CustomBox, self)._format
if args:
val = x.values
else:
val = x
if isinstance(val, Iterable):
return self._value_format(val), val[7]
else:
return sup(x, *args)
def _tooltip_data(self, node, value, x, y, classes=None, xlabel=None):
super(CustomBox, self)._tooltip_data(node, value[0], x, y, classes=classes, xlabel=None)
self.svg.node(node, 'desc', class_="x_label").text = value[1]
def make_plot(benchmarks, title, adjustment):
class Style(DefaultStyle):
colors = ["#000000" if row["path"] else DefaultStyle.colors[1]
for row in benchmarks]
font_family = 'Consolas, "Deja Vu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace'
minimum = int(min(row["min"] * adjustment for row in benchmarks))
maximum = int(max(
min(row["max"], row["hd15iqr"]) * adjustment
for row in benchmarks
) + 1)
try:
import pygaljs
except ImportError:
opts = {}
else:
opts = {
"js": [
pygaljs.uri("2.0.x", "pygal-tooltips.js")
]
}
plot = CustomBox(
box_mode='tukey',
x_label_rotation=-90,
x_labels=["{0[name]}".format(row) for row in benchmarks],
show_legend=False,
title=title,
x_title="Trial",
y_title="Duration",
style=Style,
min_scale=20,
max_scale=20,
truncate_label=50,
range=(minimum, maximum),
zero=minimum,
css=[
"file://style.css",
"file://graph.css",
"""inline:
.tooltip .value {
font-size: 1em !important;
}
.axis text {
font-size: 9px !important;
}
"""
],
**opts
)
for row in benchmarks:
serie = [row[field] * adjustment for field in ["min", "ld15iqr", "q1", "median", "q3", "hd15iqr", "max"]]
serie.append(row["path"])
plot.add("{0[fullname]} - {0[rounds]} rounds".format(row), serie)
return plot
def make_histogram(output_prefix, name, benchmarks, unit, adjustment):
if name:
path = "{0}-{1}.svg".format(output_prefix, slugify(name))
title = "Speed in {0} of {1}".format(TIME_UNITS[unit], name)
else:
path = "{0}.svg".format(output_prefix)
title = "Speed in {0}".format(TIME_UNITS[unit])
output_file = py.path.local(path).ensure()
plot = make_plot(
benchmarks=benchmarks,
title=title,
adjustment=adjustment,
)
plot.render_to_file(str(output_file))
return output_file
| thedrow/pytest-benchmark | src/pytest_benchmark/histogram.py | Python | bsd-2-clause | 3,478 |
"""Defines models related to rooms."""
import string
from sqlalchemy.exc import IntegrityError
import random
from blinker import Namespace
from flask import request
from flask_login import current_user
from sqlalchemy import Column, INTEGER, VARCHAR, ForeignKey, Table
from sqlalchemy.orm import relationship
from base.models import SerializableMixin
from database import Base
__author__ = "Benjamin Schubert <[email protected]>"
participants_in_room = Table(
"participants_in_room",
Base.metadata,
Column("user_id", INTEGER, ForeignKey("users.id")),
Column("room_id", INTEGER, ForeignKey("rooms.id"))
)
namespace = Namespace()
deleted = namespace.signal("deleted")
class Room(SerializableMixin, Base):
"""Defines a model for rooms."""
__tablename__ = "rooms"
__excluded__ = set("owner_id")
id = Column(INTEGER, primary_key=True)
name = Column(VARCHAR(255))
token = Column(VARCHAR(6), unique=True)
owner_id = Column(INTEGER, ForeignKey("users.id"))
owner = relationship("User")
participants = relationship("User", secondary=participants_in_room, backref="rooms")
def as_dict(self):
"""Get the object as a dictionary."""
base = super().as_dict()
base["owning"] = current_user.is_authenticated and self.owner_id == current_user.id
return base
def set_token(self, session, size=6):
"""
Create a random token to identify the room.
:param session: session to use to commit the changes
:param size: size of the token to generate
"""
e = None
for i in range(1000):
try:
self.token = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(size))
session.commit()
except IntegrityError as exc:
e = exc
else:
return
raise e
| BenjaminSchubert/web-polls | backend/rooms/models.py | Python | mit | 1,907 |
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.pyami.installers
import os
import os.path
import stat
import boto
from pwd import getpwnam
class Installer(boto.pyami.installers.Installer):
"""
Base Installer class for Ubuntu-based AMI's
"""
def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
"""
Write a file to /etc/cron.d to schedule a command
env is a dict containing environment variables you want to set in the file
name will be used as the name of the file
"""
fp = open('/etc/cron.d/%s' % name, "w")
if env:
for key, value in env.items():
fp.write('%s=%s\n' % (key, value))
fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
fp.close()
def add_init_script(self, file, name):
"""
Add this file to the init.d directory
"""
f_path = os.path.join("/etc/init.d", name)
f = open(f_path, "w")
f.write(file)
f.close()
os.chmod(f_path, stat.S_IREAD| stat.S_IWRITE | stat.S_IEXEC)
self.run("/usr/sbin/update-rc.d %s defaults" % name)
def add_env(self, key, value):
"""
        Add an environment variable
For Ubuntu, the best place is /etc/environment. Values placed here do
not need to be exported.
"""
boto.log.info('Adding env variable: %s=%s' % (key, value))
if not os.path.exists("/etc/environment.orig"):
self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False)
fp = open('/etc/environment', 'a')
fp.write('\n%s="%s"' % (key, value))
fp.close()
os.environ[key] = value
def stop(self, service_name):
self.run('/etc/init.d/%s stop' % service_name)
def start(self, service_name):
self.run('/etc/init.d/%s start' % service_name)
def create_user(self, user):
"""
Create a user on the local system
"""
self.run("useradd -m %s" % user)
usr = getpwnam(user)
return usr
def install(self):
"""
This is the only method you need to override
"""
        raise NotImplementedError()
| adamfisk/littleshoot-client | server/appengine/boto/pyami/installers/ubuntu/installer.py | Python | gpl-2.0 | 3,378 |
# -*- coding: utf-8 -*-
"""demimove-ui
Usage:
demimove-ui [<path>] [-c <file>] [-v|-vv|-vvv] [-q] [-h]
Options:
-c, --config=<file> Specify a config file to load.
-v Logging verbosity level, up to -vvv.
-q, --quiet Do not print logging messages to console.
-h, --help Show this help text and exit.
--version Show the current demimove-ui version.
"""
# GUI:
# TODO: Overwrite check.
# TODO: Accelerators (C+Q, Q+S).
# TODO: Add recursive include/exclude in contextmenu.
# TODO: Test QDirIterator vs os.path.walk. If positive, replace get_targets
# functionality (though Qt has encoding issues for non-utf8 file names).
# TODO: History tab.
# TODO: Statustab with Errors/Warnings, Summaries etc.
# TODO: Metatags (Photos, Videos, Audio)
# TODO: Test demimove on windows?
# TODO: Write unittests for rename/undo with mock unicode input?
# Fileops:
# TODO: Fix count step and count base plus large listings (~i).
# TODO: Enable glob replacing like this: *.mp3 prefix*.mp3
# (Adjust translate method to group wildcards).
# TODO: Fix filters on hiddencheck? Logic for on_refreshbutton?
# TODO: (more) fallback encodings?
# TODO: grey out undo button if history empty
import codecs
import logging
import os
import sys
from PyQt4 import Qt, QtGui, QtCore, uic
import fileops
import helpers
import history
log = logging.getLogger("gui")
try:
from docopt import docopt
except ImportError:
print("ImportError: You won't be able to use the CLI.")
class BoldDelegate(QtGui.QStyledItemDelegate):
def paint(self, painter, option, index):
if self.parent().cwdidx == index:
option.font.setWeight(QtGui.QFont.Bold)
super(BoldDelegate, self).paint(painter, option, index)
class DirModel(QtGui.QFileSystemModel):
def __init__(self, parent=None):
super(DirModel, self).__init__(parent)
self.p = parent
self.labels = ["Name", "Size", "Type", "Date Modified", "Preview"]
def columnCount(self, parent=QtCore.QModelIndex()):
return super(DirModel, self).columnCount() + 1
def headerData(self, col, orientation, role=Qt.Qt.DisplayRole):
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:
return self.labels[col]
return QtGui.QFileSystemModel.headerData(self, col, orientation, role)
def data(self, index, role):
if index.column() == self.columnCount() - 1:
if role == QtCore.Qt.DisplayRole:
if not self.p.autopreview:
return
fileindex = self.index(index.row(), 0, index.parent())
return self.match_preview(fileindex)
return super(DirModel, self).data(index, role)
def match_preview(self, index, *args):
if not self.p.cwdidx:
return
if not self.p.fileops.recursive and index.parent() != self.p.cwdidx:
return
target = helpers.splitpath_os(self.p.get_path(index))
if self.p.cwd in target[0] and target in self.p.targets:
idx = self.p.targets.index(target)
try:
# If preview differs from its original name, show the preview.
if target[1] + target[2] != self.p.previews[idx][1]:
for i in ["utf-8", "latin1"]:
try:
return self.p.previews[idx][1].decode(i)
except UnicodeDecodeError:
pass
return self.p.previews[idx][1]
# Otherwise show "\1" to indicate that nothing changed.
else:
return "\\1"
except IndexError:
return "err"
class UpdateThread(QtCore.QThread):
def __init__(self, parent=None):
super(UpdateThread, self).__init__(parent)
self.p = parent
self.mode = 1
def run(self):
if self.mode == 0:
self.p.update_targets()
elif self.mode == 1:
self.p.update_previews()
elif self.mode == 2:
self.p.update_targets()
self.p.update_previews()
class CommitThread(QtCore.QThread):
def __init__(self, parent=None):
super(CommitThread, self).__init__(parent)
self.p = parent
def run(self):
self.p.fileops.commit(self.p.previews)
class DemiMoveGUI(QtGui.QMainWindow):
def __init__(self, startdir, fileops, configfile, parent=None):
super(DemiMoveGUI, self).__init__(parent)
self.fileops = fileops
# Current working directory.
self.basedir = os.path.dirname(os.path.realpath(__file__))
self._autopreview = True
self._cwd = ""
self._cwdidx = None
self.switchview = False
# Initialize empty containers for option states and targets.
self.dualoptions1, self.dualoptions2 = {}, {}
self.targets, self.joinedtargets = [], []
self.previews = []
self.initialize_ui(startdir, configfile)
def initialize_ui(self, startdir, configfile):
self.updatethread = UpdateThread(self)
self.committhread = CommitThread(self)
guifile = os.path.join(self.basedir, "data/gui.ui")
iconfile = os.path.join(self.basedir, "data/icon.png")
uic.loadUi(guifile, self)
self.switchviewcheck.hide()
self.setWindowIcon(QtGui.QIcon(iconfile))
self.mainsplitter.setStretchFactor(0, 1)
self.create_browser(startdir)
# self.create_historytab()
self.connect_elements()
self.startoptions, self.defaultoptions = helpers.load_configfile(
self.fileops.configdir,
configfile)
self.set_options(self.startoptions)
self.mediachecks = [self.casecheck, self.keepextensionscheck,
self.removesymbolscheck, self.removecheck,
self.removeduplicatescheck, self.spacecheck]
self.mediaboxes = [self.casebox, self.spacebox]
self.dirview.setExpanded(self.get_index(), True)
log.info("demimove-ui initialized.")
self.statusbar.showMessage("Select a directory and press Enter.")
def create_browser(self, startdir):
# TODO: With readOnly disabled we can use setData for file operations?
self.dirmodel = DirModel(self)
self.dirmodel.setReadOnly(False)
self.dirmodel.setRootPath("/")
self.dirmodel.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files |
QtCore.QDir.NoDotAndDotDot)
self.menu = QtGui.QMenu(self)
self.dirview.setModel(self.dirmodel)
self.dirview.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.dirview.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.dirview.setColumnHidden(2, True)
self.dirview.header().swapSections(4, 1)
self.dirview.header().resizeSection(0, 300)
self.dirview.header().resizeSection(4, 220)
self.dirview.header().resizeSection(3, 124)
self.dirview.setEditTriggers(QtGui.QAbstractItemView.EditKeyPressed)
self.dirview.setItemDelegate(BoldDelegate(self))
self.dirview.setCurrentIndex(self.dirmodel.index(startdir))
def create_historytab(self):
historyfile = os.path.join(self.fileops.configdir, "history.txt")
try:
with codecs.open(historyfile, encoding="utf-8") as f:
data = f.read()
except IOError:
historyfile = os.path.join(self.basedir, "data/history.txt")
with codecs.open(historyfile, encoding="utf-8") as f:
data = f.read()
self.historymodel = history.HistoryTreeModel(data, self)
self.historytree.setModel(self.historymodel)
def set_options(self, options=None, sanitize=False):
if not options:
options = self.defaultoptions
# self.autopreview = False
for k, v in options["checks"].items():
# Handle autopreview attribute at the end.
if k == "autopreviewcheck": continue
if sanitize:
v = False
getattr(self, k).setChecked(v)
for k, v in options["combos"].items():
if sanitize:
v = 0
getattr(self, k).setCurrentIndex(v)
for k, v in options["edits"].items():
if sanitize:
v = ""
getattr(self, k).setText(v)
for k, v in options["radios"].items():
getattr(self, k).setChecked(v)
for k, v in options["spins"].items():
getattr(self, k).setValue(v)
self.autopreviewcheck.setChecked(options["checks"]["autopreviewcheck"])
def get_options(self):
options = self.defaultoptions
o = {}
o["checks"] = {k:getattr(self, k).isChecked() for k in options["checks"].keys()}
o["combos"] = {k:getattr(self, k).currentIndex() for k in options["combos"].keys()}
o["edits"] = {k:str(getattr(self, k).text().toUtf8()) for k in options["edits"].keys()}
o["radios"] = {k:getattr(self, k).isChecked() for k in options["radios"].keys()}
o["spins"] = {k:getattr(self, k).value() for k in options["spins"].keys()}
return o
def get_path(self, index):
return str(self.dirmodel.filePath(index).toUtf8())
def get_index(self):
return self.dirview.currentIndex()
def set_cwd(self, index=None, force=False):
"Set the current working directory for renaming actions."
if not index:
index = self.get_index()
path = self.get_path(index)
if force or path != self.cwd and os.path.isdir(path):
self.cwd = path
self.cwdidx = index
self.dirview.setExpanded(self.cwdidx, True)
self.update(2)
elif self.cwd and path == self.cwd:
self.fileops.stopupdate = True
self.dirview.setExpanded(self.cwdidx, False)
self.cwd = ""
self.cwdidx = None
self.update_indexview()
def delete_index(self, indexes=None):
if not indexes:
indexes = self.get_selected_indexes()
for index in indexes:
path = self.get_path(index)
name = os.path.basename(path)
# TODO: Subclass MessageBox to center it on screen?
m = QtGui.QMessageBox(self)
reply = m.question(self, "Message", "Delete {}?".format(name),
m.Yes | m.No, m.Yes)
if reply == QtGui.QMessageBox.Yes:
self.dirmodel.remove(index)
def on_popmenu(self, position):
self.menu.clear()
items = ["Toggle", "Include", "Exclude",
"Clear Includes", "Clear Excludes", "Clear Both",
"Set/Unset CWD", "Edit", "Delete"]
for item in items:
action = self.menu.addAction(item)
action.triggered[()].connect(lambda i=item: self.menuhandler(i))
self.menu.exec_(self.dirview.mapToGlobal(position))
def get_selected_indexes(self):
indexes = self.dirview.selectionModel().selectedIndexes()
return indexes[:len(indexes) / 5]
def toggle_selection(self, mode=0):
indexes = self.get_selected_indexes()
for idx in indexes:
path = self.get_path(idx)
target = helpers.splitpath_os(path)
name = target[1] + target[2]
if mode == 0: # Toggle Include/Exclude
if target in self.targets:
self.fileops.includes.discard(name)
self.fileops.excludes.add(name)
else:
self.fileops.excludes.discard(name)
self.fileops.includes.add(name)
elif mode == 1: # Include
self.fileops.excludes.discard(name)
self.fileops.includes.add(name)
elif mode == 2: # Exclude
self.fileops.includes.discard(name)
self.fileops.excludes.add(name)
elif mode == 3: # Recursive Include
pass
elif mode == 4: # Recursive Exclude
pass
log.debug("includes: {}".format(self.fileops.includes))
log.debug("excludes: {}".format(self.fileops.excludes))
self.update(2)
log.debug(self.targets)
def menuhandler(self, action):
if action == "Toggle":
self.toggle_selection(0)
if action == "Include":
self.toggle_selection(1)
if action == "Exclude":
self.toggle_selection(2)
if action == "Recursive Include":
self.toggle_selection(3)
if action == "Recursive Exclude":
self.toggle_selection(4)
elif action == "Clear Includes":
self.fileops.includes.clear()
elif action == "Clear Excludes":
self.fileops.excludes.clear()
elif action == "Clear Both":
self.fileops.includes.clear()
self.fileops.excludes.clear()
self.update(2)
elif action == "Set/Unset CWD":
self.set_cwd()
elif action == "Edit":
self.dirview.edit(self.get_index())
elif action == "Delete":
self.delete_index()
def keyPressEvent(self, e):
"Overloaded to connect return key to self.set_cwd()."
# TODO: Move this to TreeView only.
if e.key() == QtCore.Qt.Key_Return:
self.set_cwd()
if e.key() == QtCore.Qt.Key_Delete:
self.delete_index()
def update(self, mode=1):
"""Main update routine using threading to get targets and/or previews"""
# Modes: 0 = targets, 1 = previews, 2 = both.
self.fileops.stopupdate = False
if not self.autopreview or not self.cwd:
self.update_view()
return
self.updatethread.mode = mode
self.updatethread.start()
def on_updatethread_started(self):
log.debug("Updatethread started.")
self.statusbar.showMessage("Refreshing...")
self.refreshbutton.setText("Stop")
def on_updatethread_finished(self):
log.debug("Updatethread finished.")
self.refreshbutton.setText("Refresh")
if self.cwd:
lent = len(self.targets)
lenp = sum(i[0][1] != i[1] for i in self.previews)
self.statusbar.showMessage("Targets: {}, Staged: {} - {}"
.format(lent, lenp, self.cwd))
else:
self.statusbar.showMessage("No working directory set.")
self.update_view()
def update_targets(self):
if self.cwd:
self.targets = self.fileops.get_targets(self.cwd)
else:
self.targets = []
def update_previews(self):
if self.cwd:
self.previews = self.fileops.get_previews(self.targets)
else:
self.previews = []
def update_view(self):
m, v = self.dirmodel, self.dirview
r = v.rect()
m.dataChanged.emit(v.indexAt(r.topLeft()), v.indexAt(r.bottomRight()))
def update_indexview(self, index=None):
if index is None:
index = self.get_index()
m = self.dirmodel
m.dataChanged.emit(index, m.index(index.row(), m.columnCount()))
def on_committhread_started(self):
log.debug("Committhread started.")
self.statusbar.showMessage("Committing...")
self.commitbutton.setText("Stop")
def on_committhread_finished(self):
log.debug("Committhread finished.")
self.commitbutton.setText("Commit")
self.update(2)
def connect_elements(self):
self.dirview.customContextMenuRequested.connect(self.on_popmenu)
self.updatethread.finished.connect(self.on_updatethread_finished)
self.updatethread.started.connect(self.on_updatethread_started)
self.committhread.finished.connect(self.on_committhread_finished)
self.committhread.started.connect(self.on_committhread_started)
# Main buttons:
self.commitbutton.clicked.connect(self.on_commitbutton)
self.refreshbutton.clicked.connect(self.on_refreshbutton)
self.undobutton.clicked.connect(self.on_undobutton)
self.bothradio.toggled.connect(self.on_bothradio)
self.dirsradio.toggled.connect(self.on_dirsradio)
self.filesradio.toggled.connect(self.on_filesradio)
self.switchviewcheck.toggled.connect(self.on_switchviewcheck)
# Main options:
self.autopreviewcheck.toggled.connect(self.on_autopreviewcheck)
self.autostopcheck.toggled.connect(self.on_autostopcheck)
self.keepextensionscheck.toggled.connect(self.on_keepextensioncheck)
self.hiddencheck.toggled.connect(self.on_hiddencheck)
self.manualmirrorcheck.toggled.connect(self.on_manualmirrorcheck)
self.recursivecheck.toggled.connect(self.on_recursivecheck)
self.recursivedepth.valueChanged.connect(self.on_recursivedepth)
self.saveoptionsbutton.clicked.connect(self.on_saveoptionsbutton)
self.restoreoptionsbutton.clicked.connect(self.on_restoreoptionsbutton)
self.clearoptionsbutton.clicked.connect(self.on_clearoptionsbutton)
# Match options:
self.matchcheck.toggled.connect(self.on_matchcheck)
self.matchignorecase.toggled.connect(self.on_matchignorecase)
self.matchreplacecheck.toggled.connect(self.on_matchreplacecheck)
self.matchexcludecheck.toggled.connect(self.on_matchexcludecheck)
self.matchfiltercheck.toggled.connect(self.on_matchfiltercheck)
self.globradio.toggled.connect(self.on_globradio)
self.regexradio.toggled.connect(self.on_regexradio)
self.matchedit.textChanged.connect(self.on_matchedit)
self.replaceedit.textChanged.connect(self.on_replaceedit)
self.excludeedit.textChanged.connect(self.on_excludeedit)
self.filteredit.textChanged.connect(self.on_filteredit)
# Insert options:
self.insertcheck.toggled.connect(self.on_insertcheck)
self.insertpos.valueChanged.connect(self.on_insertpos)
self.insertedit.textChanged.connect(self.on_insertedit)
# Delete options:
self.deletecheck.toggled.connect(self.on_deletecheck)
self.deletestart.valueChanged.connect(self.on_deletestart)
self.deleteend.valueChanged.connect(self.on_deleteend)
# Count options:
self.countcheck.toggled.connect(self.on_countcheck)
self.countbase.valueChanged.connect(self.on_countbase)
self.countpos.valueChanged.connect(self.on_countpos)
self.countstep.valueChanged.connect(self.on_countstep)
self.countpreedit.textChanged.connect(self.on_countpreedit)
self.countsufedit.textChanged.connect(self.on_countsufedit)
self.countfillcheck.toggled.connect(self.on_countfillcheck)
# Remove options:
self.removecheck.toggled.connect(self.on_removecheck)
self.removeduplicatescheck.toggled.connect(self.on_removeduplicates)
self.removeextensionscheck.toggled.connect(self.on_removeextensions)
self.removenonwordscheck.toggled.connect(self.on_removenonwords)
self.removesymbolscheck.toggled.connect(self.on_removesymbols)
self.casecheck.toggled.connect(self.on_casecheck)
self.casebox.currentIndexChanged[int].connect(self.on_casebox)
self.spacecheck.toggled.connect(self.on_spacecheck)
self.spacebox.currentIndexChanged[int].connect(self.on_spacebox)
self.mediamodecheck.toggled.connect(self.on_mediamodecheck)
self.dualmodecheck.toggled.connect(self.on_dualmodecheck)
def on_saveoptionsbutton(self):
"""Save current options to configfile."""
log.info("Saving options.")
helpers.save_configfile(self.fileops.configdir, self.get_options())
self.statusbar.showMessage("Configuration file saved.")
def on_restoreoptionsbutton(self):
"""Restore options to start point."""
log.info("Restoring options.")
self.set_options(self.startoptions)
def on_clearoptionsbutton(self):
"""Reset/Clear all options."""
log.info("Clearing options.")
self.set_options(sanitize=True)
def on_commitbutton(self):
"""Perform the currently previewed rename actions."""
log.info("Committing previewed changes.")
if self.committhread.isRunning():
self.fileops.stopcommit = True
else:
self.fileops.stopcommit = False
self.committhread.start()
def on_undobutton(self):
"""Pops the history stack of commits, reverting the one on top."""
log.info("Reverting last commit.")
self.fileops.undo()
self.update(2)
def on_refreshbutton(self):
"""Force a refresh of browser view and model."""
if self.updatethread.isRunning():
self.fileops.stopupdate = True
else:
self.update(2)
def on_autopreviewcheck(self, checked):
self.autopreview = checked
if checked:
self.update(2)
def on_keepextensioncheck(self, checked):
self.fileops.keepext = checked
if checked:
self.removeextensionscheck.setChecked(False)
self.update()
def on_hiddencheck(self, checked):
self.fileops.hidden = checked
# TODO: Delegate gets overriden by filter here?
if checked:
self.dirmodel.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files |
QtCore.QDir.NoDotAndDotDot |
QtCore.QDir.Hidden)
else:
self.dirmodel.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files |
QtCore.QDir.NoDotAndDotDot)
self.update(2)
def on_manualmirrorcheck(self, checked):
self.fileops.manualmirror = checked
self.update()
def on_recursivecheck(self, checked):
self.fileops.recursive = checked
if not checked:
self.recursivedepth.setValue(0)
self.update(2)
def on_recursivedepth(self, num):
self.fileops.recursivedepth = int(num)
self.update(2)
def on_autostopcheck(self, checked):
self.fileops.autostop = checked
def on_matchcheck(self, checked):
self.fileops.matchcheck = checked
if not checked:
self.matchedit.setEnabled(False)
self.replaceedit.setEnabled(False)
self.filteredit.setEnabled(False)
self.excludeedit.setEnabled(False)
else:
if self.matchreplacecheck.isChecked():
self.matchedit.setEnabled(True)
self.replaceedit.setEnabled(True)
if self.matchfiltercheck.isChecked():
self.filteredit.setEnabled(True)
if self.matchexcludecheck.isChecked():
self.excludeedit.setEnabled(True)
self.update()
def on_matchignorecase(self, checked):
self.fileops.ignorecase = checked
self.update()
def on_filteredit(self, text):
text = str(text.toUtf8())
self.fileops.filteredit = text
self.update(2)
def on_excludeedit(self, text):
text = str(text.toUtf8())
self.fileops.excludeedit = text
self.update(2)
def on_insertedit(self, text):
text = str(text.toUtf8())
self.fileops.insertedit = text
self.update()
def on_countpreedit(self, text):
text = str(text.toUtf8())
self.fileops.countpreedit = text
self.update()
def on_countsufedit(self, text):
text = str(text.toUtf8())
self.fileops.countsufedit = text
self.update()
def on_matchedit(self, text):
text = str(text.toUtf8())
self.fileops.matchedit = text
self.update()
def on_replaceedit(self, text):
text = str(text.toUtf8())
self.fileops.replaceedit = text
self.update()
def on_matchfiltercheck(self, checked):
self.fileops.matchfiltercheck = checked
self.update(2)
def on_matchexcludecheck(self, checked):
self.fileops.matchexcludecheck = checked
self.update(2)
def on_matchreplacecheck(self, checked):
self.fileops.matchreplacecheck = checked
self.update()
def on_globradio(self, checked):
self.fileops.regex = not checked
if self.fileops.matchfiltercheck or self.fileops.matchexcludecheck:
self.update(0)
self.update()
def on_regexradio(self, checked):
self.fileops.regex = checked
if self.fileops.matchfiltercheck or self.fileops.matchexcludecheck:
self.update(0)
self.update()
def on_insertcheck(self, checked):
self.fileops.insertcheck = checked
self.update()
def on_insertpos(self, num):
self.fileops.insertpos = int(num)
self.update()
def on_countcheck(self, checked):
self.fileops.countcheck = checked
self.update()
def on_countbase(self, num):
self.fileops.countbase = int(num)
self.update()
def on_countpos(self, num):
self.fileops.countpos = int(num)
self.update()
def on_countstep(self, num):
self.fileops.countstep = int(num)
self.update()
def on_countfillcheck(self, checked):
self.fileops.countfill = checked
self.update()
def on_removecheck(self, checked):
self.fileops.removecheck = checked
self.update()
def on_removeduplicates(self, checked):
self.fileops.remdups = checked
self.update()
def on_removeextensions(self, checked):
self.fileops.remext = checked
if checked:
self.keepextensionscheck.setChecked(False)
self.update()
def on_removenonwords(self, checked):
self.fileops.remnonwords = checked
self.update()
def on_removesymbols(self, checked):
self.fileops.remsymbols = checked
self.update()
def save_premediaoptions(self):
self.checksaves = {i: i.isChecked() for i in self.mediachecks}
self.combosaves = {i: i.currentIndex() for i in self.mediaboxes}
def restore_premediaoptions(self):
for k, v in self.checksaves.items():
k.setChecked(v)
for k, v in self.combosaves.items():
k.setCurrentIndex(v)
def on_mediamodecheck(self, checked):
self.autopreviewcheck.setChecked(False)
if checked:
self.save_premediaoptions()
# self.fileops.keepext = True
for i in self.mediachecks:
i.setChecked(True)
self.spacebox.setCurrentIndex(6)
self.casebox.setCurrentIndex(0)
else:
self.restore_premediaoptions()
self.autopreviewcheck.setChecked(True)
self.update()
def on_dualmodecheck(self, checked):
if checked:
self.dualoptions1 = self.get_options()
self.set_options(self.dualoptions2)
else:
self.dualoptions2 = self.get_options()
self.set_options(self.dualoptions1)
self.update()
def on_switchviewcheck(self, checked):
self.switchview = checked
log.debug("switchview: {}".format(checked))
if self.filesradio.isChecked():
self.on_filesradio(True)
elif self.dirsradio.isChecked():
self.on_dirsradio(True)
elif self.bothradio.isChecked():
self.on_bothradio(True)
def on_bothradio(self, checked):
self.fileops.filesonly = False
self.fileops.dirsonly = False
if self.switchview:
self.dirmodel.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files |
QtCore.QDir.NoDotAndDotDot |
QtCore.QDir.Hidden)
self.update(2)
def on_dirsradio(self, checked):
self.fileops.dirsonly = checked
if self.switchview:
self.dirmodel.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Hidden |
QtCore.QDir.NoDotAndDotDot)
self.update(2)
def on_filesradio(self, checked):
self.fileops.filesonly = checked
if self.switchview:
self.dirmodel.setFilter(QtCore.QDir.Files | QtCore.QDir.Hidden |
QtCore.QDir.NoDotAndDotDot)
self.update(2)
def on_spacecheck(self, checked):
self.fileops.spacecheck = checked
self.update()
def on_casecheck(self, checked):
self.fileops.casecheck = checked
self.update()
def on_deletecheck(self, checked):
self.fileops.deletecheck = checked
self.update()
def on_deletestart(self, num):
self.fileops.deletestart = int(num)
self.update()
def on_deleteend(self, num):
self.fileops.deleteend = int(num)
self.update()
def on_casebox(self, index):
self.fileops.casemode = index
self.update()
def on_spacebox(self, index):
self.fileops.spacemode = index
self.update()
@property
def cwd(self):
return self._cwd
@cwd.setter
def cwd(self, path):
path = str(path)
# Exit out if dir is not a valid target.
self._cwd = path
log.debug("cwd: {}".format(path))
if path:
self.statusbar.showMessage("Root is now {}.".format(path))
else:
self.statusbar.showMessage("No root set.")
@property
def cwdidx(self):
return self._cwdidx
@cwdidx.setter
def cwdidx(self, index):
self._cwdidx = index
@property
def autopreview(self):
return self._autopreview
@autopreview.setter
def autopreview(self, boolean):
self._autopreview = boolean
log.debug("autopreview: {}".format(boolean))
def main():
"Main entry point for demimove-ui."
startdir = os.getcwd()
configfile = None
try:
args = docopt(__doc__, version="0.2")
# args["-v"] = 3 # Force debug logging
fileop = fileops.FileOps(verbosity=args["-v"],
quiet=args["--quiet"])
if args["<path>"]:
startdir = args["<path>"]
if args["--config"]:
configfile = args["--config"]
except NameError:
fileop = fileops.FileOps()
log.error("Please install docopt to use the CLI.")
app = QtGui.QApplication(sys.argv)
app.setApplicationName("demimove-ui")
gui = DemiMoveGUI(startdir, fileop, configfile)
gui.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| mikar/demimove | demimove/gui.py | Python | mit | 30,975 |
import requests
from flask import current_app, render_template, url_for, flash
from flask_wtf import Form, RecaptchaField
from wtforms import StringField, PasswordField, SelectField, SubmitField
from wtforms.validators import DataRequired
from app.mod_frontend import mod_frontend
@mod_frontend.route('/', methods=['GET', 'POST'])
def index():
"""Landing page.
This is the main page when accessing the app through a web browser. It
    gives some basic information and links to the source code and documentation.
    It also serves as a small API example: you can get an iCal link by
    entering credentials in the form.
"""
class GetIcalForm(Form):
username = StringField(
'', description='Username',
validators=[DataRequired()])
password = PasswordField(
'', description='Password',
validators=[DataRequired()])
facility = SelectField(
'', description='Facility',
choices=[(i, j['metadata']['name'])
for i, j in current_app.facilities.items()],
validators=[DataRequired()])
if 'RECAPTCHA_PUBLIC_KEY' in current_app.config and \
'RECAPTCHA_PRIVATE_KEY' in current_app.config:
recaptcha = RecaptchaField()
submit = SubmitField()
form = GetIcalForm()
if form.validate_on_submit():
url = url_for('mod_api.get_token', facility=form.facility.data,
_external=True)
r = requests.post(url,
json={'username': form.username.data,
'password': form.password.data})
if r.status_code == 200:
token = r.json().get('token')
if token:
token_url = '{}?token={}'.format(
url_for('mod_api.lent_ical', facility=form.facility.data),
token)
current_app.logger.info(
'created token url: {}'.format(token_url)
)
flash(token_url, 'success')
else:
                flash('No token received!', 'danger')
else:
flash('Connection problems with the API!', 'danger')
# create a facility metadata dict
facilities = {}
for i, j in current_app.facilities.items():
facilities[i] = j['metadata']
return render_template('index.html', form=form, facilities=facilities)
| xsteadfastx/bib-api | app/mod_frontend/views.py | Python | mit | 2,456 |
#!/usr/bin/env python
# coding=utf-8
"""446. Retractions B
https://projecteuler.net/problem=446
For every integer n>1, the family of functions f_{n,a,b} is defined by
f_{n,a,b}(x) ≡ ax + b mod n, for a, b, x integer and 0<a<n, 0≤b<n, 0≤x<n.
We will call f_{n,a,b} a _retraction_ if f_{n,a,b}(f_{n,a,b}(x)) ≡ f_{n,a,b}(x) mod n
for every 0≤x<n.
Let R(n) be the number of retractions for n.
F(N) = ∑ R(n^4 + 4) for 1 ≤ n ≤ N.
F(1024) = 77532377300600.
Find F(10^7) (mod 1 000 000 007)
"""
| openqt/algorithms | projecteuler/pe446-retractions-b.py | Python | gpl-3.0 | 476 |
import datetime
import os
import re
import shutil
from . import util
_sourceless_rev_file = re.compile(r'(?!__init__)(.*\.py)(c|o)?$')
_only_source_rev_file = re.compile(r'(?!__init__)(.*\.py)$')
_legacy_rev = re.compile(r'([a-f0-9]+)\.py$')
_mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)')
_slug_re = re.compile(r'\w+')
_default_file_template = "%(rev)s_%(slug)s"
_relative_destination = re.compile(r'(?:\+|-)\d+')
class ScriptDirectory(object):
"""Provides operations upon an Alembic script directory.
This object is useful to get information as to current revisions,
most notably being able to get at the "head" revision, for schemes
that want to test if the current revision in the database is the most
recent::
from alembic.script import ScriptDirectory
from alembic.config import Config
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
head_revision = script.get_current_head()
"""
def __init__(self, dir, file_template=_default_file_template,
truncate_slug_length=40,
sourceless=False):
self.dir = dir
self.versions = os.path.join(self.dir, 'versions')
self.file_template = file_template
self.truncate_slug_length = truncate_slug_length or 40
self.sourceless = sourceless
if not os.access(dir, os.F_OK):
raise util.CommandError("Path doesn't exist: %r. Please use "
"the 'init' command to create a new "
"scripts folder." % dir)
@classmethod
def from_config(cls, config):
"""Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
instance.
The :class:`.Config` need only have the ``script_location`` key
present.
"""
script_location = config.get_main_option('script_location')
if script_location is None:
raise util.CommandError("No 'script_location' key "
"found in configuration.")
truncate_slug_length = config.get_main_option("truncate_slug_length")
if truncate_slug_length is not None:
truncate_slug_length = int(truncate_slug_length)
return ScriptDirectory(
util.coerce_resource_to_filename(script_location),
file_template=config.get_main_option(
'file_template',
_default_file_template),
truncate_slug_length=truncate_slug_length,
sourceless=config.get_main_option("sourceless") == "true"
)
def walk_revisions(self, base="base", head="head"):
"""Iterate through all revisions.
This is actually a breadth-first tree traversal,
with leaf nodes being heads.
"""
if head == "head":
heads = set(self.get_heads())
else:
heads = set([head])
while heads:
todo = set(heads)
heads = set()
for head in todo:
if head in heads:
break
for sc in self.iterate_revisions(head, base):
if sc.is_branch_point and sc.revision not in todo:
heads.add(sc.revision)
break
else:
yield sc
def get_revision(self, id_):
"""Return the :class:`.Script` instance with the given rev id."""
id_ = self.as_revision_number(id_)
try:
return self._revision_map[id_]
except KeyError:
# do a partial lookup
revs = [x for x in self._revision_map
if x is not None and x.startswith(id_)]
if not revs:
raise util.CommandError("No such revision '%s'" % id_)
elif len(revs) > 1:
raise util.CommandError(
"Multiple revisions start "
"with '%s', %s..." % (
id_,
", ".join("'%s'" % r for r in revs[0:3])
))
else:
return self._revision_map[revs[0]]
_get_rev = get_revision
def as_revision_number(self, id_):
"""Convert a symbolic revision, i.e. 'head' or 'base', into
an actual revision number."""
if id_ == 'head':
id_ = self.get_current_head()
elif id_ == 'base':
id_ = None
return id_
_as_rev_number = as_revision_number
def iterate_revisions(self, upper, lower):
"""Iterate through script revisions, starting at the given
upper revision identifier and ending at the lower.
The traversal uses strictly the `down_revision`
marker inside each migration script, so
it is a requirement that upper >= lower,
else you'll get nothing back.
The iterator yields :class:`.Script` objects.
"""
if upper is not None and _relative_destination.match(upper):
relative = int(upper)
revs = list(self._iterate_revisions("head", lower))
revs = revs[-relative:]
if len(revs) != abs(relative):
raise util.CommandError("Relative revision %s didn't "
"produce %d migrations" % (upper, abs(relative)))
return iter(revs)
elif lower is not None and _relative_destination.match(lower):
relative = int(lower)
revs = list(self._iterate_revisions(upper, "base"))
revs = revs[0:-relative]
if len(revs) != abs(relative):
raise util.CommandError("Relative revision %s didn't "
"produce %d migrations" % (lower, abs(relative)))
return iter(revs)
else:
return self._iterate_revisions(upper, lower)
def _iterate_revisions(self, upper, lower):
lower = self.get_revision(lower)
upper = self.get_revision(upper)
orig = lower.revision if lower else 'base', \
upper.revision if upper else 'base'
script = upper
while script != lower:
if script is None and lower is not None:
raise util.CommandError(
"Revision %s is not an ancestor of %s" % orig)
yield script
downrev = script.down_revision
script = self._revision_map[downrev]
def _upgrade_revs(self, destination, current_rev):
revs = self.iterate_revisions(destination, current_rev)
return [
(script.module.upgrade, script.down_revision, script.revision,
script.doc)
for script in reversed(list(revs))
]
def _downgrade_revs(self, destination, current_rev):
revs = self.iterate_revisions(current_rev, destination)
return [
(script.module.downgrade, script.revision, script.down_revision,
script.doc)
for script in revs
]
def run_env(self):
"""Run the script environment.
This basically runs the ``env.py`` script present
in the migration environment. It is called exclusively
by the command functions in :mod:`alembic.command`.
"""
util.load_python_file(self.dir, 'env.py')
@property
def env_py_location(self):
return os.path.abspath(os.path.join(self.dir, "env.py"))
@util.memoized_property
def _revision_map(self):
map_ = {}
for file_ in os.listdir(self.versions):
script = Script._from_filename(self, self.versions, file_)
if script is None:
continue
if script.revision in map_:
util.warn("Revision %s is present more than once" %
script.revision)
map_[script.revision] = script
for rev in map_.values():
if rev.down_revision is None:
continue
if rev.down_revision not in map_:
util.warn("Revision %s referenced from %s is not present"
% (rev.down_revision, rev))
rev.down_revision = None
else:
map_[rev.down_revision].add_nextrev(rev.revision)
map_[None] = None
return map_
def _rev_path(self, rev_id, message, create_date):
slug = "_".join(_slug_re.findall(message or "")).lower()
if len(slug) > self.truncate_slug_length:
slug = slug[:self.truncate_slug_length].rsplit('_', 1)[0] + '_'
filename = "%s.py" % (
self.file_template % {
'rev': rev_id,
'slug': slug,
'year': create_date.year,
'month': create_date.month,
'day': create_date.day,
'hour': create_date.hour,
'minute': create_date.minute,
'second': create_date.second
}
)
return os.path.join(self.versions, filename)
def get_current_head(self):
"""Return the current head revision.
If the script directory has multiple heads
due to branching, an error is raised.
Returns a string revision number.
"""
current_heads = self.get_heads()
if len(current_heads) > 1:
raise util.CommandError('Only a single head is supported. The '
'script directory has multiple heads (due to branching), which '
'must be resolved by manually editing the revision files to '
'form a linear sequence. Run `alembic branches` to see the '
'divergence(s).')
if current_heads:
return current_heads[0]
else:
return None
_current_head = get_current_head
"""the 0.2 name, for backwards compat."""
def get_heads(self):
"""Return all "head" revisions as strings.
Returns a list of string revision numbers.
This is normally a list of length one,
unless branches are present. The
:meth:`.ScriptDirectory.get_current_head()` method
can be used normally when a script directory
has only one head.
"""
heads = []
for script in self._revision_map.values():
if script and script.is_head:
heads.append(script.revision)
return heads
def get_base(self):
"""Return the "base" revision as a string.
This is the revision number of the script that
has a ``down_revision`` of None.
Behavior is not defined if more than one script
has a ``down_revision`` of None.
"""
for script in self._revision_map.values():
if script and script.down_revision is None \
and script.revision in self._revision_map:
return script.revision
else:
return None
def _generate_template(self, src, dest, **kw):
util.status("Generating %s" % os.path.abspath(dest),
util.template_to_file,
src,
dest,
**kw
)
def _copy_file(self, src, dest):
util.status("Generating %s" % os.path.abspath(dest),
shutil.copy,
src, dest)
def generate_revision(self, revid, message, refresh=False, **kw):
"""Generate a new revision file.
This runs the ``script.py.mako`` template, given
template arguments, and creates a new file.
:param revid: String revision id. Typically this
comes from ``alembic.util.rev_id()``.
:param message: the revision message, the one passed
by the -m argument to the ``revision`` command.
:param refresh: when True, the in-memory state of this
:class:`.ScriptDirectory` will be updated with a new
:class:`.Script` instance representing the new revision;
the :class:`.Script` instance is returned.
If False, the file is created but the state of the
:class:`.ScriptDirectory` is unmodified; ``None``
is returned.
"""
current_head = self.get_current_head()
create_date = datetime.datetime.now()
path = self._rev_path(revid, message, create_date)
self._generate_template(
os.path.join(self.dir, "script.py.mako"),
path,
up_revision=str(revid),
down_revision=current_head,
create_date=create_date,
message=message if message is not None else ("empty message"),
**kw
)
if refresh:
script = Script._from_path(self, path)
self._revision_map[script.revision] = script
if script.down_revision:
self._revision_map[script.down_revision].\
add_nextrev(script.revision)
return script
else:
return None
class Script(object):
"""Represent a single revision file in a ``versions/`` directory.
The :class:`.Script` instance is returned by methods
such as :meth:`.ScriptDirectory.iterate_revisions`.
"""
nextrev = frozenset()
def __init__(self, module, rev_id, path):
self.module = module
self.revision = rev_id
self.path = path
self.down_revision = getattr(module, 'down_revision', None)
revision = None
"""The string revision number for this :class:`.Script` instance."""
module = None
"""The Python module representing the actual script itself."""
path = None
"""Filesystem path of the script."""
down_revision = None
"""The ``down_revision`` identifier within the migration script."""
@property
def doc(self):
"""Return the docstring given in the script."""
return re.split("\n\n", self.longdoc)[0]
@property
def longdoc(self):
"""Return the docstring given in the script."""
doc = self.module.__doc__
if doc:
if hasattr(self.module, "_alembic_source_encoding"):
doc = doc.decode(self.module._alembic_source_encoding)
return doc.strip()
else:
return ""
def add_nextrev(self, rev):
self.nextrev = self.nextrev.union([rev])
@property
def is_head(self):
"""Return True if this :class:`.Script` is a 'head' revision.
This is determined based on whether any other :class:`.Script`
within the :class:`.ScriptDirectory` refers to this
:class:`.Script`. Multiple heads can be present.
"""
return not bool(self.nextrev)
@property
def is_branch_point(self):
"""Return True if this :class:`.Script` is a branch point.
        A branchpoint is defined as a :class:`.Script` which is referred
        to by more than one succeeding :class:`.Script`; that is, more than
        one :class:`.Script` has a `down_revision` identifier pointing here.
"""
return len(self.nextrev) > 1
@property
def log_entry(self):
return \
"Rev: %s%s%s\n" \
"Parent: %s\n" \
"Path: %s\n" \
"\n%s\n" % (
self.revision,
" (head)" if self.is_head else "",
" (branchpoint)" if self.is_branch_point else "",
self.down_revision,
self.path,
"\n".join(
" %s" % para
for para in self.longdoc.splitlines()
)
)
def __str__(self):
return "%s -> %s%s%s, %s" % (
self.down_revision,
self.revision,
" (head)" if self.is_head else "",
" (branchpoint)" if self.is_branch_point else "",
self.doc)
@classmethod
def _from_path(cls, scriptdir, path):
dir_, filename = os.path.split(path)
return cls._from_filename(scriptdir, dir_, filename)
@classmethod
def _from_filename(cls, scriptdir, dir_, filename):
if scriptdir.sourceless:
py_match = _sourceless_rev_file.match(filename)
else:
py_match = _only_source_rev_file.match(filename)
if not py_match:
return None
py_filename = py_match.group(1)
if scriptdir.sourceless:
is_c = py_match.group(2) == 'c'
is_o = py_match.group(2) == 'o'
else:
is_c = is_o = False
if is_o or is_c:
py_exists = os.path.exists(os.path.join(dir_, py_filename))
pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
# prefer .py over .pyc because we'd like to get the
# source encoding; prefer .pyc over .pyo because we'd like to
# have the docstrings which a -OO file would not have
if py_exists or is_o and pyc_exists:
return None
module = util.load_python_file(dir_, filename)
if not hasattr(module, "revision"):
# attempt to get the revision id from the script name,
# this for legacy only
m = _legacy_rev.match(filename)
if not m:
raise util.CommandError(
"Could not determine revision id from filename %s. "
"Be sure the 'revision' variable is "
"declared inside the script (please see 'Upgrading "
"from Alembic 0.1 to 0.2' in the documentation)."
% filename)
else:
revision = m.group(1)
else:
revision = module.revision
return Script(module, revision, os.path.join(dir_, filename))
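# --- Hedged usage sketch (editor addition) ---------------------------------
# A minimal illustration of how the revision-walking API above is typically
# consumed.  It assumes a ScriptDirectory instance obtained elsewhere (e.g.
# via the constructor or from_config helper defined earlier in this module);
# nothing below runs at import time.
def _example_walk_revisions(script_directory):
    """Print every revision from the current head down to base."""
    head = script_directory.get_current_head()
    if head is None:
        return
    for script in script_directory.iterate_revisions(head, "base"):
        # Script.__str__ renders "down_revision -> revision (flags), doc"
        print(script)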
| SeaFalcon/Musicool_Pr | lib/alembic/script.py | Python | apache-2.0 | 18,125 |
# -*- coding: utf-8 -*-
#
# bender-hooks documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 09 17:09:52 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Autodoc settings
autodoc_default_flags = ['members', 'show-inheritance']
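# Note (hedged): on Sphinx >= 1.8 the flag list above is superseded by the
# ``autodoc_default_options`` dictionary; the equivalent setting would be:
#autodoc_default_options = {'members': True, 'show-inheritance': True}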
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bender-hooks'
copyright = u'2014, Bruno Oliveira, Fabio Menegazzo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bender-hooksdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bender-hooks.tex', u'bender-hooks Documentation',
u'Bruno Oliveira, Fabio Menegazzo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bender-hooks', u'bender-hooks Documentation',
[u'Bruno Oliveira, Fabio Menegazzo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bender-hooks', u'bender-hooks Documentation',
u'Bruno Oliveira, Fabio Menegazzo', 'bender-hooks', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bender-bot/bender-hooks | docs/conf.py | Python | lgpl-3.0 | 8,798 |
# Copyright (c) 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import base
from troveclient import common
class Cluster(base.Resource):
"""A Cluster is an opaque cluster used to store Database clusters."""
def __repr__(self):
return "<Cluster: %s>" % self.name
def delete(self):
"""Delete the cluster."""
self.manager.delete(self)
class Clusters(base.ManagerWithFind):
"""Manage :class:`Cluster` resources."""
resource_class = Cluster
def create(self, name, datastore, datastore_version, instances=None,
locality=None):
"""Create (boot) a new cluster."""
body = {"cluster": {
"name": name
}}
datastore_obj = {
"type": datastore,
"version": datastore_version
}
body["cluster"]["datastore"] = datastore_obj
if instances:
body["cluster"]["instances"] = instances
if locality:
body["cluster"]["locality"] = locality
return self._create("/clusters", body, "cluster")
def list(self, limit=None, marker=None):
"""Get a list of all clusters.
:rtype: list of :class:`Cluster`.
"""
return self._paginated("/clusters", "clusters", limit, marker)
def get(self, cluster):
"""Get a specific cluster.
:rtype: :class:`Cluster`
"""
return self._get("/clusters/%s" % base.getid(cluster),
"cluster")
def delete(self, cluster):
"""Delete the specified cluster.
:param cluster: The cluster to delete
"""
url = "/clusters/%s" % base.getid(cluster)
resp, body = self.api.client.delete(url)
common.check_for_exceptions(resp, body, url)
def _action(self, cluster, body):
"""Perform a cluster "action" -- grow/shrink/etc."""
url = "/clusters/%s" % base.getid(cluster)
resp, body = self.api.client.post(url, body=body)
common.check_for_exceptions(resp, body, url)
if body:
return self.resource_class(self, body['cluster'], loaded=True)
return body
def add_shard(self, cluster):
"""Adds a shard to the specified cluster.
:param cluster: The cluster to add a shard to
"""
url = "/clusters/%s" % base.getid(cluster)
body = {"add_shard": {}}
resp, body = self.api.client.post(url, body=body)
common.check_for_exceptions(resp, body, url)
if body:
return self.resource_class(self, body, loaded=True)
return body
def grow(self, cluster, instances=None):
"""Grow a cluster.
:param cluster: The cluster to grow
:param instances: List of instances to add
"""
body = {"grow": instances}
return self._action(cluster, body)
def shrink(self, cluster, instances=None):
"""Shrink a cluster.
:param cluster: The cluster to shrink
:param instances: List of instances to drop
"""
body = {"shrink": instances}
return self._action(cluster, body)
class ClusterStatus(object):
ACTIVE = "ACTIVE"
BUILD = "BUILD"
FAILED = "FAILED"
SHUTDOWN = "SHUTDOWN"
| Tesora-Release/tesora-python-troveclient | troveclient/v1/clusters.py | Python | apache-2.0 | 3,850 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from datetime import datetime
class Migration(migrations.Migration):
dependencies = [
('testmanualrunner', '0005_auto_20140703_0928'),
]
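    # Note: the datetime.now() calls below are evaluated once, at import time
    # of this migration; the value only backfills existing rows, since
    # preserve_default=False keeps the default out of the final field state.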
operations = [
migrations.AddField(
model_name='testrun',
name='updated_at',
field=models.DateTimeField(default=datetime.now(), auto_now=True),
preserve_default=False,
),
migrations.AddField(
model_name='testrunresult',
name='updated_at',
field=models.DateTimeField(default=datetime.now(), auto_now=True),
preserve_default=False,
),
migrations.AlterField(
model_name='testrun',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='testrunresult',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
]
| mwasilew/testmanager | testmanager/testmanualrunner/migrations/0006_auto_20140703_0953.py | Python | agpl-3.0 | 1,072 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_project_completed'),
]
operations = [
migrations.CreateModel(
name='Unit',
fields=[
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('unit_id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=20, unique=True)),
],
options={
'abstract': False,
},
),
]
| bonnieblueag/farm_log | core/migrations/0004_unit.py | Python | gpl-3.0 | 722 |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: Manage disk groups and disk pools
version_added: '2.2'
description:
- Create or remove disk groups and disk pools for NetApp E-series storage arrays.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified storage pool should exist or not.
- Note that removing a storage pool currently requires the removal of all defined volumes first.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the storage pool to manage
criteria_drive_count:
description:
- The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place
criteria_drive_type:
description:
    - The type of disk (hdd or ssd) to consider when searching for candidates.
choices: ['hdd','ssd']
criteria_size_unit:
description:
- The unit used to interpret size parameters
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
criteria_drive_min_size:
description:
- The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
criteria_min_usable_capacity:
description:
    - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
criteria_drive_interface_type:
description:
- The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered)
choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
criteria_drive_require_fde:
description:
- Whether full disk encryption ability is required for drives to be added to the storage pool
raid_level:
required: true
choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
description:
- "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
erase_secured_drives:
required: false
choices: ['true', 'false']
description:
- Whether to erase secured disks before adding to storage pool
secure_pool:
required: false
choices: ['true', 'false']
description:
- Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
reserve_drive_count:
required: false
description:
    - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on raid disk pools.
remove_volumes:
required: false
default: False
description:
- Prior to removing a storage pool, delete all volumes in the pool.
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No disk groups
netapp_e_storagepool:
ssid: "{{ ssid }}"
name: "{{ item }}"
state: absent
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
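# Hedged additional example (editor addition): creating a disk group.  The
# values are illustrative; option names come from the documentation above.
- name: Create a raid5 disk group with 5 drives
  netapp_e_storagepool:
    ssid: "{{ ssid }}"
    name: raid5_storage
    state: present
    raid_level: raid5
    criteria_drive_count: 5
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"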
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: Json facts for the pool that was created.
'''
import json
import logging
from traceback import format_exc
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def select(predicate, iterable):
# python 2, 3 generic filtering.
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
class groupby(object):
# python 2, 3 generic grouping.
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def next(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey))
    # Python 3 iterates via __next__; alias the Python 2-style next() above.
    __next__ = next
    def _grouper(self, tgtkey):
        while self.currkey == tgtkey:
            yield self.currvalue
            try:
                self.currvalue = next(self.it)
            except StopIteration:
                # PEP 479: StopIteration may not escape a generator body.
                return
            self.currkey = self.keyfunc(self.currvalue)
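# Hedged illustration (editor addition): select()/groupby() above mimic their
# itertools counterparts so the drive-matching code can run unchanged on
# Python 2 and 3.  A tiny self-contained sanity check, unused by the module:
def _demo_grouping():
    drives = [{'rawCapacity': 100}, {'rawCapacity': 100}, {'rawCapacity': 200}]
    usable = select(lambda d: d['rawCapacity'] >= 100, drives)
    # Input is already sorted by capacity, so groups are contiguous:
    # returns [(100, 2), (200, 1)]
    return [(capacity, len(list(group)))
            for capacity, group in groupby(usable, key=lambda d: d['rawCapacity'])]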
class NetAppESeriesStoragePool(object):
def __init__(self):
self._sp_drives_cached = None
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent'], type='str'),
ssid=dict(required=True, type='str'),
name=dict(required=True, type='str'),
criteria_size_unit=dict(default='gb', type='str'),
criteria_drive_count=dict(type='int'),
criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
type='str'),
criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
criteria_drive_min_size=dict(type='int'),
criteria_drive_require_fde=dict(type='bool'),
criteria_min_usable_capacity=dict(type='int'),
raid_level=dict(
choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
erase_secured_drives=dict(type='bool'),
log_path=dict(type='str'),
remove_drives=dict(type='list'),
secure_pool=dict(type='bool', default=False),
reserve_drive_count=dict(type='int'),
remove_volumes=dict(type='bool', default=False)
))
self.module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['raid_level'])
],
mutually_exclusive=[
],
# TODO: update validation for various selection criteria
supports_check_mode=True
)
p = self.module.params
log_path = p['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if log_path:
logging.basicConfig(level=logging.DEBUG, filename=log_path)
self.state = p['state']
self.ssid = p['ssid']
self.name = p['name']
self.validate_certs = p['validate_certs']
self.criteria_drive_count = p['criteria_drive_count']
self.criteria_drive_type = p['criteria_drive_type']
self.criteria_size_unit = p['criteria_size_unit']
self.criteria_drive_min_size = p['criteria_drive_min_size']
self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
self.criteria_drive_interface_type = p['criteria_drive_interface_type']
self.criteria_drive_require_fde = p['criteria_drive_require_fde']
self.raid_level = p['raid_level']
self.erase_secured_drives = p['erase_secured_drives']
self.remove_drives = p['remove_drives']
self.secure_pool = p['secure_pool']
self.reserve_drive_count = p['reserve_drive_count']
self.remove_volumes = p['remove_volumes']
try:
self.api_usr = p['api_username']
self.api_pwd = p['api_password']
self.api_url = p['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username "
"and api_password and api_url to the module.")
self.post_headers = dict(Accept="application/json")
self.post_headers['Content-Type'] = 'application/json'
# Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12.
# Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we
# can copy/paste to other modules more easily.
# Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects
# the first set that matches the specified count and/or aggregate capacity.
# class DriveSelector(object):
def filter_drives(
self,
drives, # raw drives resp
interface_type=None, # sas, sata, fibre, etc
drive_type=None, # ssd/hdd
spindle_speed=None, # 7200, 10000, 15000, ssd (=0)
min_drive_size=None,
max_drive_size=None,
fde_required=None,
size_unit='gb',
min_total_capacity=None,
min_drive_count=None,
exact_drive_count=None,
raid_level=None
):
if min_total_capacity is None and exact_drive_count is None:
raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.")
if min_total_capacity:
min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]
# filter clearly invalid/unavailable drives first
drives = select(lambda d: self._is_valid_drive(d), drives)
if interface_type:
drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
if drive_type:
drives = select(lambda d: d['driveMediaType'] == drive_type, drives)
if spindle_speed is not None: # 0 is valid for ssds
drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)
if min_drive_size:
min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)
if max_drive_size:
max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)
if fde_required:
drives = select(lambda d: d['fdeCapable'], drives)
# initial implementation doesn't have a preference for any of these values...
# just return the first set we find that matches the requested disk count and/or minimum total capacity
for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])):
for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity,
lambda d: d['phyDriveType']):
for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type,
lambda d: d['driveMediaType']):
# listify so we can consume more than once
drives_by_drive_type = list(drives_by_drive_type)
candidate_set = list() # reset candidate list on each iteration of the innermost loop
if exact_drive_count:
if len(drives_by_drive_type) < exact_drive_count:
continue # we know this set is too small, move on
for drive in drives_by_drive_type:
candidate_set.append(drive)
if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
min_drive_count=min_drive_count,
exact_drive_count=exact_drive_count, raid_level=raid_level):
return candidate_set
raise Exception("couldn't find an available set of disks to match specified criteria")
def _is_valid_drive(self, d):
is_valid = d['available'] \
and d['status'] == 'optimal' \
and not d['pfa'] \
and not d['removed'] \
and not d['uncertified'] \
and not d['invalidDriveData'] \
and not d['nonRedundantAccess']
return is_valid
def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
exact_drive_count=None, raid_level=None):
if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count,
exact_drive_count=exact_drive_count, raid_level=raid_level):
return False
# TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set
if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
len(candidate_set),
raid_level=raid_level) < min_capacity_bytes:
return False
return True
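    # Usable-capacity rule implemented below: raid0 keeps all N disks, raid1
    # mirrors (N / 2), raid3/raid5 spend one disk on parity, and raid6 /
    # disk pools spend two.  E.g. 6 x 4 TB in raid5 -> (6 - 1) * 4 TB = 20 TB.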
def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
if raid_level in [None, 'raid0']:
return disk_size_bytes * disk_count
if raid_level == 'raid1':
return (disk_size_bytes * disk_count) // 2
if raid_level in ['raid3', 'raid5']:
return (disk_size_bytes * disk_count) - disk_size_bytes
if raid_level in ['raid6', 'raidDiskPool']:
return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
raise Exception("unsupported raid_level: %s" % raid_level)
def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
if exact_drive_count and exact_drive_count != drive_count:
return False
if raid_level == 'raidDiskPool':
if drive_count < 11:
return False
if raid_level == 'raid1':
if drive_count % 2 != 0:
return False
if raid_level in ['raid3', 'raid5']:
if drive_count < 3:
return False
if raid_level == 'raid6':
if drive_count < 4:
return False
if min_drive_count and drive_count < min_drive_count:
return False
return True
def get_storage_pool(self, storage_pool_name):
# global ifilter
self.debug("fetching storage pools")
# map the storage pool name to its id
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
rc = err.args[0]
if rc == 404 and self.state == 'absent':
self.module.exit_json(
msg="Storage pool [%s] did not exist." % (self.name))
else:
err = get_exception()
self.module.exit_json(
msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
(self.ssid, str(err), self.state, rc))
self.debug("searching for storage pool '%s'" % storage_pool_name)
pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
found = 'found'
else:
found = 'not found'
self.debug(found)
return pool_detail
def get_candidate_disks(self):
self.debug("getting candidate disks...")
# driveCapacityMin is broken on /drives POST. Per NetApp request we built our own
# switch back to commented code below if it gets fixed
# drives_req = dict(
# driveCount = self.criteria_drive_count,
# sizeUnit = 'mb',
# raidLevel = self.raid_level
# )
#
# if self.criteria_drive_type:
# drives_req['driveType'] = self.criteria_drive_type
# if self.criteria_disk_min_aggregate_size_mb:
# drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
#
# # TODO: this arg appears to be ignored, uncomment if it isn't
# #if self.criteria_disk_min_size_gb:
# # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
# (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers,
# method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
#
# if rc == 204:
# self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
# disk_ids = [d['id'] for d in drives_resp]
try:
(rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except:
err = get_exception()
self.module.exit_json(
msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err)))
try:
candidate_set = self.filter_drives(drives_resp,
exact_drive_count=self.criteria_drive_count,
drive_type=self.criteria_drive_type,
min_drive_size=self.criteria_drive_min_size,
raid_level=self.raid_level,
size_unit=self.criteria_size_unit,
min_total_capacity=self.criteria_min_usable_capacity,
interface_type=self.criteria_drive_interface_type,
fde_required=self.criteria_drive_require_fde
)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))
disk_ids = [d['id'] for d in candidate_set]
return disk_ids
def create_storage_pool(self):
self.debug("creating storage pool...")
sp_add_req = dict(
raidLevel=self.raid_level,
diskDriveIds=self.disk_ids,
name=self.name
)
if self.erase_secured_drives:
sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to create storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
str(err)))
self.pool_detail = self.get_storage_pool(self.name)
if self.secure_pool:
secure_pool_data = dict(securePool=True)
try:
(retc, r) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
str(err)))
@property
def needs_raid_level_migration(self):
current_raid_level = self.pool_detail['raidLevel']
needs_migration = self.raid_level != current_raid_level
if needs_migration: # sanity check some things so we can fail early/check-mode
if current_raid_level == 'raidDiskPool':
self.module.fail_json(msg="raid level cannot be changed for disk pools")
return needs_migration
def migrate_raid_level(self):
self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
sp_raid_migrate_req = dict(
raidLevel=self.raid_level
)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
self.name),
data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, str(err)))
@property
def sp_drives(self, exclude_hotspares=True):
if not self._sp_drives_cached:
self.debug("fetching drive list...")
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err)))
sp_id = self.pool_detail['id']
if exclude_hotspares:
self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
else:
self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]
return self._sp_drives_cached
@property
def reserved_drive_count_differs(self):
if int(self.pool_detail['volumeGroupData']['diskPoolData']['reconstructionReservedDriveCount']) != self.reserve_drive_count:
return True
return False
@property
def needs_expansion(self):
if self.criteria_drive_count > len(self.sp_drives):
return True
# TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
if self.criteria_min_usable_capacity and \
(self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
return True
return False
def get_expansion_candidate_drives(self):
# sanity checks; don't call this if we can't/don't need to expand
if not self.needs_expansion:
self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")
self.debug("fetching expansion candidate drives...")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
self.pool_detail['id']),
method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, str(err)))
current_drive_count = len(self.sp_drives)
current_capacity_bytes = int(self.pool_detail['totalRaidedSpace']) # TODO: is this the right attribute to use?
if self.criteria_min_usable_capacity:
requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
else:
requested_capacity_bytes = current_capacity_bytes
if self.criteria_drive_count:
minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
else:
minimum_disks_to_add = 1
minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)
# FUTURE: allow more control over expansion candidate selection?
# loop over candidate disk sets and add until we've met both criteria
added_drive_count = 0
added_capacity_bytes = 0
drives_to_add = set()
for s in resp:
# don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
candidate_drives = s['drives']
if len(drives_to_add.intersection(candidate_drives)) != 0:
# duplicate, skip
continue
drives_to_add.update(candidate_drives)
added_drive_count += len(candidate_drives)
added_capacity_bytes += int(s['usableCapacity'])
if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
break
if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
self.module.fail_json(
msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
minimum_disks_to_add, minimum_bytes_to_add))
return list(drives_to_add)
def expand_storage_pool(self):
drives_to_add = self.get_expansion_candidate_drives()
self.debug("adding %s drives to storage pool..." % len(drives_to_add))
sp_expand_req = dict(
drives=drives_to_add
)
try:
request(
self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
self.pool_detail['id']),
data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
str(
err)))
# TODO: check response
# TODO: support blocking wait?
def reduce_drives(self, drive_list):
        # Validate that every drive requested for removal is actually part of
        # this storage pool.  NOTE (hedged): this assumes drive_list holds
        # drive ids matching the 'id' field of the drive objects in sp_drives.
        pool_drive_ids = [d['id'] for d in self.sp_drives]
        if not all(drive in pool_drive_ids for drive in drive_list):
            self.module.fail_json(
                msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
self.pool_detail['id']),
data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, str(err)))
def update_reserve_drive_count(self, qty):
data = dict(reservedDriveCount=qty)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
except:
err = get_exception()
pool_id = self.pool_detail['id']
self.module.exit_json(
msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
str(
err)))
def apply(self):
changed = False
pool_exists = False
self.pool_detail = self.get_storage_pool(self.name)
if self.pool_detail:
pool_exists = True
pool_id = self.pool_detail['id']
if self.state == 'absent':
self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# sanity checks first- we can't change these, so we'll bomb if they're specified
if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
self.module.fail_json(
msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
self.criteria_drive_type))
# now the things we can change...
if self.needs_expansion:
self.debug("CHANGED: storage pool needs expansion")
changed = True
if self.needs_raid_level_migration:
self.debug(
"CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
self.pool_detail['raidLevel'], self.raid_level))
changed = True
# if self.reserved_drive_count_differs:
# changed = True
# TODO: validate other state details? (pool priority, alert threshold)
# per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
# presents a difficult parameter issue, as the disk count can increase due to expansion, so we
# can't just use disk count > criteria_drive_count.
else: # pool does not exist
if self.state == 'present':
self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
changed = True
# ensure we can get back a workable set of disks
# (doing this early so candidate selection runs under check mode)
self.disk_ids = self.get_candidate_disks()
else:
self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name))
if changed and not self.module.check_mode:
# apply changes
if self.state == 'present':
if not pool_exists:
self.create_storage_pool()
else: # pool exists but differs, modify...
if self.needs_expansion:
self.expand_storage_pool()
if self.remove_drives:
self.reduce_drives(self.remove_drives)
if self.needs_raid_level_migration:
self.migrate_raid_level()
# if self.reserved_drive_count_differs:
# self.update_reserve_drive_count(self.reserve_drive_count)
if self.secure_pool:
secure_pool_data = dict(securePool=True)
try:
(retc, r) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
self.pool_detail[
'id']),
data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
except:
err = get_exception()
self.module.exit_json(
msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
pool_id, self.ssid, str(err)))
if int(retc) == 422:
self.module.fail_json(
msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")
elif self.state == 'absent':
# delete the storage pool
try:
remove_vol_opt = ''
if self.remove_volumes:
remove_vol_opt = '?delete-volumes=true'
(rc, resp) = request(
self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
remove_vol_opt),
method='DELETE',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except:
err = get_exception()
self.module.exit_json(
msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
self.ssid,
str(err)))
self.module.exit_json(changed=changed, **self.pool_detail)
def main():
sp = NetAppESeriesStoragePool()
try:
sp.apply()
except Exception:
e = get_exception()
sp.debug("Exception in apply(): \n%s" % format_exc(e))
raise
if __name__ == '__main__':
main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/storage/netapp/netapp_e_storagepool.py | Python | bsd-3-clause | 39,322 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import wizard
from . import stock
 | odoousers2014/LibrERP | l10n_it_sale_group/__init__.py | Python | agpl-3.0 | 1,004
"""
Rsync helper functions
"""
from fabric.api import env
from fabric.contrib.project import rsync_project
def rsync_update_project():
rsync_project(
remote_dir=env.project_path,
local_dir=env.rsync_local,
exclude=env.rsync_exclude,
delete=env.rsync_delete
)
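# Hedged usage sketch (editor addition): typical wiring inside a fabfile,
# using setup_rsync() defined just below.  The path and patterns here are
# illustrative placeholders, not real project values.
def example_deploy():
    env.project_path = '/srv/example_project'
    setup_rsync(rsync_exclude=['*.pyc', '.git'], rsync_local='./',
                rsync_delete=True)
    rsync_update_project()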
def setup_rsync(rsync_exclude, rsync_local, rsync_delete=False):
"""
    Store the given rsync settings on the Fabric env for later use by
    rsync_update_project().
"""
env.rsync_exclude = rsync_exclude
env.rsync_local = rsync_local
    env.rsync_delete = rsync_delete
 | tomaszroszko/rsfab | rsfab/rsync_deploy.py | Python | bsd-3-clause | 526
import ckan.plugins as plugins
import ckan.lib.helpers as helpers
import ckan.plugins.toolkit as toolkit
import ckan.lib.base as base
import ckan.lib.render as render
import routes.mapper
from routes import redirect_to
import os.path
import py2psql
import json
from pylons import config
import re
from helpers import *
import peroid
class DownloadPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IRoutes)
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
#
# desc : status object
#
def __status(self, state, info, data):
return {"state" : state, "info" : info, "data" : data}
#
# desc : create table for download_summary
#
def createTb(self):
schemaFile = "ckanext/download/schema.json"
dataSchema = ""
if os.path.isfile(schemaFile):
with open(schemaFile, "r") as fin:
for line in fin:
dataSchema += line.strip()
dataSchema = json.loads(dataSchema)
p2l = None
url = config.get('ckan.download.psqlUrl')
pattern = re.compile('\S+://(\S+):(\S+)@(\S+):(\d+)/(\S+)')
match = pattern.match(url)
if match:
link = pattern.findall(url)[0]
p2l = py2psql.py2psql(link[2],link[3],link[4],"",link[0],link[1])
else:
pattern = re.compile('\S+://(\S+):(\S+)@(\S+)/(\S+)')
link = pattern.findall(url)[0]
p2l = py2psql.py2psql(link[2],"5432",link[3],"",link[0],link[1])
p2l.createTable(\
"download_summary", \
dataSchema, \
dropFirst=False \
)
if p2l.status()["state"] != "success":
return self.__status("failure", p2l.status()["info"], {})
else:
return self.__status("success", "Create download_summary table completely.", {})
# IConfigurer
def update_config(self, config_):
# replace package url
template = plugins.toolkit.asbool(config.get('ckan.download.template'))
if not (template == None or template == False):
plugins.toolkit.add_template_directory(config_, 'templates')
else:
# keep counting download is available
plugins.toolkit.add_template_directory(config, 'theme/templates')
# new page
plugins.toolkit.add_template_directory(config, 'theme/main')
# add resource
plugins.toolkit.add_resource('fanstatic', 'dwnres')
if config.get('ckan.download.psqlUrl') == None:
return "Error : Configuration is not set."
if self.createTb()["state"] != "success":
return "Error : The download_summary table can not be created. [info] : " + self.createTb()["info"]
## ITemplateHelpers
def get_helpers(self):
# define in the helpers.py
return { 'counter' : counter, \
'countDwnBody' : countDwnBody, \
'countDownload' : countDownload, \
'getResDwnSummary' : getResDwnSummary, \
'transform2UTF8' : transform2UTF8, \
'getResSummaryInfo' : getResSummaryInfo, \
'getBothViewDwnCount' : getBothViewDwnCount, \
'getPkgSum' : getPkgSum, \
'getViewSum' : getViewSum, \
'getViewDwnCount' : getViewDwnCount \
}
def before_map(self, route_map):
with routes.mapper.SubMapper(route_map, controller='ckanext.download.plugin:DownloadController') as m:
m.connect('download_summary', '/download', action='download_summary')
m.connect('download_date_summary', '/download_date', action='download_date_summary')
m.connect('download_date_summary_peroid', '/download_date/peroid', action='download_date_summary_peroid')
return route_map
def after_map(self, route_map):
return route_map
class DownloadController(base.BaseController):
def __countRes(self, getResID):
countDownload(getResID)
def __renderUrl(self, option):
tracking = plugins.toolkit.asbool(config.get('ckan.tracking_enabled'))
if tracking == None or tracking == False:
tracking = "False"
else:
tracking = "True"
passVars = {"tracking" : tracking}
if option == "index":
return toolkit.render('index.html', extra_vars=passVars)
elif option == "date":
return toolkit.render('date.html', extra_vars=passVars)
#
# desc : controller for url path
#
def download_summary(self):
if plugins.toolkit.request.method == "GET" \
and "resourceid" in plugins.toolkit.request.params.keys() \
and "dataurl" in plugins.toolkit.request.params.keys():
# count the resource
self.__countRes(plugins.toolkit.request.params.get('resourceid'))
# redirect to the data url
#r = helpers.redirect_to(unicode(plugins.toolkit.request.params.get('dataurl')).encode('utf-8'))
searchObj = re.search(r'.*download\?resourceid=.*&dataurl=(.*)', unicode(plugins.toolkit.request.url).encode('utf-8'), re.M | re.I)
redirUrl = searchObj.groups()[0]
r = redirect_to(redirUrl)
return r
else:
return self.__renderUrl('index')
def download_date_summary(self):
return self.__renderUrl('date')
def download_date_summary_peroid(self):
url = config.get('ckan.download.psqlUrl')
pattern = re.compile('\S+://(\S+):(\S+)@(\S+):(\d+)/(\S+)')
match = pattern.match(url)
if match:
link = pattern.findall(url)[0]
peroidObj = peroid.dataPeroidModel(link[2], str(link[3]), link[4], "download_summary", link[0], link[1])
return peroidObj.countPeroidBody()
else:
pattern = re.compile('\S+://(\S+):(\S+)@(\S+)/(\S+)')
link = pattern.findall(url)[0]
peroidObj = peroid.dataPeroidModel(link[2], str("5432"), link[3], "download_summary", link[0], link[1])
return peroidObj.countPeroidBody()
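# Hedged usage note (editor addition): the counting endpoint registered above
# is expected to be hit as
#   /download?resourceid=<resource-id>&dataurl=<original-resource-url>
# after which countDownload() records the hit and the request is redirected to
# the original resource URL; /download and /download_date render the summary
# pages from the template directories added in update_config().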
| jiankaiwang/ckanext-download | ckanext/download/plugin.py | Python | agpl-3.0 | 6,218 |
"""Area filter module"""
from weakref import WeakValueDictionary
import numpy as np
from matplotlib.path import Path
try:
from rtree.index import Index
except (ImportError, OSError):
print('Warning: RTree could not be loaded. areafilter get_intersecting and get_knearest won\'t work')
class Index:
''' Dummy index class for installations where rtree is missing
or doesn't work.
'''
@staticmethod
def intersection(*args, **kwargs):
return []
@staticmethod
def nearest(*args, **kwargs):
return []
@staticmethod
def insert(*args, **kwargs):
return
@staticmethod
def delete(*args, **kwargs):
return
import bluesky as bs
from bluesky.tools.geo import kwikdist
# Dictionary of all basic shapes (The shape classes defined in this file) by name
basic_shapes = dict()
def hasArea(areaname):
"""Check if area with name 'areaname' exists."""
return areaname in basic_shapes
def defineArea(areaname, areatype, coordinates, top=1e9, bottom=-1e9):
"""Define a new area"""
if areaname == 'LIST':
if not basic_shapes:
return True, 'No shapes are currently defined.'
else:
return True, 'Currently defined shapes:\n' + \
', '.join(basic_shapes)
if not coordinates:
if areaname in basic_shapes:
return True, str(basic_shapes[areaname])
else:
return False, f'Unknown shape: {areaname}'
if areatype == 'BOX':
basic_shapes[areaname] = Box(areaname, coordinates, top, bottom)
elif areatype == 'CIRCLE':
basic_shapes[areaname] = Circle(areaname, coordinates, top, bottom)
elif areatype[:4] == 'POLY':
basic_shapes[areaname] = Poly(areaname, coordinates, top, bottom)
elif areatype == 'LINE':
basic_shapes[areaname] = Line(areaname, coordinates)
# Pass the shape on to the screen object
bs.scr.objappend(areatype, areaname, coordinates)
return True, f'Created {areatype} {areaname}'
def checkInside(areaname, lat, lon, alt):
""" Check if points with coordinates lat, lon, alt are inside area with name 'areaname'.
Returns an array of booleans. True == Inside"""
if areaname not in basic_shapes:
        return np.zeros(len(lat), dtype=bool)  # np.bool is removed in newer numpy
area = basic_shapes[areaname]
return area.checkInside(lat, lon, alt)
def deleteArea(areaname):
""" Delete area with name 'areaname'. """
if areaname in basic_shapes:
basic_shapes.pop(areaname)
bs.scr.objappend('', areaname, None)
def reset():
""" Clear all data. """
basic_shapes.clear()
Shape.reset()
def get_intersecting(lat0, lon0, lat1, lon1):
''' Return all shapes that intersect with a specified rectangular area.
Arguments:
- lat0/1, lon0/1: Coordinates of the top-left and bottom-right corner
of the intersection area.
'''
    # objects=True makes rtree yield Item objects (which carry an .id
    # attribute) instead of bare integer ids.
    items = Shape.areatree.intersection((lat0, lon0, lat1, lon1), objects=True)
    return [Shape.areas_by_id[i.id] for i in items]
def get_knearest(lat0, lon0, lat1, lon1, k=1):
''' Return the k nearest shapes to a specified rectangular area.
Arguments:
- lat0/1, lon0/1: Coordinates of the top-left and bottom-right corner
of the relevant area.
- k: The (maximum) number of results to return.
'''
    # objects=True for the same reason as in get_intersecting above.
    items = Shape.areatree.nearest((lat0, lon0, lat1, lon1), k, objects=True)
    return [Shape.areas_by_id[i.id] for i in items]
class Shape:
'''
Base class of BlueSky shapes
'''
# Global counter to keep track of used shape ids
max_area_id = 0
# Weak-value dictionary of all Shape-derived objects by name, and id
areas_by_id = WeakValueDictionary()
areas_by_name = WeakValueDictionary()
# RTree of all areas for efficient geospatial searching
areatree = Index()
@classmethod
def reset(cls):
''' Reset shape data when simulation is reset. '''
# Weak dicts and areatree should be cleared automatically
# Reset max area id
cls.max_area_id = 0
def __init__(self, name, coordinates, top=1e9, bottom=-1e9):
self.raw = dict(name=name, shape=self.kind(), coordinates=coordinates)
self.name = name
self.coordinates = coordinates
self.top = np.maximum(bottom, top)
self.bottom = np.minimum(bottom, top)
lat = coordinates[::2]
lon = coordinates[1::2]
self.bbox = [min(lat), min(lon), max(lat), max(lon)]
# Global weak reference and tree storage
self.area_id = Shape.max_area_id
Shape.max_area_id += 1
Shape.areas_by_id[self.area_id] = self
Shape.areas_by_name[self.name] = self
Shape.areatree.insert(self.area_id, self.bbox)
def __del__(self):
# Objects are removed automatically from the weak-value dicts,
# but need to be manually removed from the rtree
Shape.areatree.delete(self.area_id, self.bbox)
def checkInside(self, lat, lon, alt):
''' Returns True (or boolean array) if coordinate lat, lon, alt lies
within this shape.
Reimplement this function in the derived shape classes for this to
work.
'''
return False
def _str_vrange(self):
if self.top < 9e8:
if self.bottom > -9e8:
return f' with altitude between {self.bottom} and {self.top}'
else:
return f' with altitude below {self.top}'
if self.bottom > -9e8:
return f' with altitude above {self.bottom}'
return ''
def __str__(self):
return f'{self.name} is a {self.raw["shape"]} with coordinates ' + \
', '.join(str(c) for c in self.coordinates) + self._str_vrange()
@classmethod
def kind(cls):
''' Return a string describing what kind of shape this is. '''
return cls.__name__.upper()
class Line(Shape):
''' A line shape '''
def __init__(self, name, coordinates):
super().__init__(name, coordinates)
def __str__(self):
return f'{self.name} is a LINE with ' \
f'start point ({self.coordinates[0]}, {self.coordinates[1]}), ' \
f'and end point ({self.coordinates[2]}, {self.coordinates[3]}).'
class Box(Shape):
''' A box shape '''
def __init__(self, name, coordinates, top=1e9, bottom=-1e9):
super().__init__(name, coordinates, top, bottom)
# Sort the order of the corner points
self.lat0 = min(coordinates[0], coordinates[2])
self.lon0 = min(coordinates[1], coordinates[3])
self.lat1 = max(coordinates[0], coordinates[2])
self.lon1 = max(coordinates[1], coordinates[3])
def checkInside(self, lat, lon, alt):
return ((self.lat0 <= lat) & ( lat <= self.lat1)) & \
((self.lon0 <= lon) & (lon <= self.lon1)) & \
((self.bottom <= alt) & (alt <= self.top))
class Circle(Shape):
''' A circle shape '''
def __init__(self, name, coordinates, top=1e9, bottom=-1e9):
super().__init__(name, coordinates, top, bottom)
self.clat = coordinates[0]
self.clon = coordinates[1]
self.r = coordinates[2]
def checkInside(self, lat, lon, alt):
distance = kwikdist(self.clat, self.clon, lat, lon) # [NM]
inside = (distance <= self.r) & (self.bottom <= alt) & (alt <= self.top)
return inside
def __str__(self):
return f'{self.name} is a CIRCLE with ' \
f'center ({self.clat}, {self.clon}) ' \
f'and radius {self.r}.' + self._str_vrange()
class Poly(Shape):
''' A polygon shape '''
def __init__(self, name, coordinates, top=1e9, bottom=-1e9):
super().__init__(name, coordinates, top, bottom)
self.border = Path(np.reshape(coordinates, (len(coordinates) // 2, 2)))
def checkInside(self, lat, lon, alt):
points = np.vstack((lat,lon)).T
inside = np.all((self.border.contains_points(points), self.bottom <= alt, alt <= self.top), axis=0)
return inside
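

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: it exercises the
    # Box and Poly shapes directly rather than via defineArea(), so no running
    # BlueSky screen object is required (it does assume the bluesky package
    # itself imports cleanly). All coordinates below are made-up examples.
    lat = np.array([52.2, 53.5])
    lon = np.array([4.2, 4.2])
    alt = np.array([5000.0, 5000.0])

    box = Box('DEMOBOX', [52.0, 4.0, 52.5, 4.5], top=10000.0, bottom=0.0)
    print(box.checkInside(lat, lon, alt))   # first point inside, second outside

    poly = Poly('DEMOPOLY', [52.0, 4.0, 52.0, 5.0, 53.0, 5.0, 53.0, 4.0])
    print(poly.checkInside(lat, lon, alt))  # same result for the polygon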
| ProfHoekstra/bluesky | bluesky/tools/areafilter.py | Python | gpl-3.0 | 8,158 |
from __future__ import absolute_import
from __future__ import unicode_literals
import os.path
import pytest
from pre_commit import git
from pre_commit.errors import FatalError
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from testing.fixtures import git_dir
def test_get_root_at_root(tmpdir_factory):
path = git_dir(tmpdir_factory)
with cwd(path):
assert git.get_root() == path
def test_get_root_deeper(tmpdir_factory):
path = git_dir(tmpdir_factory)
foo_path = os.path.join(path, 'foo')
os.mkdir(foo_path)
with cwd(foo_path):
assert git.get_root() == path
def test_get_root_not_git_dir(tmpdir_factory):
with cwd(tmpdir_factory.get()):
with pytest.raises(FatalError):
git.get_root()
def test_is_not_in_merge_conflict(tmpdir_factory):
path = git_dir(tmpdir_factory)
with cwd(path):
assert git.is_in_merge_conflict() is False
def test_is_in_merge_conflict(in_merge_conflict):
assert git.is_in_merge_conflict() is True
def test_cherry_pick_conflict(in_merge_conflict):
cmd_output('git', 'merge', '--abort')
foo_ref = cmd_output('git', 'rev-parse', 'foo')[1].strip()
cmd_output('git', 'cherry-pick', foo_ref, retcode=None)
assert git.is_in_merge_conflict() is False
@pytest.fixture
def get_files_matching_func():
def get_filenames():
return (
'pre_commit/main.py',
'pre_commit/git.py',
'im_a_file_that_doesnt_exist.py',
'hooks.yaml',
)
return git.get_files_matching(get_filenames)
def test_get_files_matching_base(get_files_matching_func):
ret = get_files_matching_func('', '^$')
assert ret == set([
'pre_commit/main.py',
'pre_commit/git.py',
'hooks.yaml',
])
def test_get_files_matching_total_match(get_files_matching_func):
ret = get_files_matching_func('^.*\\.py$', '^$')
assert ret == set([
'pre_commit/main.py',
'pre_commit/git.py',
])
def test_does_search_instead_of_match(get_files_matching_func):
ret = get_files_matching_func('\\.yaml$', '^$')
assert ret == set(['hooks.yaml'])
def test_does_not_include_deleted_files(get_files_matching_func):
ret = get_files_matching_func('exist.py', '^$')
assert ret == set()
def test_exclude_removes_files(get_files_matching_func):
ret = get_files_matching_func('', '\\.py$')
assert ret == set(['hooks.yaml'])
def resolve_conflict():
with open('conflict_file', 'w') as conflicted_file:
conflicted_file.write('herp\nderp\n')
cmd_output('git', 'add', 'conflict_file')
def test_get_conflicted_files(in_merge_conflict):
resolve_conflict()
with open('other_file', 'w') as other_file:
other_file.write('oh hai')
cmd_output('git', 'add', 'other_file')
ret = set(git.get_conflicted_files())
assert ret == set(('conflict_file', 'other_file'))
def test_get_conflicted_files_unstaged_files(in_merge_conflict):
# If they for whatever reason did pre-commit run --no-stash during a
# conflict
resolve_conflict()
# Make unstaged file.
with open('bar_only_file', 'w') as bar_only_file:
bar_only_file.write('new contents!\n')
ret = set(git.get_conflicted_files())
assert ret == set(('conflict_file',))
MERGE_MSG = "Merge branch 'foo' into bar\n\nConflicts:\n\tconflict_file\n"
OTHER_MERGE_MSG = MERGE_MSG + '\tother_conflict_file\n'
@pytest.mark.parametrize(
('input', 'expected_output'),
(
(MERGE_MSG, ['conflict_file']),
(OTHER_MERGE_MSG, ['conflict_file', 'other_conflict_file']),
),
)
def test_parse_merge_msg_for_conflicts(input, expected_output):
ret = git.parse_merge_msg_for_conflicts(input)
assert ret == expected_output
| Teino1978-Corp/pre-commit | tests/git_test.py | Python | mit | 3,799 |
###############################################################################
#
# $Id: example6.py 718 2012-04-15 23:59:35Z weegreenblobbie $
#
###############################################################################
from Nsound import *
sr = 44100.0
BITS_PER_SAMPLE = 16
###############################################################################
def softTones(sr, duration, f1, f2, gaussian_width):
sin = Sine(sr)
audio = AudioStream(sr, 2)
envelope = sin.drawFatGaussian(duration, gaussian_width)
audio[0] = sin.generate(duration, f1)
audio[1] = sin.generate(duration, f2)
return audio * envelope
###############################################################################
# Main
sine = Sine(sr)
out = AudioStream(sr, 2)
out << softTones(sr, 0.25, 261.63, 523.25, 0.90) \
<< softTones(sr, 0.25, 493.87, 293.66, 0.90) \
<< softTones(sr, 0.25, 329.61, 439.96, 0.90) \
<< softTones(sr, 0.25, 391.97, 349.22, 0.90) \
<< softTones(sr, 0.25, 349.22, 391.97, 0.90) \
<< softTones(sr, 0.25, 439.96, 329.61, 0.90) \
<< softTones(sr, 0.25, 293.66, 493.87, 0.90) \
<< softTones(sr, 0.25, 523.25, 261.63, 0.90) \
<< softTones(sr, 0.25, 261.63, 523.25, 0.90) \
<< softTones(sr, 0.25, 493.87, 293.66, 0.90) \
<< softTones(sr, 0.25, 329.61, 439.96, 0.90) \
<< softTones(sr, 0.25, 391.97, 349.22, 0.90) \
<< softTones(sr, 0.25, 349.22, 391.97, 0.90) \
<< softTones(sr, 0.25, 439.96, 329.61, 0.90) \
<< softTones(sr, 0.25, 293.66, 493.87, 0.90) \
<< softTones(sr, 0.25, 523.25, 261.63, 0.90) \
<< sine.silence(0.25)
out << softTones(sr, 0.25, 261.63, 523.25, 0.30) \
<< softTones(sr, 0.25, 493.87, 293.66, 0.30) \
<< softTones(sr, 0.25, 329.61, 439.96, 0.30) \
<< softTones(sr, 0.25, 391.97, 349.22, 0.30) \
<< softTones(sr, 0.25, 349.22, 391.97, 0.30) \
<< softTones(sr, 0.25, 439.96, 329.61, 0.30) \
<< softTones(sr, 0.25, 293.66, 493.87, 0.30) \
<< softTones(sr, 0.25, 523.25, 261.63, 0.30) \
<< softTones(sr, 0.25, 261.63, 523.25, 0.30) \
<< softTones(sr, 0.25, 493.87, 293.66, 0.30) \
<< softTones(sr, 0.25, 329.61, 439.96, 0.30) \
<< softTones(sr, 0.25, 391.97, 349.22, 0.30) \
<< softTones(sr, 0.25, 349.22, 391.97, 0.30) \
<< softTones(sr, 0.25, 439.96, 329.61, 0.30) \
<< softTones(sr, 0.25, 293.66, 493.87, 0.30) \
<< softTones(sr, 0.25, 523.25, 261.63, 0.30) \
<< sine.silence(0.25)
out *= 0.25
out >> "example6.wav"
# ReverberationRoom(sample_rate, room_feedback, wet_percent, dry_percent, low_pass_freq)
room = ReverberationRoom(sr, 0.50, 1.0, 1.0, 100.0)
out2 = 0.25 * room.filter(out)
out2 >> "example6_reverb.wav"
pb = AudioPlayback(sr, 2, 16)
out2 >> pb
| weegreenblobbie/nsound | src/examples/example6.py | Python | gpl-2.0 | 2,748 |
import unittest
from flask import current_app
from app import create_app, db
class BasicsTestCase(unittest.TestCase):
# Runs before each test
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
# Runs after each test
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
# Make sure the app exists
def test_app_exists(self):
self.assertFalse(current_app is None)
# Make sure the app is running with TESTING config
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
| bitmotive/flask-boilerplate | tests/test_basics.py | Python | mit | 705 |
from __future__ import absolute_import
from __future__ import print_function
from chains.commandline.formatter import Formatter
import time
class FormatterManagerList(Formatter):
def main(self, result):
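        # Expected ``result`` structure (illustrative values), keyed by manager id:
        #   {'manager-1': {'online': True, 'heartbeat': 1496000000.0},
        #    'manager-2': {'online': False}}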
# fmt = '%-20s %-10s %-10s %s'
fmt = '%-20s %-10s %s'
ret = []
ret.append('-' * 60)
# ret.append( fmt % ('Manager', 'Online', 'Services', 'Last heartbeat') )
ret.append(fmt % ('Manager', 'Online', 'Last heartbeat'))
ret.append('-' * 60)
for managerId in result:
values = [managerId]
if result[managerId]['online']:
values.append('Online')
else:
values.append('')
# values.append( result[managerId]['services'] )
if 'heartbeat' in result[managerId]:
t = time.time() - float(result[managerId]['heartbeat'])
if t > (60 * 60):
values.append('about %s hours ago' % int(round(t / 60 / 60)))
elif t > 60:
values.append('about %s min ago' % int(round(t / 60)))
else:
values.append('%s sec ago' % int(round(t)))
else:
values.append('')
ret.append(fmt % tuple(values))
return '\n'.join(ret)
| ChainsAutomation/chains | lib/chains/commandline/formatter/ManagerList.py | Python | gpl-2.0 | 1,318 |
# coding: utf-8
from pymorphy2 import MorphAnalyzer
import re
from random import shuffle
class Morpher(MorphAnalyzer):
def __init__(self):
super(Morpher, self).__init__()
self.zaebat = self.parse(u'заебать')[0]
self.genders = ('neut', 'femn', 'masc')
def is_noun(self, word):
for w in self.parse(word):
if 'NOUN' not in w.tag:
return False
return True
def normalize_word(self, word):
parsed = self.parse(word)
single = lambda w: 'sing' in w.tag or 'Sgtm' in w.tag
if any(map(single, parsed)):
return filter(single, parsed)[0].normal_form
return parsed[0].normal_form
def zaeb(self, word):
w = self.parse(word)[0]
gender = w.tag.gender
if w.tag.number == 'plur':
return u'Заебали'
if gender in self.genders:
return self.zaebat.inflect(set([gender])).word
return u'Заебись'
def ru_only(self, string):
return set(map(unicode.lower, re.findall(u'[А-Яа-я]+', string)))
def process_to_words(self, string):
words = filter(self.is_noun, self.ru_only(string))
normal_words = map(self.normalize_word, words)
shuffle(normal_words)
return normal_words[:1]
def word2phrase(self, word):
return "%s %s" % (self.zaeb(word), word)
| strizhechenko/twitterbots | morpher.py | Python | gpl-3.0 | 1,400 |
from dateutils import date
class Config(object):
SECRET_KEY = '%\xe3\xc2\x8c\xff\x1c\x16\xf0\x82\xc9\x15\nG|e\x85[\x82\x19:\xb7\xb6\xf6h'
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])
MAX_CONTENT_LENGTH = 2 * 1024 * 1024
SQLALCHEMY_ECHO = False
THUMB_WIDTH = 200
THUMB_HEIGHT = 200
SQLALCHEMY_DATABASE_URI = 'mysql://urbanjungle:urbanjungle@localhost/urbanjungle'
class ProductionConfig(Config):
DEBUG = False
TESTING = False
UPLOAD_FOLDER = '/home/urbanjungle/reports/'
THUMBS_FOLDER = '/home/urbanjungle/thumbs/'
class TestConfig(Config):
DEBUG = False
TESTING = True
class DevelopmentConfig(Config):
'''Use "if app.debug" anywhere in your code, that code will run in development code.'''
DEBUG = True
TESTING = True
UPLOAD_FOLDER = '/tmp/upload/'
THUMBS_FOLDER = '/tmp/upload/thumbs/'
| thibault/UrbanJungle | site/urbanjungle/config.py | Python | gpl-3.0 | 860 |
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
left, right, height = [0] * n, [n] * n, [0] * n
res = 0
for i in range(m):
cur_left, cur_right = 0, n
for j in range(n):
if matrix[i][j] == '1':
height[j] += 1
else:
height[j] = 0
for j in range(n):
if matrix[i][j] == '1':
left[j] = max(cur_left, left[j])
else:
left[j] = 0
cur_left = j + 1
for j in range(n - 1, -1, -1):
if matrix[i][j] == '1':
right[j] = min(cur_right, right[j])
else:
right[j] = n
cur_right = j
for j in range(n):
res = max(res, (right[j] - left[j]) * height[j])
return res
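

if __name__ == '__main__':
    # Quick sanity check with the classic example from the problem statement;
    # the largest rectangle of '1's below has area 6.
    grid = [
        ["1", "0", "1", "0", "0"],
        ["1", "0", "1", "1", "1"],
        ["1", "1", "1", "1", "1"],
        ["1", "0", "0", "1", "0"],
    ]
    print(Solution().maximalRectangle(grid))  # expected: 6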
| Lanceolata/code-problems | python/leetcode/Question_085_Maximal_Rectangle.py | Python | mit | 1,126 |
import os; os.environ.setdefault("DJANGO_SETTINGS_MODULE", "foodtrucks.settings")
import requests, json, datetime, logging, geopy
from geopy.geocoders import GoogleV3
from trucks.models import Truck
api_url = 'https://data.sfgov.org/resource/6a9r-agq8.json?$$exclude_system_fields=false&status=APPROVED'
headers = {'X-App-Token': 'mbB8LuRTwTwODNOcQNYLEKCXJ'}
geolocator = GoogleV3(api_key="AIzaSyCPlyfyEB3jUWgmlxnSeDkFvm6xPcF8Gsg")
r = requests.get(api_url, headers=headers)
if r.status_code != 200:
    logging.error("API call failed with status code: %s" % r.status_code)
else:
trucks = list()
last_modified = datetime.datetime.strptime(r.headers['Last-Modified'], "%a, %d %b %Y %H:%M:%S %Z")
for item in r.json():
truck = Truck()
truck.name = item['applicant']
truck.type = item.get('facilitytype', '')
truck.address = item['address']
truck.food = item['fooditems']
try:
truck.lat = item['latitude']
truck.long = item['longitude']
except KeyError:
location = geolocator.geocode(item['address']+ " San Francisco, CA")
truck.lat, truck.long = location.latitude, location.longitude
trucks.append(truck)
Truck.objects.bulk_create(trucks) | ricefield/foodtrucksofsf | fetch_data.py | Python | mit | 1,269 |
from ggrade import read_tab_file
import argparse
################################################################################
################################################################################
def main():
# Parse the input arguments
parser = argparse.ArgumentParser()
parser.add_argument('infile_name', type=str, default=None, help='Input file name',nargs='?')
parser.add_argument('--solutions-file', dest='outfile_name', type=str,\
default=None, help='Name of output file to write the solutions to.')
args = parser.parse_args()
# Open the file and pull out the information.
questions,solutions,student_answers = None,None,None
if args.infile_name is not None:
questions,solutions,student_answers = read_tab_file(args.infile_name)
solutions_string = "solutions = [ \n"
extra_feedback_string = "feedback_for_everyone = [ \n"
incorrect_feedback_string = "feedback_for_wrong_answers = [ \n"
points_per_question_string = "points_per_question = [ \n"
nsolutions = len(solutions)
# For now, assume the solutions are the first one.
for i,solution in enumerate(solutions):
solutions_string += "\t\"%s\"" % (solution)
extra_feedback_string += "\tNone"
incorrect_feedback_string += "\tNone"
points_per_question_string += "10"
if i != nsolutions-1:
solutions_string += ", # Question %d\n" % (i+1)
extra_feedback_string += ", # Question %d\n" % (i+1)
incorrect_feedback_string += ", # Question %d\n" % (i+1)
points_per_question_string += ", # Question %d\n" % (i+1)
else:
solutions_string += " # Question %d \n" % (i+1)
extra_feedback_string += " # Question %d \n" % (i+1)
incorrect_feedback_string += " # Question %d \n" % (i+1)
points_per_question_string += " # Question %d \n" % (i+1)
solutions_string += "] \n"
extra_feedback_string += "] \n"
incorrect_feedback_string += "] \n"
points_per_question_string += "] \n"
# Write the output to a file.
outfile_name = "solutions.py"
if args.outfile_name is not None:
outfile_name = args.outfile_name
else:
outfile_name = args.infile_name.split('.tsv')[0]
outfile_name = "SOLUTIONS_%s.py" % (outfile_name)
outfile = open(outfile_name,'w+')
outfile.write("# -*- coding: utf-8 -*-")
outfile.write("\n")
outfile.write(solutions_string)
outfile.write("\n")
outfile.write(extra_feedback_string)
outfile.write("\n")
outfile.write(incorrect_feedback_string)
outfile.write("\n")
outfile.write(points_per_question_string)
outfile.close()
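
# For reference, the file written above looks roughly like this (illustrative
# values only; the real answers come from the parsed .tsv):
#
#   solutions = [
#       "42",   # Question 1
#       "blue"  # Question 2
#   ]
#   feedback_for_everyone = [
#       None,   # Question 1
#       None    # Question 2
#   ]
#   feedback_for_wrong_answers = [ ... ]
#   points_per_question = [
#   10,   # Question 1
#   10    # Question 2
#   ]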
################################################################################
################################################################################
if __name__=="__main__":
main()
| mattbellis/ggrade | scripts/parse_response_file.py | Python | gpl-2.0 | 2,923 |
# -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ServiceNow worker.
"""
import os
import datetime
import json
import requests
from urllib import quote_plus
from reworker.worker import Worker
class ServiceNowWorkerError(Exception):
"""
Base exception class for ServiceNowWorker errors.
"""
pass
class ServiceNowWorker(Worker):
"""
Worker which provides basic functionality with ServiceNow change records.
"""
#: All allowed subcommands
subcommands = (
'DoesChangeRecordExist', 'UpdateStartTime',
'UpdateEndTime', 'CreateChangeRecord', 'DoesCTaskExist', 'CreateCTask')
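
    # Illustrative bus message body for the DoesChangeRecordExist subcommand
    # (values are hypothetical; compare the CHG number used in the docstring
    # example further down):
    #
    #   {
    #       "parameters": {"subcommand": "DoesChangeRecordExist"},
    #       "dynamic": {"change_record": "CHG0007331"}
    #   }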
def _get_crq_ids(self, crq):
"""
Returns the sys_id and number for a crq.
*Parameters*:
* crq: The Change Record name.
"""
url = self._config['api_root_url'] + '/table/change_request'
url += '?sysparm_query=%s&sysparm_fields=number,sys_id&sysparm_limit=1' % (
quote_plus('number=' + crq))
response = requests.get(
url,
auth=(
self._config['servicenow_user'],
self._config['servicenow_password']),
headers={'Accept': 'application/json'})
# we should get a 200, else it doesn't exist or server issue
if response.status_code == 200:
result = response.json()['result'][0]
return {'number': result['number'], 'sys_id': result['sys_id']}
return {'number': None, 'sys_id': None}
def does_change_record_exist(self, body, output):
"""
Subcommand which checks to see if a change record exists.
*Dynamic Parameters Requires*:
* change_record: the record to look for.
"""
# TODO: Use _get_crq_ids
expected_record = body.get('dynamic', {}).get('change_record', None)
if not expected_record:
raise ServiceNowWorkerError(
'No change_record to search for given.')
output.info('Checking for change record %s ...' % expected_record)
# service now call
url = self._config['api_root_url'] + '/table/change_request'
url += '?sysparm_query=%s&sysparm_fields=number&sysparm_limit=2' % (
quote_plus('number=' + expected_record))
response = requests.get(
url,
auth=(
self._config['servicenow_user'],
self._config['servicenow_password']),
headers={'Accept': 'application/json'})
# we should get a 200, else it doesn't exist or server issue
if response.status_code == 200:
change_record = response.json()['result'][0]['number']
if change_record == expected_record:
output.info('found change record %s' % change_record)
return {'status': 'completed', 'data': {'exists': True}}
# 404 means it can't be found
elif response.status_code == 404:
output.info('change record %s does not exist.' % expected_record)
if self._config.get('auto_create_change_if_missing', False):
output.info('Automatically creating a change record')
(chg, url) = self.create_change_record(self._config)
output.info('Created change %s' % str(chg))
_data = {
'exists': True,
'new_record': str(chg),
'new_record_url': str(url)
}
return {'status': 'completed', 'data': _data}
else:
return {'status': 'completed', 'data': {'exists': False}}
# anything else is an error
raise ServiceNowWorkerError('api returned %s instead of 200' % (
response.status_code))
def does_c_task_exist(self, body, output):
"""
        Subcommand which checks to see if a CTask exists.
        *Dynamic Parameters Requires*:
        * ctask: the CTask record to look for.
        * change_record: the change record the CTask belongs to.
"""
# TODO: Use _get_crq_ids
expected_record = body.get('dynamic', {}).get('ctask', None)
if not expected_record:
raise ServiceNowWorkerError(
'No ctask to search for given.')
change_record = body.get('dynamic', {}).get('change_record', None)
if not change_record:
raise ServiceNowWorkerError(
'No change_record to search for given.')
output.info('Checking for CTask %s ...' % expected_record)
# service now call
url = self._config['api_root_url'] + '/table/change_task'
url += '?sysparm_limit=1&sysparm_query=%s' % (
quote_plus('number=' + expected_record))
self.app_logger.info('Checking for CTask at %s' % url)
response = requests.get(
url,
auth=(
self._config['servicenow_user'],
self._config['servicenow_password']),
headers={'Accept': 'application/json'})
# we should get a 200, else it doesn't exist or server issue
if response.status_code == 200:
ctask_record = response.json()['result'][0]['number']
if ctask_record == expected_record:
output.info('found CTask record %s' % ctask_record)
return {'status': 'completed', 'data': {'exists': True}}
# 404 means it can't be found
elif response.status_code == 404:
output.info('ctask record %s does not exist.' % expected_record)
if self._config.get('auto_create_c_task_if_missing', False):
output.info('Automatically creating a ctask record')
body['dynamic']['change_record'] = change_record
result = self.create_c_task(body, output)
new_ctask = str(result['data']['ctask'])
output.info('Created ctask %s' % new_ctask)
_data = {
'exists': True,
'new_ctask': new_ctask,
}
return {'status': 'completed', 'data': _data}
else:
return {'status': 'completed', 'data': {'exists': False}}
# anything else is an error
raise ServiceNowWorkerError('api returned %s instead of 200' % (
response.status_code))
def update_time(self, body, output, kind):
"""
Subcommand which updates timing in Service Now.
*Parameters*:
* body: The message body.
* output: The output instance back to the user.
* kind: start or end
*Dynamic Parameters Requires*:
* change_record: the record to look for.
* environment: the environment record to update
"""
change_record = body.get('dynamic', {}).get('change_record', None)
environment = body.get('dynamic', {}).get('environment', None)
if not change_record:
raise ServiceNowWorkerError('No change_record was given.')
if not environment:
raise ServiceNowWorkerError('No environment was given.')
output.info('Updating the %s %s time for %s ...' % (
environment, kind, change_record))
# Get the sys_id
sys_id = self._get_crq_ids(change_record)['sys_id']
# We should get a 200, else it doesn't exist or server issue
if sys_id:
output.info('Found change record %s with sys_id %s' % (
change_record, sys_id))
# Now we have the sys_id, we should be able to update the time
key = 'u_%s_%s_time' % (environment, kind)
value = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
payload = {
key: value,
}
record_url = self._config['api_root_url'] + '%s%s' % (
'/table/change_request/', sys_id)
response = requests.put(
record_url,
auth=(
self._config['servicenow_user'],
self._config['servicenow_password']),
headers={'Accept': 'application/json'},
data=json.dumps(payload))
# Return success if we have a 200, else fall into the
# "Anything else is an error" below
if response.status_code == 200:
return {'status': 'completed'}
else:
raise ServiceNowWorkerError('API returned %s instead of 200' % (
response.status_code))
# Anything else is an error
output.error('Could not update timing due to missing change record')
raise ServiceNowWorkerError('Could not update timing due to missing change record')
def create_change_record(self, config):
"""
Create a new change record. Adds a record to the import table
which is later processed by transformation maps.
"""
url = config['api_import_url']
auth = (
config['servicenow_user'],
config['servicenow_password']
)
headers = {
'content-type': 'application/json',
'Accept': 'application/json'
}
# Process the change record template into a handy-dandy string
# to send in the API POST call
payload = self._do_change_template(config)
response = requests.post(
url,
data=payload,
headers=headers,
auth=auth)
if response.status_code == 201:
"""
Example response:
r = {'import_set': 'ISET0011337',
'result': [
{'display_name': 'number',
'display_value': 'CHG0007331',
'record_link': 'https://example.service-now.com/api/now/table/change_request/d6e68a52fd5f31ff296db3236d1f6bfb',
'status': 'inserted',
'sys_id': 'd6e68a52fd5f31ff296db3236d1f6bfb',
'table': 'change_request',
'transform_map': 'Auto Transform Change Map'}
],
'staging_table': 'u_test_change_creation'
}
"""
result = response.json()['result'][0]
change_record = result['display_value']
change_url = result['record_link']
self.app_logger.info("Change record {CHG_NUM} created: {CHG_URL}".format(
CHG_NUM=change_record,
CHG_URL=change_url)
)
return (change_record, change_url)
elif response.status_code == 403:
self.app_logger.info("Service Now API account unauthorized to create change record")
raise ServiceNowWorkerError(
"403 unauthorized response when creating change record: {ERR_MSG}".format(
ERR_MSG=response.text)
)
else:
self.app_logger.info("Unexpected response [{CODE}] when creating change record {ERR_MSG}".format(
CODE=response.status_code,
ERR_MSG=response.text)
)
raise ServiceNowWorkerError("Unexpected response [{CODE}] when creating change record {ERR_MSG}".format(
CODE=response.status_code,
ERR_MSG=response.text)
)
def create_c_task(self, body, output):
"""
Create a new CTask.
"""
output.info("Attempting to create a new CTask ...")
change_record = body.get('dynamic', {}).get('change_record', None)
if not change_record:
raise ServiceNowWorkerError(
'No change_record given for CTask creation.')
url = self._config['api_root_url'] + '/table/change_task'
auth = (
self._config['servicenow_user'],
self._config['servicenow_password']
)
headers = {
'content-type': 'application/json',
'Accept': 'application/json'
}
payload = self._config['c_task_payload'].copy()
payload['change_request'] = change_record
if body.get('dynamic', {}).get('ctask_description', None):
payload['short_description'] = body['dynamic']['ctask_description']
payload['description'] = payload['short_description']
response = requests.post(
url,
data=json.dumps(payload),
headers=headers,
auth=auth)
if response.status_code == 201:
result = response.json()['result']
ctask = result['number']
change_url = result['change_request']['link']
self.app_logger.info(
"CTask {CTASK} created for CHG {CHG_NUM}: {CHG_URL}".format(
CTASK=ctask,
CHG_NUM=change_record,
CHG_URL=change_url)
)
output.info("Created CTask {0}".format(ctask))
return {'status': 'completed', 'data': {'ctask': ctask}}
elif response.status_code == 403:
self.app_logger.info("Service Now API account unauthorized to create CTask")
raise ServiceNowWorkerError(
"403 unauthorized response when creating CTask: {ERR_MSG}".format(
ERR_MSG=response.text)
)
else:
self.app_logger.info("Unexpected response [{CODE}] when creating CTask {ERR_MSG}".format(
CODE=response.status_code,
ERR_MSG=response.text)
)
raise ServiceNowWorkerError("Unexpected response [{CODE}] when creating CTask {ERR_MSG}".format(
CODE=response.status_code,
ERR_MSG=response.text)
)
# Skip covering this, it mostly calls the date method (below)
def _do_change_template(self, config): # pragma: no cover
"""Processes a change record payload template. Makes a fresh copy
(object) from our config file, and then calculates and inserts dynamic
data (like dates, etc)
Returns a serialized dictionary representing the JSON payload for our POST """
# Get our base payload datastructure and make a copy for manipulating
payload = config['change_record_payload'].copy()
# Set our start/end date fields
payload.update(self._make_start_end_dates(
config['start_date_diff'],
config['end_date_diff'])
)
return json.dumps(payload)
def _make_start_end_dates(self, start_date_diff, end_date_diff):
"""Calculate the correct start/end dates for the new change record."""
start_diff = datetime.timedelta(**start_date_diff)
end_diff = datetime.timedelta(**end_date_diff)
# We don't want the end date to be before (or the same as) the
# start date, no no no!
if start_diff >= end_diff:
raise ServiceNowWorkerError("'start_date_diff' must be less than 'end_date_diff'")
now = datetime.datetime.now()
start_date = (now + start_diff).strftime('%Y-%m-%d %H:%M:%S')
end_date = (now + end_diff).strftime('%Y-%m-%d %H:%M:%S')
self.app_logger.info("Calculated start through end dates: {start_date} - {end_date}".format(
start_date=start_date,
end_date=end_date))
return {
"u_start_date": start_date,
"u_end_date": end_date
}
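
    # For example (hypothetical values), _make_start_end_dates({'hours': 1},
    # {'hours': 4}) returns u_start_date/u_end_date strings one and four hours
    # from now, formatted as '%Y-%m-%d %H:%M:%S'.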
def process(self, channel, basic_deliver, properties, body, output):
"""
Writes out output messages from the bus.
*Keys Requires*:
* subcommand: the subcommand to execute.
"""
# Ack the original message
self.ack(basic_deliver)
corr_id = str(properties.correlation_id)
self.send(
properties.reply_to,
corr_id,
{'status': 'started'},
exchange=''
)
self.notify(
"Servicenow Worker starting",
"servicenow Worker starting",
'started',
corr_id
)
output.info("Starting now")
try:
try:
subcommand = str(body['parameters']['subcommand'])
if subcommand not in self.subcommands:
raise KeyError()
except KeyError:
raise ServiceNowWorkerError(
'No valid subcommand given. Nothing to do!')
if subcommand == 'DoesChangeRecordExist':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
result = self.does_change_record_exist(body, output)
elif subcommand == 'UpdateStartTime':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
result = self.update_time(body, output, 'start')
elif subcommand == 'UpdateEndTime':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
result = self.update_time(body, output, 'end')
elif subcommand == 'CreateChangeRecord':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
                # create_change_record() only takes the config; wrap its
                # (number, url) tuple in the dict structure other subcommands return.
                chg, chg_url = self.create_change_record(self._config)
                result = {'status': 'completed', 'data': {
                    'new_record': str(chg), 'new_record_url': str(chg_url)}}
elif subcommand == 'CreateCTask':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
result = self.create_c_task(body, output)
elif subcommand == 'DoesCTaskExist':
self.app_logger.info(
'Executing subcommand %s for correlation_id %s' % (
subcommand, corr_id))
result = self.does_c_task_exist(body, output)
else:
self.app_logger.warn(
'Could not the implementation of subcommand %s' % (
subcommand))
raise ServiceNowWorkerError('No subcommand implementation')
# Send results back
self.send(
properties.reply_to,
corr_id,
result,
exchange=''
)
# Notify on result. Not required but nice to do.
self.notify(
'ServiceNowWorker Executed Successfully',
'ServiceNowWorker successfully executed %s. See logs.' % (
subcommand),
'completed',
corr_id)
# Send out responses
self.app_logger.info(
'ServiceNowWorker successfully executed %s for '
'correlation_id %s. See logs.' % (
subcommand, corr_id))
except ServiceNowWorkerError, fwe:
# If a ServiceNowWorkerError happens send a failure log it.
self.app_logger.error('Failure: %s' % fwe)
self.send(
properties.reply_to,
corr_id,
{'status': 'failed'},
exchange=''
)
self.notify(
'ServiceNowWorker Failed',
str(fwe),
'failed',
corr_id)
output.error(str(fwe))
def main(): # pragma: no cover
from reworker.worker import runner
runner(ServiceNowWorker)
if __name__ == '__main__': # pragma nocover
main()
| RHInception/re-worker-servicenow | replugin/servicenowworker/__init__.py | Python | agpl-3.0 | 20,375 |
from __future__ import absolute_import
from __future__ import division
# Copyright (c) 2010-2017 openpyxl
import math
#constants
DEFAULT_ROW_HEIGHT = 15. # Default row height measured in point size.
BASE_COL_WIDTH = 13 # in characters
DEFAULT_COLUMN_WIDTH = 51.85 # in points, should be characters
DEFAULT_LEFT_MARGIN = 0.7 # in inches, = right margin
DEFAULT_TOP_MARGIN = 0.7874 # in inches = bottom margin
DEFAULT_HEADER = 0.3 # in inches
# Conversion functions
"""
From the ECMA Spec (4th Edition part 1)
Page setup: "Left Page Margin in inches" p. 1647
Docs from
http://startbigthinksmall.wordpress.com/2010/01/04/points-inches-and-emus-measuring-units-in-office-open-xml/
See also http://msdn.microsoft.com/en-us/library/dd560821(v=office.12).aspx
dxa: The main unit in OOXML is a twentieth of a point. Also called twips.
pt: point. In Excel there are 72 points to an inch
hp: half-points are used to specify font sizes. A font-size of 12pt equals 24 half points
pct: percentage values; in OOXML these are often expressed in fiftieths of a percent
EMU: English Metric Unit, EMUs are used for coordinates in vector-based
drawings and embedded pictures. One inch equates to 914400 EMUs and a
centimeter is 360000. For bitmaps the default resolution is 96 dpi (known as
PixelsPerInch in Excel). Spec p. 1122
For radial geometry Excel uses integer units of 1/60000th of a degree.
"""
def inch_to_dxa(value):
"""1 inch = 72 * 20 dxa"""
return int(value * 20 * 72)
def dxa_to_inch(value):
return value / 72 / 20
def dxa_to_cm(value):
return 2.54 * dxa_to_inch(value)
def cm_to_dxa(value):
emu = cm_to_EMU(value)
inch = EMU_to_inch(emu)
return inch_to_dxa(inch)
def pixels_to_EMU(value):
"""1 pixel = 9525 EMUs"""
return int(value * 9525)
def EMU_to_pixels(value):
return round(value / 9525)
def cm_to_EMU(value):
"""1 cm = 360000 EMUs"""
return int(value * 360000)
def EMU_to_cm(value):
return round(value / 360000, 4)
def inch_to_EMU(value):
"""1 inch = 914400 EMUs"""
return int(value * 914400)
def EMU_to_inch(value):
return round(value / 914400, 4)
def pixels_to_points(value, dpi=96):
"""96 dpi, 72i"""
return value * 72 / dpi
def points_to_pixels(value, dpi=96):
return int(math.ceil(value * dpi / 72))
def degrees_to_angle(value):
"""1 degree = 60000 angles"""
return int(round(value * 60000))
def angle_to_degrees(value):
return round(value / 60000, 2)
def short_color(color):
""" format a color to its short size """
if len(color) > 6:
return color[2:]
return color
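

if __name__ == '__main__':
    # A few spot checks of the conversions above (not part of the original module).
    print(inch_to_dxa(1))           # 1440 twips per inch
    print(cm_to_EMU(2.54))          # 914400 EMUs per inch
    print(pixels_to_points(96))     # 72.0 points at the default 96 dpi
    print(points_to_pixels(72))     # 96 pixels
    print(degrees_to_angle(90))     # 5400000 (1/60000ths of a degree)
    print(short_color('FF00FF00'))  # '00FF00'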
| 171121130/SWI | venv/Lib/site-packages/openpyxl/utils/units.py | Python | mit | 2,629 |
# -*- coding: utf-8 -*-
'''
Yoda Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['dizigold.net', 'dizigold1.com']
self.base_link = 'http://www.dizigold2.com'
self.player_link = 'http://player.dizigold2.com/?id=%s&s=1&dil=%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
result = cache.get(self.dizigold_tvcache, 120)
tvshowtitle = cleantitle.get(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
url = urlparse.urljoin(self.base_link, result)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def dizigold_tvcache(self):
try:
result = client.request(self.base_link)
result = client.parseDOM(result, 'div', attrs = {'class': 'dizis'})[0]
result = re.compile('href="(.+?)">(.+?)<').findall(result)
result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in result]
result = [(i[0], cleantitle.get(i[1])) for i in result]
return result
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if url == None: return
url = '/%s/%01d-sezon/%01d-bolum' % (url.replace('/', ''), int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
base_url = urlparse.urljoin(self.base_link, url)
result = client.request(base_url)
id = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]
for dil in ['tr', 'or', 'en']:
query = self.player_link % (id, dil)
result = client.request(query, referer=base_url)
try:
url = client.parseDOM(result, 'iframe', ret='src')[-1]
if 'openload' in url:
host = 'openload.co' ; direct = False ; url = [{'url': url, 'quality': 'HD'}]
elif 'ok.ru' in url:
host = 'vk' ; direct = True ; url = directstream.odnoklassniki(url)
elif 'vk.com' in url:
host = 'vk' ; direct = True ; url = directstream.vk(url)
else: raise Exception()
for i in url: sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': direct, 'debridonly': False})
except:
pass
try:
url = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"').findall(result)
links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/dizigold.py | Python | gpl-2.0 | 4,450 |
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# Copyright (c) 2012 Berlin Institute of Technology
# All rights reserved.
#
# Developed by: Philipp Meier <[email protected]>
# Neural Information Processing Group (NI)
# School for Electrical Engineering and Computer Science
# Berlin Institute of Technology
# MAR 5-6, Marchstr. 23, 10587 Berlin, Germany
# http://www.ni.tu-berlin.de/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal with the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# * Neither the names of Neural Information Processing Group (NI), Berlin
# Institute of Technology, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# WITH THE SOFTWARE.
#_____________________________________________________________________________
#
# Acknowledgements:
# Philipp Meier <[email protected]>
#_____________________________________________________________________________
#
"""datafile implementation for wri file format"""
__docformat__ = 'restructuredtext'
__all__ = ['WriFile', '_WRI_H']
##---IMPORTS
import scipy as sp
from .datafile import DataFile, DataFileError
from ..funcs_general import dict_list_to_ndarray
##---CONSTANTS
VERBOSE = False
##---CLASSES
class _WRI_H(object):
"""WRI header struct"""
def __init__(self, fp):
"""
:type fp: file
:param fp: open file at seek(0)
"""
# version
self.srate = fp.readline().strip('\r\n').split()
if self.srate[0] != 'Sampling':
raise DataFileError('expected "Sampling:" in first row!"')
self.srate = int(self.srate[1][10:])
if VERBOSE:
print self.srate
print "header done."
class WriFile(DataFile):
"""WRI file format - Chen Sorter"""
## constructor
def __init__(self, filename=None, dtype=sp.float32):
# members
self.header = None
self.data = None
self.npdata = None
# super
super(WriFile, self).__init__(filename=filename, dtype=dtype)
## implementation
def _initialize_file(self, filename, **kwargs):
# open file
self.fp = open(filename, 'r')
# read header info
self.header = _WRI_H(self.fp)
self.data = {}
# read data
current_unit = None
line = self.fp.readline().strip('\r\n')
while line:
if line.isdigit():
# we found a spike for the current unit.
# Current unit should not be None at this point
self.data[current_unit].append(int(line))
else:
# This is a subheader indicating a new unit
current_unit = line[5]
self.data[current_unit] = []
line = self.fp.readline().strip('\r\n')
# Convert the lists to numpyarrays for the spike train alignment
# function
self.npdata = dict_list_to_ndarray(self.data)
if VERBOSE:
print "found_units: "
print self.data.keys()
def _close(self):
self.fp.close()
def _closed(self):
return self.fp.closed
def _filename(self):
return self.fp.name
def _get_data(self, **kwargs):
""" Returns the wri content as a dictionary of numpy arrays
:rtype: dict
:returns: mapping unit id to spike train
"""
return self.npdata
if __name__ == '__main__':
w = WriFile('C:\\\\SVN\\\\Datenanalyse\\\\Alle\\\write_test000.wri')
print w.get_data()
| pmeier82/BOTMpy | botmpy/common/datafile/wri.py | Python | mit | 4,875 |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 742 $"
import copy
import numpy
try:
from PyQuante.CGBF import CGBF
module_pyq = True
except:
module_pyq = False
try:
from pyvtk import *
from pyvtk.DataSetAttr import *
module_pyvtk = True
except:
module_pyvtk = False
from cclib.bridge import makepyquante
from cclib.parser.utils import convertor
class Volume(object):
"""Represent a volume in space.
Required parameters:
origin -- the bottom left hand corner of the volume
topcorner -- the top right hand corner
spacing -- the distance between the points in the cube
Attributes:
data -- a numpy array of values for each point in the volume
(set to zero at initialisation)
numpts -- the numbers of points in the (x,y,z) directions
"""
def __init__(self, origin, topcorner, spacing):
self.origin = origin
self.spacing = spacing
self.topcorner = topcorner
self.numpts = []
for i in range(3):
self.numpts.append(int((self.topcorner[i]-self.origin[i])/self.spacing[i] + 1) )
self.data = numpy.zeros( tuple(self.numpts), "d")
def __str__(self):
"""Return a string representation."""
return "Volume %s to %s (density: %s)" % (self.origin, self.topcorner,
self.spacing)
def write(self, filename, format="Cube"):
"""Write the volume to file."""
format = format.upper()
        if format not in ["VTK", "CUBE"]:
            raise ValueError("Format must be either VTK or Cube")
elif format=="VTK":
self.writeasvtk(filename)
else:
self.writeascube(filename)
def writeasvtk(self, filename):
if not module_pyvtk:
raise Exception, "You need to have pyvtk installed"
ranges = (numpy.arange(self.data.shape[2]),
numpy.arange(self.data.shape[1]),
numpy.arange(self.data.shape[0]))
v = VtkData(RectilinearGrid(*ranges), "Test",
PointData(Scalars(self.data.ravel(), "from cclib", "default")))
v.tofile(filename)
def integrate(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()) * boxvol
def integrate_square(self):
boxvol = (self.spacing[0] * self.spacing[1] * self.spacing[2] *
convertor(1, "Angstrom", "bohr")**3)
return sum(self.data.ravel()**2) * boxvol
def writeascube(self, filename):
# Remember that the units are bohr, not Angstroms
convert = lambda x : convertor(x, "Angstrom", "bohr")
ans = []
ans.append("Cube file generated by cclib")
ans.append("")
format = "%4d%12.6f%12.6f%12.6f"
origin = [convert(x) for x in self.origin]
ans.append(format % (0, origin[0], origin[1], origin[2]))
ans.append(format % (self.data.shape[0], convert(self.spacing[0]), 0.0, 0.0))
ans.append(format % (self.data.shape[1], 0.0, convert(self.spacing[1]), 0.0))
ans.append(format % (self.data.shape[2], 0.0, 0.0, convert(self.spacing[2])))
line = []
for i in range(self.data.shape[0]):
for j in range(self.data.shape[1]):
for k in range(self.data.shape[2]):
line.append(scinotation(self.data[i][j][k]))
if len(line)==6:
ans.append(" ".join(line))
line = []
if line:
ans.append(" ".join(line))
line = []
outputfile = open(filename, "w")
outputfile.write("\n".join(ans))
outputfile.close()
def scinotation(num):
"""Write in scientific notation
>>> scinotation(1./654)
' 1.52905E-03'
>>> scinotation(-1./654)
'-1.52905E-03'
"""
ans = "%10.5E" % num
broken = ans.split("E")
exponent = int(broken[1])
if exponent<-99:
return " 0.000E+00"
if exponent<0:
sign="-"
else:
sign="+"
return ("%sE%s%s" % (broken[0],sign,broken[1][-2:])).rjust(12)
def getbfs(coords, gbasis):
"""Convenience function for both wavefunction and density based on PyQuante Ints.py."""
mymol = makepyquante(coords, [0 for x in coords])
sym2powerlist = {
'S' : [(0,0,0)],
'P' : [(1,0,0),(0,1,0),(0,0,1)],
'D' : [(2,0,0),(0,2,0),(0,0,2),(1,1,0),(0,1,1),(1,0,1)],
'F' : [(3,0,0),(2,1,0),(2,0,1),(1,2,0),(1,1,1),(1,0,2),
(0,3,0),(0,2,1),(0,1,2), (0,0,3)]
}
bfs = []
for i,atom in enumerate(mymol):
bs = gbasis[i]
for sym,prims in bs:
for power in sym2powerlist[sym]:
bf = CGBF(atom.pos(),power)
for expnt,coef in prims:
bf.add_primitive(expnt,coef)
bf.normalize()
bfs.append(bf)
return bfs
def wavefunction(coords, mocoeffs, gbasis, volume):
"""Calculate the magnitude of the wavefunction at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for one eigenvalue
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
"""
bfs = getbfs(coords, gbasis)
wavefn = copy.copy(volume)
wavefn.data = numpy.zeros( wavefn.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion
for bs in range(len(bfs)):
data = numpy.zeros( wavefn.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
for k,zval in enumerate(z):
data[i, j, k] = bfs[bs].amp(xval,yval,zval)
numpy.multiply(data, mocoeffs[bs], data)
numpy.add(wavefn.data, data, wavefn.data)
return wavefn
def electrondensity(coords, mocoeffslist, gbasis, volume):
"""Calculate the magnitude of the electron density at every point in a volume.
Attributes:
coords -- the coordinates of the atoms
mocoeffs -- mocoeffs for all of the occupied eigenvalues
gbasis -- gbasis from a parser object
volume -- a template Volume object (will not be altered)
Note: mocoeffs is a list of numpy arrays. The list will be of length 1
for restricted calculations, and length 2 for unrestricted.
"""
bfs = getbfs(coords, gbasis)
density = copy.copy(volume)
density.data = numpy.zeros( density.data.shape, "d")
conversion = convertor(1,"bohr","Angstrom")
x = numpy.arange(density.origin[0], density.topcorner[0]+density.spacing[0], density.spacing[0]) / conversion
y = numpy.arange(density.origin[1], density.topcorner[1]+density.spacing[1], density.spacing[1]) / conversion
z = numpy.arange(density.origin[2], density.topcorner[2]+density.spacing[2], density.spacing[2]) / conversion
for mocoeffs in mocoeffslist:
for mocoeff in mocoeffs:
wavefn = numpy.zeros( density.data.shape, "d")
for bs in range(len(bfs)):
data = numpy.zeros( density.data.shape, "d")
for i,xval in enumerate(x):
for j,yval in enumerate(y):
tmp = []
for k,zval in enumerate(z):
tmp.append(bfs[bs].amp(xval, yval, zval))
data[i,j,:] = tmp
numpy.multiply(data, mocoeff[bs], data)
numpy.add(wavefn, data, wavefn)
density.data += wavefn**2
if len(mocoeffslist) == 1:
density.data = density.data*2. # doubly-occupied
return density
if __name__=="__main__":
try:
import psyco
psyco.full()
except ImportError:
pass
from cclib.parser import ccopen
import logging
a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
a.logger.setLevel(logging.ERROR)
c = a.parse()
b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
b.logger.setLevel(logging.ERROR)
d = b.parse()
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
c.gbasis, vol)
assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
assert abs(wavefn.integrate_square() - 1.00)<1E-3 # true for all wavefns
print wavefn.integrate(), wavefn.integrate_square()
vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
frontierorbs = [d.mocoeffs[0][(d.homos[0]-3):(d.homos[0]+1)]]
density = electrondensity(d.atomcoords[0], frontierorbs, c.gbasis, vol)
assert abs(density.integrate()-8.00)<1E-2
print "Combined Density of 4 Frontier orbitals=",density.integrate()
| faribas/RMG-Java | source/cclib/method/volume.py | Python | mit | 9,507 |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test synchronizer using DocManagerSimulator
"""
import os
import sys
import time
sys.path[0:0] = [""]
from mongo_connector.connector import Connector
from tests import unittest, connector_opts
from tests.setup_cluster import ReplicaSet
from tests.util import assert_soon
class TestSynchronizer(unittest.TestCase):
""" Tests the synchronizers
"""
@classmethod
def setUpClass(cls):
""" Initializes the cluster
"""
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
cls.repl_set = ReplicaSet().start()
cls.conn = cls.repl_set.client()
cls.connector = Connector(
mongo_address=cls.repl_set.uri,
ns_set=['test.test'],
**connector_opts
)
cls.synchronizer = cls.connector.doc_managers[0]
cls.connector.start()
assert_soon(lambda: len(cls.connector.shard_set) != 0)
@classmethod
def tearDownClass(cls):
""" Tears down connector
"""
cls.connector.join()
cls.repl_set.stop()
def setUp(self):
""" Clears the db
"""
self.conn['test']['test'].delete_many({})
assert_soon(lambda: len(self.synchronizer._search()) == 0)
def test_insert(self):
"""Tests insert
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
while (len(self.synchronizer._search()) == 0):
time.sleep(1)
result_set_1 = self.synchronizer._search()
self.assertEqual(len(result_set_1), 1)
result_set_2 = self.conn['test']['test'].find_one()
for item in result_set_1:
self.assertEqual(item['_id'], result_set_2['_id'])
self.assertEqual(item['name'], result_set_2['name'])
def test_ns_set(self):
self.conn.test.other.insert_one({"replicated": False})
results = self.synchronizer._search()
self.assertEqual(len(results), 0,
"Should not replicate outside of test.test namespace")
def test_remove(self):
"""Tests remove
"""
self.conn['test']['test'].insert_one({'name': 'paulie'})
while (len(self.synchronizer._search()) != 1):
time.sleep(1)
self.conn['test']['test'].delete_one({'name': 'paulie'})
while (len(self.synchronizer._search()) == 1):
time.sleep(1)
result_set_1 = self.synchronizer._search()
self.assertEqual(len(result_set_1), 0)
def test_update(self):
"""Test that Connector can replicate updates successfully."""
doc = {"a": 1, "b": 2}
self.conn.test.test.insert_one(doc)
selector = {"_id": doc['_id']}
def update_and_retrieve(update_spec, replace=False):
if replace:
self.conn.test.test.replace_one(selector, update_spec)
else:
self.conn.test.test.update_one(selector, update_spec)
# self.conn.test.test.update(selector, update_spec)
# Give the connector some time to perform update
time.sleep(1)
return self.synchronizer._search()[0]
# Update whole document
doc = update_and_retrieve({"a": 1, "b": 2, "c": 10}, replace=True)
self.assertEqual(doc['a'], 1)
self.assertEqual(doc['b'], 2)
self.assertEqual(doc['c'], 10)
# $set only
doc = update_and_retrieve({"$set": {"b": 4}})
self.assertEqual(doc['a'], 1)
self.assertEqual(doc['b'], 4)
# $unset only
doc = update_and_retrieve({"$unset": {"a": True}})
self.assertNotIn('a', doc)
self.assertEqual(doc['b'], 4)
# mixed $set/$unset
doc = update_and_retrieve({"$unset": {"b": True}, "$set": {"c": 3}})
self.assertEqual(doc['c'], 3)
self.assertNotIn('b', doc)
# ensure update works when fields are given
opthread = self.connector.shard_set[0]
opthread.fields = ['a', 'b', 'c']
try:
doc = update_and_retrieve({"$set": {"d": 10}})
self.assertEqual(self.conn.test.test.find_one(doc['_id'])['d'], 10)
self.assertNotIn('d', doc)
doc = update_and_retrieve({"$set": {"a": 10}})
self.assertEqual(doc['a'], 10)
finally:
# cleanup
opthread.fields = None
if __name__ == '__main__':
unittest.main()
| XDestination/mongo-connector | tests/test_synchronizer.py | Python | apache-2.0 | 5,053 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the VSS resolver helper implementation."""
import unittest
from dfvfs.resolver import vshadow_resolver_helper
from tests.resolver import test_lib
class VShadowResolverHelperTest(test_lib.ResolverHelperTestCase):
"""Tests for the VSS resolver helper implementation."""
def testNewFileObject(self):
"""Tests the NewFileObject function."""
resolver_helper_object = vshadow_resolver_helper.VShadowResolverHelper()
self._TestNewFileObject(resolver_helper_object)
def testNewFileSystem(self):
"""Tests the NewFileSystem function."""
resolver_helper_object = vshadow_resolver_helper.VShadowResolverHelper()
self._TestNewFileSystem(resolver_helper_object)
if __name__ == '__main__':
unittest.main()
| manashmndl/dfvfs | tests/resolver/vshadow_resolver_helper.py | Python | apache-2.0 | 786 |
from django.views.generic import DetailView
from widgy.contrib.form_builder.views import HandleFormMixin
from test_form.models import TestModel
class TestView(DetailView, HandleFormMixin):
model = TestModel
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(TestView, self).post(request, *args, **kwargs)
from ..algo import Algo
import numpy as np
import pandas as pd
from .. import tools
import logging
from cvxopt import solvers, matrix
solvers.options['show_progress'] = False
class Kelly(Algo):
""" Kelly fractioned betting. See
http://en.wikipedia.org/wiki/Kelly_criterion#Application_to_the_stock_market
    for a quick introduction.
"""
PRICE_TYPE = 'log'
REPLACE_MISSING = False
def __init__(self, window=float('inf'), r=0., fraction=1., long_only=False, min_history=None, max_leverage=1., reg=0., q=1.,
mu_estimate=False, gamma=0.):
"""
:param window: Window for calculating mean and variance. Use float('inf') for entire history.
:param min_history: Use zero weights for first min_periods.
:param r: Risk-free rate.
:param long_only: Restrict to positive portfolio weights.
:param fraction: Use fraction of Kelly weights. 1. is full Kelly, 0.5 is half Kelly.
:param max_leverage: Max leverage to use.
:param reg: Regularization parameter for covariance matrix (adds identity matrix).
:param mu_estimate: Mean is estimated to be proportional to historical variance
:param gamma: Penalize changing weights.
"""
if np.isinf(window):
window = int(1e+8)
min_history = min_history or 50
else:
min_history = min_history or window
super(Kelly, self).__init__(min_history=min_history)
self.window = window
self.r = r
self.fraction = fraction
self.long_only = long_only
self.max_leverage = max_leverage
self.reg = reg
self.q = q
self.mu_estimate = mu_estimate
self.gamma = gamma
def init_step(self, X):
# precalculate correlations
self.S = tools.rolling_cov_pairwise(X, window=self.window, min_periods=self.min_history)
self.M = pd.rolling_mean(X, window=self.window, min_periods=self.min_history)
def step(self, x, last_b):
# get sigma and mu matrix
mu = self.M.ix[x.name]
sigma = self.S[x.name]
# make sure sigma is properly indexed
sigma = sigma.reindex(index=x.index).reindex(columns=x.index)
# mu is proportional to individual variance
if self.mu_estimate:
mu = pd.Series(np.sqrt(np.diag(sigma)), index=mu.index)
# penalize changing weights
m = len(mu)
gamma = self.gamma
q = self.q
if gamma != 0:
sigma += gamma * np.eye(m)
if q == 0:
mu = 2. * gamma * last_b
else:
mu += 2.*gamma / q
# pure approach - problems with singular matrix
if not self.long_only:
sigma = np.matrix(sigma)
mu = np.matrix(mu).T
sigma_inv = np.linalg.inv(sigma)
b = (1 + self.r) * sigma_inv * (mu - self.r)
b = np.ravel(b)
else:
b = tools.opt_markowitz(mu, sigma, long_only=self.long_only, reg=self.reg, rf_rate=self.r, q=self.q, max_leverage=self.max_leverage)
# use Kelly fraction
b *= self.fraction
return b
def plot_fraction(self, S, fractions=np.linspace(0., 2., 10), **kwargs):
""" Plot graph with Kelly fraction on x-axis and total wealth on y-axis.
:param S: Stock prices.
:param fractions: List (ndarray) of fractions used.
"""
wealths = []
for fraction in fractions:
self.fraction = fraction
wealths.append(self.run(S).total_wealth)
ax = pd.Series(wealths, index=fractions, **kwargs).plot(**kwargs)
ax.set_xlabel('Kelly Fraction')
ax.set_ylabel('Total Wealth')
return ax
# use case
if __name__ == '__main__':
tools.quickrun(Kelly())
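# Hedged usage sketch (not executed here): `S` stands for any pandas DataFrame of
# prices, which this module does not provide. The constructor arguments are the
# ones defined above; `run()` comes from the `Algo` base class.
#
#   algo = Kelly(window=252, fraction=0.5, long_only=True)
#   result = algo.run(S)
#   algo.plot_fraction(S, fractions=np.linspace(0.1, 1.5, 8))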
| greenlin/universal-portfolios | universal/algos/kelly.py | Python | mit | 3,834 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas.io.data import DataReader
import pytz
from six import iteritems
from . benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from .paths import (
cache_root,
data_root,
)
from zipline.utils.tradingcalendar import (
trading_day as trading_day_nyse,
trading_days as trading_days_nyse,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[
trading_days.get_loc(pd.Timestamp.utcnow(), method='ffill') - 2
]
benchmark_returns = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
treasury_curves = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
)
return benchmark_returns, treasury_curves
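# Illustrative calls (assumptions: network access for the first download; results
# are then cached under ~/.zipline/data as described in the docstring above):
#
#   benchmark_returns, treasury_curves = load_market_data()  # NYSE / ^GSPC defaults
#   benchmark_returns, treasury_curves = load_market_data(bm_symbol='^GSPTSE')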
def ensure_benchmark_data(symbol, first_date, last_date, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
data = get_benchmark_returns(symbol, first_date - trading_day, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
start : datetime (Default: datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
print(stock)
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
print(name)
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
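# Illustrative call (requires network access to Yahoo Finance; the symbols and
# dates are examples only; `pd` and `pytz` are already imported above):
#
#   start = pd.datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)
#   end = pd.datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
#   prices = load_from_yahoo(stocks=['AAPL', 'IBM'], start=start, end=end)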
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
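# Illustrative call (the folder path and identifier column are placeholders):
#
#   prices = load_prices_from_csv_folder('/path/to/csvs', identifier_col='date')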
| ChinaQuants/zipline | zipline/data/loader.py | Python | apache-2.0 | 14,176 |
from jawaf.exceptions import ServerError
async def create_pool(**connection_settings):
"""Create a pool using the `engine` to call the right db backend.
:param connection_settings: Kwargs.
:return: Pool.
"""
engine = connection_settings.pop('engine')
if engine == 'postgresql':
from jawaf.adapters.db.postgresql import PostgreSQLBackend
return await PostgreSQLBackend().create_pool(**connection_settings)
raise ServerError(f'Unsupported DB Backend {engine}')
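# Hedged usage sketch (connection settings shown are placeholders and must match
# the backend's expected keyword arguments; call from within a running event loop):
#
#   pool = await create_pool(
#       engine='postgresql',
#       host='localhost',
#       database='mydb',
#       user='postgres',
#       password='secret',
#   )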
| danpozmanter/jawaf | jawaf/adapters/db/__init__.py | Python | bsd-3-clause | 505 |
import cv2
from matplotlib import pyplot
import numpy as np
def read_sample(filenames):
images = []
for filename in filenames:
image = cv2.imread(filename)
image = cv2.resize(image, (96, 96))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_array = []
for y in range(0, 96, 1):
for x in range(0, 96, 1):
image_array.append((image[y][x] / 255.))
image_array = np.array(image_array)
image_array = image_array.astype(np.float32)
images.append(image_array)
return np.vstack(images)
def plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
def draw_result(X, y):
fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(X.shape[0]):
ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
plot_sample(X[i], y[i], ax)
pyplot.show() | kanak87/oldboy_rep | yong_celeb_recognize/image.py | Python | mit | 1,069 |
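if __name__ == '__main__':
    # Minimal demonstration, assuming 'face.jpg' is replaced with a real image
    # path: read one 96x96 grayscale sample and plot 15 dummy keypoints at the
    # image centre (zero-valued normalized coordinates map to pixel (48, 48)).
    X = read_sample(['face.jpg'])
    y = np.zeros((1, 30), dtype=np.float32)
    draw_result(X, y)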
#!/usr/bin/env python
#
# find and import a version of 'py'
#
import sys
import os
from os.path import dirname as opd, exists, join, basename, abspath
def searchpy(current):
while 1:
last = current
initpy = join(current, '__init__.py')
if not exists(initpy):
pydir = join(current, 'py')
# recognize py-package and ensure it is importable
if exists(pydir) and exists(join(pydir, '__init__.py')):
#for p in sys.path:
# if p == current:
# return True
if current != sys.path[0]: # if we are already first, then ok
sys.stderr.write("inserting into sys.path: %s\n" % current)
sys.path.insert(0, current)
return True
current = opd(current)
if last == current:
return False
if not searchpy(abspath(os.curdir)):
if not searchpy(opd(abspath(sys.argv[0]))):
if not searchpy(opd(__file__)):
pass # let's hope it is just on sys.path
import py
import pytest
if __name__ == '__main__':
print ("py lib is at %s" % py.__file__)
| snim2/rcsp | py/bin/_findpy.py | Python | gpl-2.0 | 1,167 |
# -*- coding: utf-8 -*-
"""WebHelpers used in BlueEgg."""
from webhelpers import date, feedgenerator, html, number, misc, text
| Zex/Starter | script/json/BlueEgg/blueegg/lib/helpers.py | Python | mit | 129 |
from django.test import TestCase
from flows.statestore.django_store import StateStore
from flows.statestore.tests.utils import test_store_state
class DjangoStateStoreTest(TestCase):
def test_django_store_state(self):
store = StateStore()
test_store_state(self, store)
| laterpay/django-flows | flows/statestore/tests/test_django.py | Python | bsd-2-clause | 309 |
"""
Copyright 2018 Oliver Smith
This file is part of pmbootstrap.
pmbootstrap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pmbootstrap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pmbootstrap. If not, see <http://www.gnu.org/licenses/>.
"""
from pmb.flasher.init import init
from pmb.flasher.run import run
from pmb.flasher.variables import variables
from pmb.flasher.frontend import frontend
| postmarketOS/pmbootstrap | pmb/flasher/__init__.py | Python | gpl-3.0 | 833 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_DATACATALOG_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.credential_file(GCP_DATACATALOG_KEY)
class CloudDataflowExampleDagsSystemTest(GoogleSystemTest):
def setUp(self):
super().setUp()
@provide_gcp_context(GCP_DATACATALOG_KEY)
def test_run_example_gcp_dataflow_native_java(self):
self.run_dag('example_gcp_datacatalog', CLOUD_DAG_FOLDER)
def tearDown(self):
super().tearDown()
| apache/incubator-airflow | tests/providers/google/cloud/operators/test_datacatalog_system.py | Python | apache-2.0 | 1,370 |
from storelocator.models import Location, StoreLocator, Shop
import factory
class LocationFactory(factory.DjangoModelFactory):
FACTORY_FOR = Location
iso = 'DE'
postalcode = '10437'
city = 'Berlin'
state = 'Brandenburg'
latitude = 52.5565
longitude = 13.3911
class StoreLocatorFactory(factory.DjangoModelFactory):
FACTORY_FOR = StoreLocator
name = factory.Sequence(lambda i: 'StoreLocator-{0}'.format(i))
slug = factory.Sequence(lambda i: 'storelocator-{0}'.format(i))
class ShopFactory(factory.DjangoModelFactory):
FACTORY_FOR = Shop
type = factory.Sequence(lambda i: 'type-{0}'.format(i))
name = factory.Sequence(lambda i: 'shop-{0}'.format(i))
city = 'Berlin'
postalcode = '13359'
street = 'Drontheimerstrasse 25'
iso = 'DE'
storelocator = factory.SubFactory(StoreLocatorFactory) | moccu/django-storelocator | storelocator/factories.py | Python | mit | 867 |
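# Illustrative use in a test (values are examples only); the SubFactory above
# creates and saves the related StoreLocator automatically:
#
#   shop = ShopFactory(name='Corner Shop', city='Hamburg')
#   assert shop.storelocator.pk is not None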
#!/usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage jobs in Jenkins server
import errno
import io
import os
import operator
import hashlib
import yaml
import xml.etree.ElementTree as XML
import jenkins
import re
from pprint import pformat
import logging
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.parser import YamlParser
logger = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = object()
class CacheStorage(object):
# ensure each instance of the class has a reference to the required
# modules so that they are available to be used when the destructor
# is being called since python will not guarantee that it won't have
# removed global module references during teardown.
_yaml = yaml
_logger = logger
def __init__(self, jenkins_url, flush=False):
cache_dir = self.get_cache_dir()
# One cache per remote Jenkins URL:
        host_vary = re.sub(r'[^A-Za-z0-9\-\~]', '_', jenkins_url)
self.cachefilename = os.path.join(
cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
if flush or not os.path.isfile(self.cachefilename):
self.data = {}
else:
with io.open(self.cachefilename, 'r', encoding='utf-8') as yfile:
self.data = yaml.load(yfile)
logger.debug("Using cache: '{0}'".format(self.cachefilename))
@staticmethod
def get_cache_dir():
home = os.path.expanduser('~')
if home == '~':
raise OSError('Could not locate home folder')
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(home, '.cache')
path = os.path.join(xdg_cache_home, 'jenkins_jobs')
if not os.path.isdir(path):
os.makedirs(path)
return path
def set(self, job, md5):
self.data[job] = md5
def is_cached(self, job):
if job in self.data:
return True
return False
def has_changed(self, job, md5):
if job in self.data and self.data[job] == md5:
return False
return True
def save(self):
# check we initialized sufficiently in case called via __del__
# due to an exception occurring in the __init__
if getattr(self, 'data', None) is not None:
try:
with io.open(self.cachefilename, 'w',
encoding='utf-8') as yfile:
self._yaml.dump(self.data, yfile)
except Exception as e:
self._logger.error("Failed to write to cache file '%s' on "
"exit: %s" % (self.cachefilename, e))
else:
self._logger.info("Cache saved")
self._logger.debug("Cache written out to '%s'" %
self.cachefilename)
def __del__(self):
self.save()
class Jenkins(object):
def __init__(self, url, user, password, timeout=_DEFAULT_TIMEOUT):
if timeout != _DEFAULT_TIMEOUT:
self.jenkins = jenkins.Jenkins(url, user, password, timeout)
else:
self.jenkins = jenkins.Jenkins(url, user, password)
self._jobs = None
self._job_list = None
@property
def jobs(self):
if self._jobs is None:
# populate jobs
self._jobs = self.jenkins.get_jobs()
return self._jobs
@property
def job_list(self):
if self._job_list is None:
self._job_list = set(job['name'] for job in self.jobs)
return self._job_list
def update_job(self, job_name, xml):
if self.is_job(job_name):
logger.info("Reconfiguring jenkins job {0}".format(job_name))
self.jenkins.reconfig_job(job_name, xml)
else:
logger.info("Creating jenkins job {0}".format(job_name))
self.jenkins.create_job(job_name, xml)
def is_job(self, job_name):
# first use cache
if job_name in self.job_list:
return True
# if not exists, use jenkins
return self.jenkins.job_exists(job_name)
def get_job_md5(self, job_name):
xml = self.jenkins.get_job_config(job_name)
return hashlib.md5(xml).hexdigest()
def delete_job(self, job_name):
if self.is_job(job_name):
logger.info("Deleting jenkins job {0}".format(job_name))
self.jenkins.delete_job(job_name)
def delete_all_jobs(self):
# execute a groovy script to delete all jobs is much faster than
# using the doDelete REST endpoint to delete one job at a time.
script = ('for(job in jenkins.model.Jenkins.theInstance.getProjects())'
' { job.delete(); }')
self.jenkins.run_script(script)
def get_plugins_info(self):
""" Return a list of plugin_info dicts, one for each plugin on the
Jenkins instance.
"""
try:
plugins_list = self.jenkins.get_plugins_info()
except jenkins.JenkinsException as e:
if re.search("Connection refused", str(e)):
logger.warn("Unable to retrieve Jenkins Plugin Info from {0},"
" using default empty plugins info list.".format(
self.jenkins.server))
plugins_list = [{'shortName': '',
'version': '',
'longName': ''}]
else:
raise e
logger.debug("Jenkins Plugin Info {0}".format(pformat(plugins_list)))
return plugins_list
def get_jobs(self, cache=True):
if not cache:
self._jobs = None
self._job_list = None
return self.jobs
def is_managed(self, job_name):
xml = self.jenkins.get_job_config(job_name)
try:
out = XML.fromstring(xml)
description = out.find(".//description").text
return description.endswith(MAGIC_MANAGE_STRING)
except (TypeError, AttributeError):
pass
return False
class Builder(object):
def __init__(self, jenkins_url, jenkins_user, jenkins_password,
config=None, jenkins_timeout=_DEFAULT_TIMEOUT,
ignore_cache=False, flush_cache=False, plugins_list=None):
self.jenkins = Jenkins(jenkins_url, jenkins_user, jenkins_password,
jenkins_timeout)
self.cache = CacheStorage(jenkins_url, flush=flush_cache)
self.global_config = config
self.ignore_cache = ignore_cache
self._plugins_list = plugins_list
@property
def plugins_list(self):
if self._plugins_list is None:
self._plugins_list = self.jenkins.get_plugins_info()
return self._plugins_list
def load_files(self, fn):
self.parser = YamlParser(self.global_config, self.plugins_list)
# handle deprecated behavior
if not hasattr(fn, '__iter__'):
logger.warning(
'Passing single elements for the `fn` argument in '
'Builder.load_files is deprecated. Please update your code '
'to use a list as support for automatic conversion will be '
'removed in a future version.')
fn = [fn]
files_to_process = []
for path in fn:
if os.path.isdir(path):
files_to_process.extend([os.path.join(path, f)
for f in os.listdir(path)
if (f.endswith('.yml')
or f.endswith('.yaml'))])
else:
files_to_process.append(path)
# symlinks used to allow loading of sub-dirs can result in duplicate
# definitions of macros and templates when loading all from top-level
unique_files = []
for f in files_to_process:
rpf = os.path.realpath(f)
if rpf not in unique_files:
unique_files.append(rpf)
else:
logger.warning("File '%s' already added as '%s', ignoring "
"reference to avoid duplicating yaml "
"definitions." % (f, rpf))
for in_file in unique_files:
# use of ask-for-permissions instead of ask-for-forgiveness
# performs better when low use cases.
if hasattr(in_file, 'name'):
fname = in_file.name
else:
fname = in_file
logger.debug("Parsing YAML file {0}".format(fname))
if hasattr(in_file, 'read'):
self.parser.parse_fp(in_file)
else:
self.parser.parse(in_file)
def delete_old_managed(self, keep=None):
jobs = self.jenkins.get_jobs()
deleted_jobs = 0
if keep is None:
keep = [job.name for job in self.parser.xml_jobs]
for job in jobs:
if job['name'] not in keep and \
self.jenkins.is_managed(job['name']):
logger.info("Removing obsolete jenkins job {0}"
.format(job['name']))
self.delete_job(job['name'])
deleted_jobs += 1
else:
logger.debug("Ignoring unmanaged jenkins job %s",
job['name'])
return deleted_jobs
def delete_job(self, jobs_glob, fn=None):
if fn:
self.load_files(fn)
self.parser.expandYaml([jobs_glob])
jobs = [j['name'] for j in self.parser.jobs]
else:
jobs = [jobs_glob]
if jobs is not None:
logger.info("Removing jenkins job(s): %s" % ", ".join(jobs))
for job in jobs:
self.jenkins.delete_job(job)
if(self.cache.is_cached(job)):
self.cache.set(job, '')
def delete_all_jobs(self):
jobs = self.jenkins.get_jobs()
logger.info("Number of jobs to delete: %d", len(jobs))
self.jenkins.delete_all_jobs()
def update_job(self, input_fn, jobs_glob=None, output=None):
self.load_files(input_fn)
self.parser.expandYaml(jobs_glob)
self.parser.generateXML()
logger.info("Number of jobs generated: %d", len(self.parser.xml_jobs))
self.parser.xml_jobs.sort(key=operator.attrgetter('name'))
if (output and not hasattr(output, 'write')
and not os.path.isdir(output)):
logger.info("Creating directory %s" % output)
try:
os.makedirs(output)
except OSError:
if not os.path.isdir(output):
raise
updated_jobs = 0
for job in self.parser.xml_jobs:
if output:
if hasattr(output, 'write'):
# `output` is a file-like object
logger.info("Job name: %s", job.name)
logger.debug("Writing XML to '{0}'".format(output))
try:
output.write(job.output())
except IOError as exc:
if exc.errno == errno.EPIPE:
# EPIPE could happen if piping output to something
# that doesn't read the whole input (e.g.: the UNIX
# `head` command)
return
raise
continue
output_fn = os.path.join(output, job.name)
logger.debug("Writing XML to '{0}'".format(output_fn))
with io.open(output_fn, 'w', encoding='utf-8') as f:
f.write(job.output().decode('utf-8'))
continue
md5 = job.md5()
if (self.jenkins.is_job(job.name)
and not self.cache.is_cached(job.name)):
old_md5 = self.jenkins.get_job_md5(job.name)
self.cache.set(job.name, old_md5)
if self.cache.has_changed(job.name, md5) or self.ignore_cache:
self.jenkins.update_job(job.name, job.output())
updated_jobs += 1
self.cache.set(job.name, md5)
else:
logger.debug("'{0}' has not changed".format(job.name))
return self.parser.xml_jobs, updated_jobs
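# Hedged driver sketch (URL, credentials and paths are placeholders); this is
# roughly what the command-line front end does for an "update" run:
#
#   builder = Builder('http://jenkins.example.com', 'user', 'api-token',
#                     ignore_cache=True)
#   xml_jobs, num_updated = builder.update_job(['path/to/job-definitions/'])
#   builder.delete_old_managed()   # optionally prune jobs no longer defined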
| pyatil/jenkins-job-builder | jenkins_jobs/builder.py | Python | apache-2.0 | 13,004 |
# Imports
import sys, os
#sys.path.insert(0, os.path.abspath('.'))
# Configuration
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
master_doc = 'index'
# Data
project = u'pygame-mvctools'
release = '0.1.0'
| vxgmichel/pygame-mvctools | docs/source/conf.py | Python | gpl-3.0 | 224 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['MovingMedian'] , ['Seasonal_Hour'] , ['NoAR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingMedian_Seasonal_Hour_NoAR.py | Python | bsd-3-clause | 169 |
import requests
import json
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class ListingsTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
vendor = self.nodes[1]
buyer = self.nodes[2]
currency = "tbtc"
# no listings POSTed
api_url = vendor["gateway_url"] + "ob/listings"
r = requests.get(api_url)
if r.status_code == 200:
if len(json.loads(r.text)) == 0:
pass
else:
raise TestFailure("ListingsTest - FAIL: No listings should be returned")
elif r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listings get endpoint not found")
else:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listings GET failed. Reason: %s", resp["reason"])
# POST listing
with open('testdata/'+ self.vendor_version +'/listing.json') as listing_file:
ljson = json.load(listing_file, object_pairs_hook=OrderedDict)
if self.vendor_version == "v4":
ljson["metadata"]["priceCurrency"] = "t" + self.cointype
else:
ljson["item"]["priceCurrency"]["code"] = "t" + self.cointype
ljson["metadata"]["acceptedCurrencies"] = ["t" + self.cointype.lower()]
currency = "T" + self.cointype
api_url = vendor["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(ljson, indent=4))
if r.status_code == 200:
pass
elif r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listing post endpoint not found")
else:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
# one listing POSTed and index returning correct data
api_url = vendor["gateway_url"] + "ob/listings"
r = requests.get(api_url)
if r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listings get endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listings GET failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
if len(resp) != 1:
raise TestFailure("ListingsTest - FAIL: One listing should be returned")
listing = resp[0]
if currency.lower() not in listing["acceptedCurrencies"]:
raise TestFailure("ListingsTest - FAIL: Listing should have acceptedCurrencies")
# listing show endpoint returning correct data
slug = listing["slug"]
api_url = vendor["gateway_url"] + "ob/listing/" + slug
r = requests.get(api_url)
if r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listings get endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listings GET failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
if currency.lower() not in resp["listing"]["metadata"]["acceptedCurrencies"]:
raise TestFailure("ListingsTest - FAIL: Listing should have acceptedCurrences in metadata")
# check vendor's index from another node
api_url = buyer["gateway_url"] + "ob/listings/" + vendor["peerId"]
r = requests.get(api_url)
if r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listings get endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listings GET failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
if len(resp) != 1:
raise TestFailure("ListingsTest - FAIL: One listing should be returned")
if currency.lower() not in resp[0]["acceptedCurrencies"]:
raise TestFailure("ListingsTest - FAIL: Listing should have acceptedCurrences")
# check listing show page from another node
api_url = vendor["gateway_url"] + "ob/listing/" + vendor["peerId"] + "/" + slug
r = requests.get(api_url)
if r.status_code == 404:
raise TestFailure("ListingsTest - FAIL: Listings get endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("ListingsTest - FAIL: Listings GET failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
if currency.lower() not in resp["listing"]["metadata"]["acceptedCurrencies"]:
raise TestFailure("ListingsTest - FAIL: Listing should have acceptedCurrences in metadata")
print("ListingsTest - PASS")
if __name__ == '__main__':
print("Running ListingTest")
ListingsTest().main(["--regtest", "--disableexchangerates"])
| OpenBazaar/openbazaar-go | qa/listings.py | Python | mit | 5,086 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.contrib import admin
from wger.core.models import Language
from wger.exercises.models import Exercise
from wger.exercises.models import ExerciseComment
from wger.exercises.models import ExerciseCategory
from wger.exercises.models import Muscle
class ExerciseCommentInline(admin.TabularInline): # admin.StackedInline
model = ExerciseComment
extra = 1
class ExerciseAdmin(admin.ModelAdmin):
inlines = [ExerciseCommentInline]
admin.site.register(Exercise, ExerciseAdmin)
admin.site.register(ExerciseCategory)
admin.site.register(Language)
admin.site.register(Muscle)
| DeveloperMal/wger | wger/exercises/admin.py | Python | agpl-3.0 | 1,236 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.junit_tests import JUnitTests
class JavaLibrary(ExportableJvmLibrary):
"""A Java library.
Normally has conceptually-related sources; invoking the ``compile`` goal
on this target compiles Java and generates classes. Invoking the ``jar``
goal on this target creates a ``.jar``; but that's an unusual thing to do.
Instead, a ``jvm_binary`` might depend on this library; that binary is a
more sensible thing to bundle.
:API: public
"""
default_sources_globs = '*.java'
default_sources_exclude_globs = JUnitTests.java_test_globs
@classmethod
def subsystems(cls):
return super(JavaLibrary, cls).subsystems()
def __init__(self, *args, **kwargs):
"""
:param provides: The ``artifact``
to publish that represents this target outside the repo.
:param resources: An optional list of file paths (DEPRECATED) or
``resources`` targets (which in turn point to file paths). The paths
indicate text file resources to place in this module's jar.
"""
super(JavaLibrary, self).__init__(*args, **kwargs)
self.add_labels('java')
| cevaris/pants | src/python/pants/backend/jvm/targets/java_library.py | Python | apache-2.0 | 1,499 |
#!/usr/bin/env python
# -*- coding: utf-8 *-*
"""This program imports the peanut module and sets variable value equal to
constant in peanut module."""
import task_01.peanut
TIME = task_01.peanut.BUTTER
| neal-rogers/is210-week-06-warmup | task_02.py | Python | mpl-2.0 | 204 |
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from tests import MongoTestCase
from django_mongoengine.forms.fields import DictField
from django_mongoengine.forms.widgets import Dictionary, SubDictionary, Pair
#TODO : test for max_depth
class DictFieldTest(MongoTestCase):
"""
TestCase class that tests a DictField object
"""
    def test_output(self):
"""
Test the output of a DictField
"""
self._init_field()
max_depth_test = 2
        # valid input/output
valid_input = {
'[[key1,value1],[key2,value2],[key3,value3]]':
[['key1', 'value1'], ['key2', 'value2'], ['key3', 'value3']],
'[[key1,value1],[skey,[[skey1,svalue1],[skey2,svalue2],[skey3,svalue3]]],[key2,value2],[key3,value3]]':
[['key1', 'value1'], ['skey', [['skey1', 'svalue1'], ['skey2', 'svalue2'], ['skey3', 'svalue3']]], ['key2', 'value2'], ['key3', 'value3']],
'[[a,[[b,[[c,[[d,[[e,[[f,g]]]]]]]]]]]]':
[['a', [['b', [['c', [['d', [['e', [['f', 'g']]]]]]]]]]]],
}
valid_output = {
'[[key1,value1],[key2,value2],[key3,value3]]': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'
},
'[[key1,value1],[skey,[[skey1,svalue1],[skey2,svalue2],[skey3,svalue3]]],[key2,value2],[key3,value3]]': {
'key1': 'value1',
'skey': {
'skey1': 'svalue1',
'skey2': 'svalue2',
'skey3': 'svalue3'
},
'key2': 'value2',
'key3': 'value3'
},
'[[a,[[b,[[c,[[d,[[e,[[f,g]]]]]]]]]]]]': {
'a': {
'b': {
'c': {
'd': {
'e': {
'f': 'g'
}
}
}
}
}
},
}
#invalid input/message
invalid_input = {
'[[key1,value1],[$key2,value2]]': [['key1', 'value1'], ['$key2', 'value2']],
'[[key1,value1],[_key2,value2]]': [['key1', 'value1'], ['_key2', 'value2']],
'[[key1,value1],[k.ey2,value2]]': [['key1', 'value1'], ['k.ey2', 'value2']],
'[[keykeykeykeykeykeykeykeykeykeykey,value1],[key2,value2]]': [['keykeykeykeykeykeykeykeykeykeykey', 'value1'], ['key2', 'value2']],
'[[err,value1],[key2,value2]]': [['err', 'value1'], ['key2', 'value2']],
'[[errmsg,value1],[key2,value2]]': [['errmsg', 'value1'], ['key2', 'value2']],
'[[key1,[key2,[key3,[key4,value4]]]]]': [['key1', [['key2', [['key3', [['key4', 'value4']]]]]]]],
}
invalid_message = {
'[[key1,value1],[$key2,value2]]': [u'Ensure the keys do not begin with : ["$","_"].'],
'[[key1,value1],[_key2,value2]]': [u'Ensure the keys do not begin with : ["$","_"].'],
'[[key1,value1],[k.ey2,value2]]': [self.field.error_messages['illegal'] % self.field.illegal_characters],
'[[keykeykeykeykeykeykeykeykeykeykey,value1],[key2,value2]]': [self.field.error_messages['length'] % self.field.key_limit],
'[[err,value1],[key2,value2]]': [self.field.error_messages['invalid_key'] % self.field.invalid_keys],
'[[errmsg,value1],[key2,value2]]': [self.field.error_messages['invalid_key'] % self.field.invalid_keys],
'[[key1,[key2,[key3,[key4,value4]]]]]': [self.field.error_messages['depth'] % max_depth_test],
}
# test valid inputs
for input, output in valid_output.items():
out = self.field.clean(valid_input[input])
assert isinstance(out, dict), 'output should be a dictionary'
self.assertDictEqual(out, output)
# test invalid inputs
self._init_field(depth=max_depth_test)
for input, input_list in invalid_input.items():
with self.assertRaises(ValidationError) as context_manager:
self.field.clean(input_list)
self.assertEqual(context_manager.exception.messages, invalid_message[input])
def test_rendering(self):
"""
Test the structure of a widget, after having passed a data dictionary
"""
self._init_field()
#contains the POST data dicts
data_inputs = {
'data1': {
u'widget_name_0_subdict_0': [u'a'],
u'widget_name_0_subdict_1_0_subdict_0': [u'b'],
u'widget_name_0_subdict_1_0_subdict_1_0_pair_0': [u'f'],
u'widget_name_0_subdict_1_0_subdict_1_0_pair_1': [u'g'],
}
}
#contains the data dicts
data_dicts = {
'data1': {
u'a': {
u'b': {
u'f': u'g'
}
}
}
}
#contains structures of output
output_structures = {
'data1': {
'type': 'Dictionary',
'widgets': [{'type': 'SubDictionary',
'widgets': [{'type': 'TextInput'}, {'type': 'Dictionary',
'widgets': [{'type': 'SubDictionary',
'widgets': [{'type': 'TextInput'}, {'type': 'Dictionary',
'widgets': [{'type': 'Pair', 'widgets':[{'type': 'TextInput'}, {'type': 'TextInput'}]}]
}]
}]
}]
}]
}
}
for data, datadict in data_inputs.items():
self.field.widget.render('widget_name', self.field.widget.value_from_datadict(datadict, {}, 'widget_name'))
self._check_structure(self.field.widget, output_structures[data])
self.field.widget.render('widget_name', data_dicts[data])
self._check_structure(self.field.widget, output_structures[data])
def test_static(self):
self._init_field(force=True)
structure = {
'type': 'Dictionary',
'widgets': [{'type': 'StaticPair', 'widgets': [{'type': 'HiddenInput'}, {'type': 'TextInput'}]
},
{'type': 'StaticSubDictionary',
'widgets': [{'type': 'StaticPair', 'widgets': [{'type': 'HiddenInput'}, {'type': 'TextInput'}]}]
},
{'type': 'StaticSubDictionary',
'widgets': [{'type': 'StaticPair',
'widgets': [{'type': 'HiddenInput'}, {'type': 'TextInput'}]},
{'type': 'StaticPair',
'widgets': [{'type': 'HiddenInput'}, {'type': 'TextInput'}]}]
}]
}
self._check_structure(self.field.widget, structure)
def _init_field(self, depth=None, force=False):
validate = [RegexValidator(regex='^[^$_]', message=u'Ensure the keys do not begin with : ["$","_"].', code='invalid_start')]
if force:
self.field = DictField(**{
'required': False,
'initial': {
'k': 'v',
'k2': {'k3': 'v2'},
'k4': {'k5': 'v3', 'k6': 'v4'}
},
'validators': validate,
'flags': ['FORCE_SCHEMA'],
'max_depth': depth,
})
else:
self.field = DictField(**{
'required': False,
'initial': {
'k': 'v',
'k2': {'k3': 'v2'}
},
'validators': validate,
'max_depth': depth,
})
def _check_structure(self, widget, structure):
assert isinstance(structure, dict), 'error, the comparative structure should be a dictionary'
assert isinstance(widget, eval(structure['type'])), 'widget should be a %s' % structure['type']
if 'widgets' in structure.keys():
assert isinstance(structure['widgets'], list), 'structure field "widgets" should be a list'
assert isinstance(widget.widgets, list), 'widget.widgets should be a list'
for i, w in enumerate(widget.widgets):
self._check_structure(w, structure['widgets'][i])
| arpitgoyaiitkgp/django-mongoengine | tests/forms/tests.py | Python | bsd-3-clause | 8,729 |
"""
Module defining Predicate Interface
contains list of methods every predicate must implement
Copyright (c) 2017 Michaela Bielikova <[email protected]>
"""
import abc
class PredicateInterface(object):
"""
    Abstract interface listing the methods every predicate implementation must provide
"""
__metaclass__ = abc.ABCMeta
is_epsilon = False
@abc.abstractmethod
def negation(self):
"""
Predicate negation
:return: negation of given predicate
"""
return
@abc.abstractmethod
def conjunction(self, predicate):
"""
Predicate conjunction
:param predicate: second predicate
:return: conjunction of two predicates
"""
return
@abc.abstractmethod
def disjunction(self, predicate):
"""
Predicate disjunction
:param predicate: second predicate
:return: disjunction of two predicates
"""
return
@abc.abstractmethod
def is_equal(self, predicate):
"""
Checks whether the given predicates are equal
:param predicate: second predicate
:return: bool
"""
return
@abc.abstractmethod
def is_satisfiable(self):
"""
Checks whether the given predicate is satisfiable
:return: bool
"""
return
@abc.abstractmethod
def is_subset(self, predicate):
"""
Checks whether the given predicate represent a subset of the second one
:param predicate: second predicate
:return: bool
"""
return
@abc.abstractmethod
def get_universal(self):
"""
Creates a predicate representing the whole alphabet
:return: predicate object
"""
return
@abc.abstractmethod
def has_letter(self, symbol):
"""
Checks whether the given symbol belongs to the predicate
:param symbol: checked symbol
:return: bool
"""
return
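if __name__ == '__main__':
    # Minimal illustrative implementation (not part of the library): a predicate
    # backed by an explicit set of symbols over an assumed finite ALPHABET,
    # showing how the abstract methods are meant to be filled in.
    ALPHABET = {'a', 'b', 'c'}
    class SetPredicate(PredicateInterface):
        """Toy predicate represented by a finite set of symbols."""
        def __init__(self, symbols):
            self.symbols = set(symbols)
        def negation(self):
            return SetPredicate(ALPHABET - self.symbols)
        def conjunction(self, predicate):
            return SetPredicate(self.symbols & predicate.symbols)
        def disjunction(self, predicate):
            return SetPredicate(self.symbols | predicate.symbols)
        def is_equal(self, predicate):
            return self.symbols == predicate.symbols
        def is_satisfiable(self):
            return bool(self.symbols)
        def is_subset(self, predicate):
            return self.symbols <= predicate.symbols
        def get_universal(self):
            return SetPredicate(ALPHABET)
        def has_letter(self, symbol):
            return symbol in self.symbols
    p = SetPredicate({'a'})
    q = SetPredicate({'a', 'b'})
    print(p.is_subset(q))                # True
    print(p.disjunction(q).is_equal(q))  # True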
| Miskaaa/symboliclib | symboliclib/predicate_interface.py | Python | gpl-3.0 | 1,980 |
import logging
pvl_logger = logging.getLogger('pvlib')
import numpy as np
import pandas as pd
from nose.tools import raises
from numpy.testing import assert_almost_equal
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h')
times_localized = times.tz_localize(tus.tz)
ephem_data = solarposition.get_solarposition(times, tus)
def test_ineichen_required():
# the clearsky function should call lookup_linke_turbidity by default
# will fail without scipy
expected = pd.DataFrame(np.array([[0.,0.,0.],
[0.,0.,0.],
[40.53660309,302.47614235,78.1470311],
[98.88372629,865.98938602,699.93403875],
[122.57870881,931.83716051,1038.62116584],
[109.30270612,899.88002304,847.68806472],
[64.25699595,629.91187925,254.53048144],
[0.,0.,0.],
[0.,0.,0.]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times, tus)
assert_frame_equal(expected, out)
def test_ineichen_supply_linke():
expected = pd.DataFrame(np.array([[0.,0.,0.],
[0.,0.,0.],
[40.18673553,322.0649964,80.23287692],
[95.14405816,876.49507151,703.48596755],
[118.45873721,939.81653473,1042.34531752],
[105.36671577,909.113377,851.3283881],
[61.91607984,647.40869542,257.47471759],
[0.,0.,0.],
[0.,0.,0.]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times, tus, linke_turbidity=3)
assert_frame_equal(expected, out)
def test_ineichen_solpos():
clearsky.ineichen(times, tus, linke_turbidity=3,
solarposition_method='ephemeris')
def test_ineichen_airmass():
expected = pd.DataFrame(np.array([[0.,0.,0.],
[0.,0.,0.],
[41.70761136,293.72203458,78.22953786],
[95.20590465,876.1650047,703.31872722],
[118.46089555,939.8078753,1042.33896321],
[105.39577655,908.97804342,851.24640259],
[62.35382269,642.91022293,256.55363539],
[0.,0.,0.],
[0.,0.,0.]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times, tus, linke_turbidity=3,
airmass_model='simple')
assert_frame_equal(expected, out)
def test_lookup_linke_turbidity():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz=tus.tz)
# expect same value on 2014-06-24 0000 and 1200, and
# diff value on 2014-06-25
expected = pd.Series(np.array([3.10126582, 3.10126582, 3.11443038]),
index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude)
assert_series_equal(expected, out)
def test_lookup_linke_turbidity_nointerp():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz=tus.tz)
# expect same value for all days
expected = pd.Series(np.array([3., 3., 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
def test_lookup_linke_turbidity_months():
times = pd.date_range(start='2014-04-01', end='2014-07-01',
freq='1M', tz=tus.tz)
expected = pd.Series(np.array([2.8943038, 2.97316456, 3.18025316]),
index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude,
tus.longitude)
assert_series_equal(expected, out)
def test_lookup_linke_turbidity_nointerp_months():
times = pd.date_range(start='2014-04-10', end='2014-07-10',
freq='1M', tz=tus.tz)
expected = pd.Series(np.array([2.85, 2.95, 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
# changing the dates shouldn't matter if interp=False
times = pd.date_range(start='2014-04-05', end='2014-07-05',
freq='1M', tz=tus.tz)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
def test_haurwitz():
expected = pd.DataFrame(np.array([[0.],
[0.],
[82.85934048],
[699.74514735],
[1016.50198354],
[838.32103769],
[271.90853863],
[0.],
[0.]]),
columns=['ghi'], index=times_localized)
out = clearsky.haurwitz(ephem_data['zenith'])
assert_frame_equal(expected, out)
| dacoex/pvlib-python | pvlib/test/test_clearsky.py | Python | bsd-3-clause | 6,118 |
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import imp
import os
import sys
from .utils import extract_libtool
class LibtoolImporter(object):
def __init__(self, name, path):
self.name = name
self.path = path
@classmethod
def find_module(cls, name, packagepath=None):
modparts = name.split('.')
filename = modparts.pop() + '.la'
# Given some.package.module 'path' is where subpackages of some.package
# should be looked for. See if we can find a ".libs/module.la" relative
# to those directories and failing that look for file
# "some/package/.libs/module.la" relative to sys.path
if len(modparts) > 0:
modprefix = os.path.join(*modparts)
modprefix = os.path.join(modprefix, '.libs')
else:
modprefix = '.libs'
for path in sys.path:
full = os.path.join(path, modprefix, filename)
if os.path.exists(full):
return cls(name, full)
def load_module(self, name):
realpath = extract_libtool(self.path)
# The first item of the suffix tuple (which can be, depending on platform,
# one or more valid filename extensions used to name c extension modules)
# is ignored by imp.load_module(). Thus, there is no use in pretending it
# is important and we set it to an empty string.
suffix = ('', 'rb', imp.C_EXTENSION)
mod = imp.load_module(name, open(realpath), realpath, suffix)
mod.__loader__ = self
return mod
@classmethod
def __enter__(cls):
sys.meta_path.append(cls)
@classmethod
def __exit__(cls, exc_type, exc_val, exc_tb):
sys.meta_path.remove(cls)
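# Hedged usage sketch (the imported name is illustrative): the class doubles as a
# context manager that temporarily installs itself on sys.meta_path, e.g.
#
#   with LibtoolImporter(None, None):
#       import _giscanner  # resolved through its .la file in a .libs directory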
| anthrotype/gobject-introspection | giscanner/libtoolimporter.py | Python | gpl-2.0 | 2,704 |
from datetime import datetime
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import six
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from .dialects.postgresql import (
CreateTemporaryTransactionTableSQL,
InsertTemporaryTransactionSQL,
TransactionTriggerSQL
)
from .exc import ImproperlyConfigured
from .factory import ModelFactory
@compiles(sa.types.BigInteger, 'sqlite')
def compile_big_integer(element, compiler, **kw):
return 'INTEGER'
class TransactionBase(object):
issued_at = sa.Column(sa.DateTime, default=datetime.utcnow)
@property
def entity_names(self):
"""
Return a list of entity names that changed during this transaction.
"""
return [changes.entity_name for changes in self.changes]
@property
def changed_entities(self):
"""
Return all changed entities for this transaction log entry.
Entities are returned as a dict where keys are entity classes and
values lists of entitites that changed in this transaction.
"""
manager = self.__versioning_manager__
tuples = set(manager.version_class_map.items())
entities = {}
session = sa.orm.object_session(self)
for class_, version_class in tuples:
if class_.__name__ not in self.entity_names:
continue
tx_column = manager.option(class_, 'transaction_column_name')
entities[version_class] = (
session
.query(version_class)
.filter(getattr(version_class, tx_column) == self.id)
).all()
return entities
procedure_sql = """
CREATE OR REPLACE FUNCTION transaction_temp_table_generator()
RETURNS TRIGGER AS $$
BEGIN
{temporary_transaction_sql}
INSERT INTO temporary_transaction (id) VALUES (NEW.id);
RETURN NEW;
END;
$$
LANGUAGE plpgsql
"""
def create_triggers(cls):
sa.event.listen(
cls.__table__,
'after_create',
sa.schema.DDL(
procedure_sql.format(
temporary_transaction_sql=CreateTemporaryTransactionTableSQL(),
insert_temporary_transaction_sql=(
InsertTemporaryTransactionSQL(
transaction_id_values='NEW.id'
)
),
)
)
)
sa.event.listen(
cls.__table__,
'after_create',
sa.schema.DDL(str(TransactionTriggerSQL(cls)))
)
sa.event.listen(
cls.__table__,
'after_drop',
sa.schema.DDL(
'DROP FUNCTION IF EXISTS transaction_temp_table_generator()'
)
)
class TransactionFactory(ModelFactory):
model_name = 'Transaction'
def __init__(self, remote_addr=True):
self.remote_addr = remote_addr
def create_class(self, manager):
"""
Create Transaction class.
"""
class Transaction(
manager.declarative_base,
TransactionBase
):
__tablename__ = 'transaction'
__versioning_manager__ = manager
id = sa.Column(
sa.types.BigInteger,
primary_key=True,
autoincrement=True
)
if self.remote_addr:
remote_addr = sa.Column(sa.String(50))
if manager.user_cls:
user_cls = manager.user_cls
registry = manager.declarative_base._decl_class_registry
if isinstance(user_cls, six.string_types):
try:
user_cls = registry[user_cls]
except KeyError:
raise ImproperlyConfigured(
'Could not build relationship between Transaction'
' and %s. %s was not found in declarative class '
'registry. Either configure VersioningManager to '
'use different user class or disable this '
'relationship ' % (user_cls, user_cls)
)
user_id = sa.Column(
sa.inspect(user_cls).primary_key[0].type,
sa.ForeignKey(
'%s.%s' % (user_cls.__tablename__, sa.inspect(user_cls).primary_key[0].name)
),
index=True
)
user = sa.orm.relationship(user_cls)
def __repr__(self):
fields = ['id', 'issued_at', 'user']
field_values = OrderedDict(
(field, getattr(self, field))
for field in fields
if hasattr(self, field)
)
return '<Transaction %s>' % ', '.join(
(
'%s=%r' % (field, value)
if not isinstance(value, six.integer_types)
# We want the following line to ensure that longs get
# shown without the ugly L suffix on python 2.x
# versions
else '%s=%d' % (field, value)
for field, value in field_values.items()
)
)
if manager.options['native_versioning']:
create_triggers(Transaction)
return Transaction
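# Rough usage sketch (illustrative): in practice the VersioningManager invokes
# this factory itself while configuring versioning, so `manager` below stands
# for an already configured manager exposing declarative_base and user_cls.
#
#   factory = TransactionFactory(remote_addr=False)
#   Transaction = factory.create_class(manager)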
| avilaton/sqlalchemy-continuum | sqlalchemy_continuum/transaction.py | Python | bsd-3-clause | 5,496 |
__author__ = 'ganeshchand'
def main():
print("This program uses main()")
    # In Python, a function body may call a function defined later in the
    # file; the name is only looked up when the call actually runs.
printhello()
def printhello():
print("Hello")
# Boilerplate to call main() and begin the program
if __name__ == "__main__":
    main()
| ganeshchand/python3 | basic/usingmainfunction.py | Python | apache-2.0 | 286 |
import contextlib
from datetime import datetime
import io
import os
from pathlib import Path
import dateutil.parser
import numpy as np
import pytest
from pandas.errors import EmptyDataError
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def dirpath(datapath):
return datapath("io", "sas", "data")
@pytest.fixture(params=[(1, range(1, 16)), (2, [16])])
def data_test_ix(request, dirpath):
i, test_ix = request.param
fname = os.path.join(dirpath, f"test_sas7bdat_{i}.csv")
df = pd.read_csv(fname)
epoch = datetime(1960, 1, 1)
t1 = pd.to_timedelta(df["Column4"], unit="d")
df["Column4"] = epoch + t1
t2 = pd.to_timedelta(df["Column12"], unit="d")
df["Column12"] = epoch + t2
for k in range(df.shape[1]):
col = df.iloc[:, k]
if col.dtype == np.int64:
df.iloc[:, k] = df.iloc[:, k].astype(np.float64)
return df, test_ix
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestSAS7BDAT:
@pytest.mark.slow
def test_from_file(self, dirpath, data_test_ix):
df0, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@pytest.mark.slow
def test_from_buffer(self, dirpath, data_test_ix):
df0, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with open(fname, "rb") as f:
byts = f.read()
buf = io.BytesIO(byts)
with pd.read_sas(
buf, format="sas7bdat", iterator=True, encoding="utf-8"
) as rdr:
df = rdr.read()
tm.assert_frame_equal(df, df0, check_exact=False)
@pytest.mark.slow
def test_from_iterator(self, dirpath, data_test_ix):
df0, test_ix = data_test_ix
for k in test_ix:
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
df = rdr.read(2)
tm.assert_frame_equal(df, df0.iloc[0:2, :])
df = rdr.read(3)
tm.assert_frame_equal(df, df0.iloc[2:5, :])
@pytest.mark.slow
def test_path_pathlib(self, dirpath, data_test_ix):
df0, test_ix = data_test_ix
for k in test_ix:
fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat"))
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@td.skip_if_no("py.path")
@pytest.mark.slow
def test_path_localpath(self, dirpath, data_test_ix):
from py.path import local as LocalPath
df0, test_ix = data_test_ix
for k in test_ix:
fname = LocalPath(os.path.join(dirpath, f"test{k}.sas7bdat"))
df = pd.read_sas(fname, encoding="utf-8")
tm.assert_frame_equal(df, df0)
@pytest.mark.slow
@pytest.mark.parametrize("chunksize", (3, 5, 10, 11))
@pytest.mark.parametrize("k", range(1, 17))
def test_iterator_loop(self, dirpath, k, chunksize):
# github #13654
fname = os.path.join(dirpath, f"test{k}.sas7bdat")
with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr:
y = 0
for x in rdr:
y += x.shape[0]
assert y == rdr.row_count
def test_iterator_read_too_much(self, dirpath):
# github #14734
fname = os.path.join(dirpath, "test1.sas7bdat")
with pd.read_sas(
fname, format="sas7bdat", iterator=True, encoding="utf-8"
) as rdr:
d1 = rdr.read(rdr.row_count + 20)
with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
d2 = rdr.read(rdr.row_count + 20)
tm.assert_frame_equal(d1, d2)
def test_encoding_options(datapath):
fname = datapath("io", "sas", "data", "test1.sas7bdat")
df1 = pd.read_sas(fname)
df2 = pd.read_sas(fname, encoding="utf-8")
for col in df1.columns:
try:
df1[col] = df1[col].str.decode("utf-8")
except AttributeError:
pass
tm.assert_frame_equal(df1, df2)
from pandas.io.sas.sas7bdat import SAS7BDATReader
with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr:
df3 = rdr.read()
for x, y in zip(df1.columns, df3.columns):
assert x == y.decode()
def test_productsales(datapath):
fname = datapath("io", "sas", "data", "productsales.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
fname = datapath("io", "sas", "data", "productsales.csv")
df0 = pd.read_csv(fname, parse_dates=["MONTH"])
vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
df0[vn] = df0[vn].astype(np.float64)
tm.assert_frame_equal(df, df0)
def test_12659(datapath):
fname = datapath("io", "sas", "data", "test_12659.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "test_12659.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0)
def test_airline(datapath):
fname = datapath("io", "sas", "data", "airline.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "airline.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0, check_exact=False)
def test_date_time(datapath):
# Support of different SAS date/datetime formats (PR #15871)
fname = datapath("io", "sas", "data", "datetime.sas7bdat")
df = pd.read_sas(fname)
fname = datapath("io", "sas", "data", "datetime.csv")
df0 = pd.read_csv(
fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"]
)
# GH 19732: Timestamps imported from sas will incur floating point errors
df.iloc[:, 3] = df.iloc[:, 3].dt.round("us")
tm.assert_frame_equal(df, df0)
@pytest.mark.parametrize("column", ["WGT", "CYL"])
def test_compact_numerical_values(datapath, column):
# Regression test for #21616
fname = datapath("io", "sas", "data", "cars.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
# The two columns CYL and WGT in cars.sas7bdat have column
# width < 8 and only contain integral values.
# Test that pandas doesn't corrupt the numbers by adding
# decimals.
result = df[column]
expected = df[column].round()
tm.assert_series_equal(result, expected, check_exact=True)
def test_many_columns(datapath):
# Test for looking for column information in more places (PR #22628)
fname = datapath("io", "sas", "data", "many_columns.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
fname = datapath("io", "sas", "data", "many_columns.csv")
df0 = pd.read_csv(fname, encoding="latin-1")
tm.assert_frame_equal(df, df0)
def test_inconsistent_number_of_rows(datapath):
# Regression test for issue #16615. (PR #22628)
fname = datapath("io", "sas", "data", "load_log.sas7bdat")
df = pd.read_sas(fname, encoding="latin-1")
assert len(df) == 2097
def test_zero_variables(datapath):
# Check if the SAS file has zero variables (PR #18184)
fname = datapath("io", "sas", "data", "zero_variables.sas7bdat")
with pytest.raises(EmptyDataError, match="No columns to parse from file"):
pd.read_sas(fname)
def test_corrupt_read(datapath):
# We don't really care about the exact failure, the important thing is
# that the resource should be cleaned up afterwards (BUG #35566)
fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
msg = "'SAS7BDATReader' object has no attribute 'row_count'"
with pytest.raises(AttributeError, match=msg):
pd.read_sas(fname)
def round_datetime_to_ms(ts):
if isinstance(ts, datetime):
return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000)
elif isinstance(ts, str):
_ts = dateutil.parser.parse(timestr=ts)
return _ts.replace(microsecond=int(round(_ts.microsecond, -3) / 1000) * 1000)
else:
return ts
def test_max_sas_date(datapath):
# GH 20927
# NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999
# but this is read as 29DEC9999:23:59:59.998993 by a buggy
# sas7bdat module
fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
df = pd.read_sas(fname, encoding="iso-8859-1")
# SAS likes to left pad strings with spaces - lstrip before comparing
df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x)
# GH 19732: Timestamps imported from sas will incur floating point errors
try:
df["dt_as_dt"] = df["dt_as_dt"].dt.round("us")
except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime:
df = df.applymap(round_datetime_to_ms)
except AttributeError:
df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms)
# if there are any date/times > pandas.Timestamp.max then ALL in that chunk
# are returned as datetime.datetime
expected = pd.DataFrame(
{
"text": ["max", "normal"],
"dt_as_float": [253717747199.999, 1880323199.999],
"dt_as_dt": [
datetime(9999, 12, 29, 23, 59, 59, 999000),
datetime(2019, 8, 1, 23, 59, 59, 999000),
],
"date_as_float": [2936547.0, 21762.0],
"date_as_date": [datetime(9999, 12, 29), datetime(2019, 8, 1)],
},
columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"],
)
tm.assert_frame_equal(df, expected)
def test_max_sas_date_iterator(datapath):
# GH 20927
# when called as an iterator, only those chunks with a date > pd.Timestamp.max
# are returned as datetime.datetime, if this happens that whole chunk is returned
# as datetime.datetime
col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"]
fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
results = []
for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1):
# SAS likes to left pad strings with spaces - lstrip before comparing
df = df.applymap(lambda x: x.lstrip() if isinstance(x, str) else x)
# GH 19732: Timestamps imported from sas will incur floating point errors
try:
df["dt_as_dt"] = df["dt_as_dt"].dt.round("us")
except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime:
df = df.applymap(round_datetime_to_ms)
except AttributeError:
df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms)
df.reset_index(inplace=True, drop=True)
results.append(df)
expected = [
pd.DataFrame(
{
"text": ["max"],
"dt_as_float": [253717747199.999],
"dt_as_dt": [datetime(9999, 12, 29, 23, 59, 59, 999000)],
"date_as_float": [2936547.0],
"date_as_date": [datetime(9999, 12, 29)],
},
columns=col_order,
),
pd.DataFrame(
{
"text": ["normal"],
"dt_as_float": [1880323199.999],
"dt_as_dt": [np.datetime64("2019-08-01 23:59:59.999")],
"date_as_float": [21762.0],
"date_as_date": [np.datetime64("2019-08-01")],
},
columns=col_order,
),
]
for result, expected in zip(results, expected):
tm.assert_frame_equal(result, expected)
def test_null_date(datapath):
fname = datapath("io", "sas", "data", "dates_null.sas7bdat")
df = pd.read_sas(fname, encoding="utf-8")
expected = pd.DataFrame(
{
"datecol": [
datetime(9999, 12, 29),
pd.NaT,
],
"datetimecol": [
datetime(9999, 12, 29, 23, 59, 59, 998993),
pd.NaT,
],
},
)
tm.assert_frame_equal(df, expected)
| pandas-dev/pandas | pandas/tests/io/sas/test_sas7bdat.py | Python | bsd-3-clause | 12,142 |
import sys
import time
import threading
import BaseHTTPServer
import json
import kafka
from monascaclient import client
from monascaclient import ksclient
import warnings
# suppress warnings to improve performance
def no_warnings(*args):
pass
warnings.showwarning = no_warnings
max_wait_time = 20 # seconds
# May result in invalid measurements if disabled when notification engine is
# configured to retry notifications
ack_notifications = True
keystone = {
'username': 'mini-mon',
'password': 'password',
'project': 'test',
# 'auth_url': 'http://10.22.156.11:35357/v3',
'auth_url': 'http://192.168.10.5:35357/v3'
}
# monasca api urls
urls = [
# 'https://mon-ae1test-monasca01.useast.hpcloud.net:8080/v2.0',
# 'https://mon-ae1test-monasca02.useast.hpcloud.net:8080/v2.0',
# 'https://mon-ae1test-monasca03.useast.hpcloud.net:8080/v2.0',
'http://192.168.10.4:8080/v2.0',
]
# requires ip and port (default port is 9092)
kafka_host = '192.168.10.4:9092'
# kafka_hosts = ['10.22.156.11:9092','10.22.156.12:9092','10.22.156.13:9092']
kafka_topic = "alarm-state-transitions"
# server to catch the webhooks
webhook_server_config = ('192.168.10.4', 8001)
# webhook_server_config = ('10.22.156.11', 8001)
notification_method = {
'name': 'Test',
'type': 'WEBHOOK',
'address': 'http://{}:{}'.format(webhook_server_config[0],
webhook_server_config[1])
}
alarm_definition = {
'name': 'Test223',
'expression': 'alarm_perf < 10',
'alarm_actions': [],
'ok_actions': [],
'undetermined_actions': []
}
base_message = {
"alarm-transitioned":
{"tenantId": "nothing",
"alarmId": "noID",
"alarmDefinitionId": "notAnID",
"metrics":
[{"id": "null",
"name": "this is a test",
"dimensions":
{"service": "monitoring",
"hostname": "mini-mon"}}],
"alarmName": "TestingTesting",
"alarmDescription": "This is a test of the notification engine",
"oldState": "UNDETERMINED",
"newState": "OK",
"actionsEnabled": "true",
"stateChangeReason": "Because I made it so",
"severity": "LOW",
"timestamp": 1422918282}}
response_count = 0
last_response = 0
stop_server = False
class TestHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
global response_count
global last_response
response_count += 1
last_response = time.time()
if ack_notifications:
self.send_response(200, 'OK')
def do_nothing(self, *args):
pass
log_message = do_nothing
def check_notifications(server_class=BaseHTTPServer.HTTPServer,
handler_class=TestHTTPRequestHandler):
httpd = server_class(webhook_server_config, handler_class)
httpd.timeout = 1
while response_count < 1 and not stop_server:
httpd.handle_request()
def create_notification_method(mon_client):
for notification in mon_client.notifications.list():
if (notification['type'] == notification_method['type'] and
notification['address'] == notification_method['address']):
print("Already exists, ID: {}".format(notification['id']))
return notification['id']
try:
resp = mon_client.notifications.create(**notification_method)
print('Notification Method ID: {}'.format(resp['id']))
return resp['id']
except Exception as ex:
print('Could not create notification method: {}'.format(ex))
return None
def create_alarm_definition(mon_client):
for definition in mon_client.alarm_definitions.list():
if definition['name'] == alarm_definition['name']:
mon_client.alarm_definitions.delete(alarm_id=definition['id'])
try:
resp = mon_client.alarm_definitions.create(**alarm_definition)
print('Alarm Definition ID: {}'.format(resp['id']))
return resp['id']
except Exception as ex:
print('Could not create alarm definition: {}'.format(ex))
return None
def produce_alarm_state_transition():
kafka_client = kafka.client.KafkaClient(kafka_host)
kafka_producer = kafka.producer.SimpleProducer(kafka_client, async=False)
base_message["alarm-transitioned"]["timestamp"] = int(time.time())
kafka_producer.send_messages(kafka_topic,
json.dumps(base_message))
def notification_function_test():
global last_response
global response_count
global stop_server
try:
print('Authenticating with keystone on {}'.
format(keystone['auth_url']))
ks_client = ksclient.KSClient(**keystone)
except Exception as ex:
print('Failed to authenticate: {}'.format(ex))
return False
token = ks_client.token
mon_client = client.Client('2_0', urls[0], token=token)
print("Creating notification method")
notification_id = create_notification_method(mon_client)
if not notification_id:
return False
alarm_definition['ok_actions'].append(notification_id)
alarm_definition['alarm_actions'].append(notification_id)
alarm_definition['undetermined_actions'].append(notification_id)
print("Creating alarm definition")
alarm_def_id = create_alarm_definition(mon_client)
if not alarm_def_id:
return False
base_message['alarm-transitioned']['alarmDefinitionId'] = alarm_def_id
server = threading.Thread(target=check_notifications,
args=(BaseHTTPServer.HTTPServer,
TestHTTPRequestHandler))
server.start()
time.sleep(1)
start_time = time.time()
produce_alarm_state_transition()
last_response = time.time()
print("Waiting for notifications")
while server.isAlive():
if(last_response + max_wait_time) < time.time():
stop_server = True
print("Max wait time exceeded after {} responses".
format(response_count))
return False
server.join((last_response+max_wait_time)-time.time())
final_time = time.time()
print("-----Test Results-----")
print("{} notifications arrived in {} seconds".
format(response_count, final_time-start_time))
return True
def main():
if not notification_function_test():
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| hpcloud-mon/monasca-perf | monasca_perf/notification_functional.py | Python | apache-2.0 | 6,537 |
#Copyright 2011 Dan Klinedinst
#
#This file is part of Gibson.
#
#Gibson is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License as published by the
#Free Software Foundation, either version 3 of the License, or any
#later version.
#Gibson is distributed in the hope that it will be useful, but WITHOUT
#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
#for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gibson. If not, see <http://www.gnu.org/licenses/>.
class SluggerBase():
def __init__(self, panda, data):
self.starting_position = (0, 0, 0)
self.data = data
self.panda = panda
def createTunnel(self):
raise "Not Implemented"
def createSlug(self):
raise "Not Implemented" | robscetury/gibson | lib/gibson/slugs/__init__.py | Python | gpl-3.0 | 927 |
import datetime
import luigi
from io import *
from babel_datapipeline.util.misc import *
class AMinerParse(luigi.Task):
date = luigi.DateParameter(default=datetime.date.today())
def requires(self):
return AminerS3Targets()
def output(self):
makedir('citation_dict')
return luigi.LocalTarget(path='citation_dict/aminer_parse_%s.txt' % self.date)
def run(self):
from babel_util.parsers import aminer
p = aminer.AMinerParser()
with self.output().open('w') as outfile:
with self.input().open('r') as infile:
for paper in p.parse(infile):
for citation in paper["citations"]:
outfile.write("{0} {1}\n".format(paper["id"], citation))
| iwsmith/babel_datapipeline | babel_datapipeline/tasks/parsers.py | Python | agpl-3.0 | 768 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to search the SAO/NASA Astrophysics Data System
:author: Magnus Persson <[email protected]>
"""
import os
from astropy.table import Table
from urllib.parse import quote as urlencode
from ..query import BaseQuery
from ..utils import async_to_sync
from ..utils.class_or_instance import class_or_instance
from .utils import _get_data_from_xml
from . import conf
from xml.dom import minidom
__all__ = ['ADS', 'ADSClass']
@async_to_sync
class ADSClass(BaseQuery):
SERVER = conf.server
QUERY_SIMPLE_PATH = conf.simple_path
TIMEOUT = conf.timeout
ADS_FIELDS = conf.adsfields
SORT = conf.sort
NROWS = conf.nrows
NSTART = conf.nstart
TOKEN = conf.token
QUERY_SIMPLE_URL = SERVER + QUERY_SIMPLE_PATH
def __init__(self, *args):
""" set some parameters """
super(ADSClass, self).__init__()
@class_or_instance
def query_simple(self, query_string, get_query_payload=False,
get_raw_response=False, cache=True):
"""
Basic query. Uses a string and the ADS generic query.
"""
request_string = self._args_to_url(query_string)
request_fields = self._fields_to_url()
request_sort = self._sort_to_url()
request_rows = self._rows_to_url(self.NROWS, self.NSTART)
request_url = self.QUERY_SIMPLE_URL + request_string + request_fields + request_sort + request_rows
# primarily for debug purposes, but also useful if you want to send
# someone a URL linking directly to the data
if get_query_payload:
return request_url
response = self._request(method='GET', url=request_url,
headers={'Authorization': 'Bearer ' + self._get_token()},
timeout=self.TIMEOUT, cache=cache)
response.raise_for_status()
if get_raw_response:
return response
        # parse the JSON response into an AstroPy Table
resulttable = self._parse_response(response.json())
return resulttable
def _parse_response(self, response):
try:
response['response']['docs'][0]['bibcode']
except IndexError:
raise RuntimeError('No results returned!')
# get the list of hits
hitlist = response['response']['docs']
t = Table()
# Grab the various fields and put into AstroPy table
for field in self.ADS_FIELDS:
tmp = _get_data_from_xml(hitlist, field)
t[field] = tmp
return t
def _args_to_url(self, query_string):
# convert arguments to a valid requests payload
# i.e. a dictionary
request_string = 'q=' + urlencode(query_string)
return request_string
def _fields_to_url(self):
request_fields = '&fl=' + ','.join(self.ADS_FIELDS)
return request_fields
def _sort_to_url(self):
request_sort = '&sort=' + urlencode(self.SORT)
return request_sort
def _rows_to_url(self, nrows=10, nstart=0):
request_rows = '&rows=' + str(nrows) + '&start=' + str(nstart)
return request_rows
def _get_token(self):
"""
        Try to get the API token from the places Andy Casey's python ADS
        client expects it, and raise an error if none is found.
"""
if self.TOKEN is not None:
return self.TOKEN
self.TOKEN = os.environ.get('ADS_DEV_KEY', None)
if self.TOKEN is not None:
return self.TOKEN
token_file = os.path.expanduser(os.path.join('~', '.ads', 'dev_key'))
try:
with open(token_file) as f:
self.TOKEN = f.read().strip()
return self.TOKEN
except IOError:
raise RuntimeError('No API token found! Get yours from: '
'https://ui.adsabs.harvard.edu/#user/settings/token '
                               'and store it in the ADS_DEV_KEY environment variable.')
ADS = ADSClass()
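# Minimal usage sketch (illustrative; requires a valid ADS API token found by
# _get_token above, and the query string is only an example):
#
#   from astroquery.nasa_ads import ADS
#   results = ADS.query_simple('transiting exoplanets')
#   print(results['bibcode'])  # 'bibcode' is part of the default ADS_FIELDS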
| ceb8/astroquery | astroquery/nasa_ads/core.py | Python | bsd-3-clause | 4,056 |
#!/usr/bin/env python
'''Ramachandran Plot in 3D by Ch. Fufezan 2009
usage: ramachandranPlot3D.py <bin size for angles> <pdb file>'''
import sys, os, time
from p3d import protein as protein
from collections import defaultdict as ddict
if (__name__ == '__main__'):
if (len(sys.argv) < 3):
print (__doc__)
sys.exit(1)
binSize = float(sys.argv[1])
if 180 % binSize != 0: sys.exit(1)
ramachandranPlot3D = ddict(int)
pdbs = sys.argv[2:]
StartTime = time.time()
for k,entry in enumerate(pdbs):
print('Analysing',entry,end='...')
try:
pdb = protein.Protein(entry,DunbrackNaming=True,BSPTree=False)
'''
query Set alphas to have one atom per residue from both chains
'''
for i,alpha in enumerate(pdb.query('alpha and model 1')):
PhiPsi = alpha.calcPhiPsi(allowAlternativeConfs=False)
ramachandranPlot3D[((PhiPsi[0][0]//binSize)*binSize,(PhiPsi[0][1]//binSize)*binSize)] += 1
'''
calcPhiPsi returns a list but allowAlternativeConfs= is set to False
the list has only one element and thus we can directly access
phi and psi with PhiPsi[0][0] and PhiPsi[0][1] respectively ...
'''
print('done - ',k)
except:
print('FAILED - ',k)
for coords,value in ramachandranPlot3D.items():
print(coords[0],'\t',coords[1],'\t',value) | fu/p3d | exampleScripts/ramachandranPlot3D.py | Python | gpl-3.0 | 1,281 |
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Internal Codehosting API interfaces."""
__metaclass__ = type
__all__ = [
'BRANCH_ALIAS_PREFIX',
'branch_id_alias',
'BRANCH_ID_ALIAS_PREFIX',
'BRANCH_TRANSPORT',
'compose_public_url',
'CONTROL_TRANSPORT',
'IBazaarApplication',
'ICodehostingAPI',
'ICodehostingApplication',
'LAUNCHPAD_ANONYMOUS',
'LAUNCHPAD_SERVICES',
'READ_ONLY',
'SUPPORTED_SCHEMES',
'WRITABLE',
]
import os.path
import urllib
from lazr.uri import URI
from zope.interface import Interface
from lp.app.validators.name import valid_name
from lp.services.config import config
from lp.services.webapp.interfaces import ILaunchpadApplication
# When LAUNCHPAD_SERVICES is provided as a login ID to XML-RPC methods, they
# bypass the normal security checks and give read-only access to all branches.
# This allows Launchpad services like the puller and branch scanner to access
# private branches.
LAUNCHPAD_SERVICES = '+launchpad-services'
assert not valid_name(LAUNCHPAD_SERVICES), (
"%r should *not* be a valid name." % (LAUNCHPAD_SERVICES,))
# When LAUNCHPAD_ANONYMOUS is passed, the XML-RPC methods behave as if no user
# was logged in.
LAUNCHPAD_ANONYMOUS = '+launchpad-anonymous'
assert not valid_name(LAUNCHPAD_ANONYMOUS), (
"%r should *not* be a valid name." % (LAUNCHPAD_ANONYMOUS,))
# These are used as permissions for getBranchInformation.
READ_ONLY = 'r'
WRITABLE = 'w'
# Indicates that a path's real location is on a branch transport.
BRANCH_TRANSPORT = 'BRANCH_TRANSPORT'
# Indicates that a path points to a control directory.
CONTROL_TRANSPORT = 'CONTROL_TRANSPORT'
# The path prefix for getting at branches via their short name.
BRANCH_ALIAS_PREFIX = '+branch'
# The path prefix for getting at branches via their id.
BRANCH_ID_ALIAS_PREFIX = '+branch-id'
def branch_id_alias(branch):
"""Return the path using the branch id alias."""
return '/%s/%s' % (BRANCH_ID_ALIAS_PREFIX, branch.id)
# The scheme types that are supported for codehosting.
SUPPORTED_SCHEMES = 'bzr+ssh', 'http'
class IBazaarApplication(ILaunchpadApplication):
"""Bazaar Application"""
class ICodehostingApplication(ILaunchpadApplication):
"""Branch Puller application root."""
class ICodehostingAPI(Interface):
"""The codehosting XML-RPC interface to Launchpad.
Published at 'codehosting' on the private XML-RPC server.
The code hosting service and puller use this to register branches, to
retrieve information about a user's branches, and to update their status.
"""
def acquireBranchToPull(branch_type_names):
"""Return a Branch to pull and mark it as mirror-started.
:param branch_type_names: Only consider branches of these type names.
An empty list means consider HOSTED, MIRRORED and IMPORTED
branches.
:return: A 5-tuple::
(branch_id, pull_url, unique_name, default_branch, branch_type)
where:
* branch_id is the database id of the branch,
* pull_url is where to pull from,
* unique_name is the unique_name of the branch,
* default_branch is the unique name of the default stacked on
branch for the branch's target (or '' if there is no such
branch), and
* branch_type is one of 'hosted', 'mirrored', or 'imported'.
or (), the empty tuple, if there is no branch to pull.
"""
def mirrorFailed(branchID, reason):
"""Notify Launchpad that the branch could not be mirrored.
The mirror_failures counter for the given branch record will be
incremented and the next_mirror_time will be set to NULL.
:param branchID: The database ID of the given branch.
:param reason: A string giving the reason for the failure.
:returns: True if the branch status was successfully updated.
`NoBranchWithID` fault if there's no branch with the given id.
"""
def recordSuccess(name, hostname, date_started, date_completed):
"""Notify Launchpad that a mirror script has successfully completed.
Create an entry in the ScriptActivity table with the provided data.
:param name: Name of the script.
:param hostname: Where the script was running.
:param date_started: When the script started, as an UTC time tuple.
:param date_completed: When the script completed (now), as an UTC time
tuple.
:returns: True if the ScriptActivity record was successfully inserted.
"""
def createBranch(login_id, branch_path):
"""Register a new hosted branch in Launchpad.
This is called by the bazaar.launchpad.net server when a user
pushes a new branch to it. See also
https://launchpad.canonical.com/SupermirrorFilesystemHierarchy.
:param login_id: the person ID of the user creating the branch.
:param branch_path: the path of the branch to be created. This should
be a URL-escaped string representing an absolute path.
:returns: the ID for the new branch or a Fault if the branch cannot be
created.
"""
def requestMirror(loginID, branchID):
"""Mark a branch as needing to be mirrored.
:param loginID: the person ID of the user requesting the mirror.
:param branchID: a branch ID.
"""
def branchChanged(login_id, branch_id, stacked_on_url, last_revision_id,
control_string, branch_string, repository_string):
"""Record that a branch has been changed.
See `IBranch.branchChanged`.
:param login_id: the person ID of the user changing the branch.
:param branch_id: The database id of the branch to operate on.
:param stacked_on_url: The unique name of the branch this branch is
stacked on, or '' if this branch is not stacked.
:param last_revision_id: The tip revision ID of the branch.
:param control_string: The format string of the control directory of
the branch.
:param branch_string: The format string of the branch.
:param repository_string: The format string of the branch's
repository.
"""
def translatePath(requester_id, path):
"""Translate 'path' so that the codehosting transport can access it.
:param requester_id: the database ID of the person requesting the
path translation.
:param path: the path being translated. This should be a URL escaped
string representing an absolute path.
:raise `PathTranslationError`: if 'path' cannot be translated.
:raise `InvalidPath`: if 'path' is known to be invalid.
:raise `PermissionDenied`: if the requester cannot see the branch.
:returns: (transport_type, transport_parameters, path_in_transport)
where 'transport_type' is one of BRANCH_TRANSPORT or
CONTROL_TRANSPORT, 'transport_parameters' is a dict of data that
the client can use to construct the transport and
'path_in_transport' is a path relative to that transport. e.g.
(BRANCH_TRANSPORT, {'id': 3, 'writable': False}, '.bzr/README').
"""
def compose_public_url(scheme, unique_name, suffix=None):
# Accept sftp as a legacy protocol.
accepted_schemes = set(SUPPORTED_SCHEMES)
accepted_schemes.add('sftp')
assert scheme in accepted_schemes, "Unknown scheme: %s" % scheme
host = URI(config.codehosting.supermirror_root).host
if isinstance(unique_name, unicode):
unique_name = unique_name.encode('utf-8')
# After quoting and encoding, the path should be perfectly
# safe as a plain ASCII string, str() just enforces this
path = '/' + str(urllib.quote(unique_name, safe='/~+'))
if suffix:
path = os.path.join(path, suffix)
return str(URI(scheme=scheme, host=host, path=path))
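# Example (illustrative): the host is taken from
# config.codehosting.supermirror_root, so the hostname below is hypothetical.
#
#   compose_public_url('http', u'~owner/project/branch', suffix='.bzr/README')
#   # -> 'http://bazaar.example.com/~owner/project/branch/.bzr/README'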
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/code/interfaces/codehosting.py | Python | agpl-3.0 | 8,119 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def TaskFilterSpec(vim, *args, **kwargs):
'''This data object type defines the specification for the task filter used to
query tasks in the history collector database. The client creates a task
history collector with a filter specification, then retrieves the tasks from
the task history collector.'''
obj = vim.client.factory.create('ns0:TaskFilterSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'alarm', 'entity', 'eventChainId', 'parentTaskKey', 'rootTaskKey',
'scheduledTask', 'state', 'tag', 'time', 'userName', 'dynamicProperty',
'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| xuru/pyvisdk | pyvisdk/do/task_filter_spec.py | Python | mit | 1,332 |
import re
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_into_from_edit_page(0)
assert contact_from_home_page.homephone == clear(contact_from_edit_page.homephone)
assert contact_from_home_page.mobilephone == clear(contact_from_edit_page.mobilephone)
assert contact_from_home_page.workphone == clear(contact_from_edit_page.workphone)
assert contact_from_home_page.secondaryphone == clear(contact_from_edit_page.secondaryphone)
def test_phones_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_into_from_edit_page(0)
assert contact_from_view_page.homephone == contact_from_edit_page.homephone
assert contact_from_view_page.mobilephone == contact_from_edit_page.mobilephone
assert contact_from_view_page.workphone == contact_from_edit_page.workphone
assert contact_from_view_page.secondaryphone == contact_from_edit_page.secondaryphone
def clear(s):
return re.sub("[() -]", "", s)
| tankisleva/python_training | test/test_phones.py | Python | apache-2.0 | 1,114 |
from ..idea_base import IdeaBaseFinder
class AndroidStudioFinder(IdeaBaseFinder):
application_id = 'com.google.android.studio'
preferences_folder = 'AndroidStudioPreview'
xpath = "//component[@name='RecentProjectsManager']" \
"/option[@name='recentPaths']/list/option/@value" | wolfhechel/alfred-code | finders/androidstudio/__init__.py | Python | mit | 304 |
###
# Copyright (c) 2013, Nicolas Coevoet
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class ChanRegTestCase(PluginTestCase):
plugins = ('ChanReg',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| ncoevoet/ChanReg | test.py | Python | mit | 1,737 |
for i in [1,2,3]:
print(i)
if i == 2:
break
for i in [1,2,3]:
print(i)
for j in [3,4,5]:
print(i*j)
if i*j > 8:
print('youpla!')
break
| rcarmo/pythonium | tests/break-for-loop.py | Python | lgpl-2.1 | 200 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from django.core.urlresolvers import reverse
from mox3.mox import IsA # noqa
from openstack_dashboard.api import nova
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import utils as test_utils
from nec_portal.dashboards.admin.capacity import panel # noqa
INDEX_URL = reverse('horizon:admin:capacity:index')
class CapacityViewTests(test.BaseAdminViewTests):
"""A test of the screen of capacity's index.
CheckPoint 1. A expected template is used.
"""
def test_capacity(self):
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/capacity/capacity/_index.html')
class CapacityAZTabTests(test.BaseAdminViewTests):
"""A test of the screen of capacity's az tab.
CheckPoint 1. A expected template is used.
CheckPoint 2. A expected context is returned.
"""
TEST_GROUP = 'test_az_group'
TEST_NAME = 'test.az:name'
CONTEXT_GROUP = 'az'
def setUp(self):
test.BaseAdminViewTests.setUp(self)
self.testdata = test_utils.TestData()
test_utils.load_test_data(self.testdata)
@mock.patch('novaclient.v2.client.Client')
@test.create_stubs({nova: ('availability_zone_list',), })
def test_capacity_az(self, request):
self.mox.ReplayAll()
url_param = '?group=' + self.TEST_GROUP + '&name=' + self.TEST_NAME
nova.novaclient(self.request).availability_zones = \
self.availability_zones
availability_zone_list = self.availability_zones.list()
for az in availability_zone_list:
if not az.zoneName == 'internal':
context_name = az.zoneName
break
context_url = './capacity_az/detail?group=' + self.CONTEXT_GROUP + \
'&name=' + context_name
res = self.client.get(INDEX_URL + url_param +
'&tab=capacity_group_tabs__capacity_az')
self.assertTemplateUsed(res, 'admin/capacity/capacity_az/_index.html')
self.assertEqual(res.context['detail_url'], context_url)
class CapacityHostTabTests(test.BaseAdminViewTests):
"""A test of the screen of capacity's host tab.
CheckPoint 1. A expected template is used.
CheckPoint 2. A expected context is returned.
"""
TEST_GROUP = 'test_host_group'
TEST_NAME = 'test_host,name'
CONTEXT_GROUP = 'host'
def setUp(self):
test.BaseAdminViewTests.setUp(self)
self.testdata = test_utils.TestData()
test_utils.load_test_data(self.testdata)
@mock.patch('novaclient.v2.client.Client')
@test.create_stubs({nova: ('hypervisor_list',), })
def test_capacity_host(self, request):
self.mox.ReplayAll()
url_param = '?group=' + self.TEST_GROUP + '&name=' + self.TEST_NAME
nova.novaclient(self.request).hypervisors = self.hypervisors
hypervisor_list = self.hypervisors.list()
context_name = hypervisor_list[0].hypervisor_hostname
context_url = './capacity_host/detail?group=' + self.CONTEXT_GROUP + \
'&name=' + context_name
res = self.client.get(INDEX_URL + url_param +
'&tab=capacity_group_tabs__capacity_host')
self.assertTemplateUsed(res,
'admin/capacity/capacity_host/_index.html')
self.assertEqual(res.context['detail_url'], context_url)
| NECCSiPortal/NECCSPortal-dashboard | nec_portal/dashboards/admin/capacity/tests.py | Python | apache-2.0 | 4,009 |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-deferred-delete'
POLICY_ROOT = 'os_compute_api:os-deferred-delete:%s'
deferred_delete_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
base.create_rule_default(
BASE_POLICY_NAME,
base.RULE_ADMIN_OR_OWNER,
        'Restore a soft deleted server or force delete a server before '
        'deferred cleanup',
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (restore)'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (forceDelete)'
}
])
]
def list_rules():
return deferred_delete_policies
| vmturbo/nova | nova/policies/deferred_delete.py | Python | apache-2.0 | 1,462 |
#!/usr/bin/python
'''
Copyright (c) 2013-2014, Magnus Skjegstad / Forsvarets Forskningsinstitutt (FFI)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Authors: Halvdan Hoem Grelland, Magnus Skjegstad
'''
import time
from p2pdprd_types import Node, NodeCollection, IPCMessage
# P2P-dprd IPC protocol
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
class IPCProtocol(DatagramProtocol):
"""
Twisted-implementation of the p2pdprd IPC protocol
"""
def __init__(self, p2pdprd_listening_sock, my_listening_sock, update_callback=None):
"""
Construct the Protocol object.
- p2pdprd_listening_sock is the ipc socket path of the p2pdprd instance
- my_listening_sock is the path to the socket used to receive messages from p2pdprd
- update_callback is called with an array of Nodes whenever an update is received from p2pdprd. Set to None to disable (check self.candidate_nodes instead)
"""
self.candidate_nodes = None
self.p2pdprd_listening_sock = p2pdprd_listening_sock
self.update_callback = update_callback
self.my_listening_sock = my_listening_sock
self.updated = None
def startProtocol(self):
"""Called when a transport is connected to this protocol"""
print "P2P-DPRD IPC-Protocol listening on", self.transport.port
self.sendSubscribe()
def sendDatagram(self, message):
"""Send a datagram over p2p-dprd IPC"""
self.transport.write(message, self.p2pdprd_listening_sock)
def sendSubscribe(self):
# Subscribe to candidate nodes
m = IPCMessage.subscribe_candidate_nodes(self.my_listening_sock)
self.sendDatagram(m.pack())
def sendUnsubscribe(self):
# Unsubscribe to candidate nodes
m = IPCMessage.unsubscribe_candidate_nodes(self.my_listening_sock)
self.sendDatagram(m.pack())
def setLocation(self, lat, lon):
# Set position in lat/long
m = IPCMessage.set_position(lat, lon)
self.sendDatagram(m.pack())
def setCoordinationRange(self, coord_range):
# Set coordination range
m = IPCMessage.set_coordination_range(coord_range)
self.sendDatagram(m.pack())
def datagramReceived(self, datagram, host):
"""Called upon receiving a datagram from p2pdprd IPC"""
# Write to servers' candidate nodes buffer
self.updated = time.time()
self.candidate_nodes = NodeCollection.from_bytes(datagram)
print self.candidate_nodes
if (self.update_callback != None):
self.update_callback(self.candidate_nodes)
###############################################################################
# TESTS #
###############################################################################
def test_protocol():
import os
p2pdprd_listening_sock = "/tmp/p2p-dprd.sock"
my_listening_sock = "/tmp/test.sock"
protocol = IPCProtocol(p2pdprd_listening_sock, my_listening_sock)
# Delete the unix socket path if it already exists
if os.path.exists(my_listening_sock):
os.remove(my_listening_sock)
reactor.listenUNIXDatagram(my_listening_sock, protocol)
reactor.run()
def test_stuff():
# Run tests for dev purposes
# Create a NodeCollection.
nc = NodeCollection(1,2,
[Node(123, 45.45, 23.34, 12, "127.0.0.1", 12345, "128.0.0.1", 54321, 456789),
Node(321, 54.45, 11.98, 10, "128.0.0.1", 12345, "127.0.0.1", 36412, 985446),
Node(123, 45.45, 23.34, 12, "127.0.0.1", 12345, "128.0.0.1", 54321, 456789),
Node(321, 54.45, 11.98, 10, "128.0.0.1", 12345, "127.0.0.1", 36412, 985446),
Node(123, 45.45, 23.34, 12, "127.0.0.1", 12345, "128.0.0.1", 54321, 456789),
Node(321, 54.45, 11.98, 10, "128.0.0.1", 12345, "127.0.0.1", 36412, 985446),
Node(123, 45.45, 23.34, 12, "127.0.0.1", 12345, "128.0.0.1", 54321, 456789),
Node(321, 54.45, 11.98, 10, "128.0.0.1", 12345, "127.0.0.1", 36412, 985446)])
assert(nc.node_count == 8)
print nc, '\n'
# Pack it
binary_data = nc.pack()
assert(binary_data is not None)
# Unpack it
nc_unpacked = NodeCollection.from_bytes(binary_data)
assert(str(nc) == str(nc_unpacked))
def test_stuff_ipc():
sub_msg = IPCMessage.subscribe_candidate_nodes('/tmp/pypypy.py').pack()
print sub_msg
pos_msg = IPCMessage.set_position(11.43, 67.65).pack()
print pos_msg
coord_msg = IPCMessage.set_coordination_range(23).pack()
print coord_msg
unsub_msg = IPCMessage.unsubscribe_candidate_nodes('/tmp/pypypy.py').pack()
print unsub_msg
import socket
out_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
out_sock.connect('/tmp/p2p-dprd.sock')
out_sock.send(sub_msg)
out_sock.send(pos_msg)
out_sock.send(coord_msg)
out_sock.send(unsub_msg)
###############################################################################
# Uncomment to run tests #
###############################################################################
#test_stuff()
#test_stuff_ipc()
#test_protocol()
| MagnusS/p2p-dprd | python/p2pdprd.py | Python | bsd-2-clause | 6,539 |
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from platformio import exception
def get_test_names(config):
test_dir = config.get("platformio", "test_dir")
if not os.path.isdir(test_dir):
raise exception.TestDirNotExists(test_dir)
names = []
for item in sorted(os.listdir(test_dir)):
if os.path.isdir(os.path.join(test_dir, item)):
names.append(item)
if not names:
names = ["*"]
return names
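# Minimal usage sketch with a hypothetical stand-in for the project config
# (the real caller passes PlatformIO's project configuration object, whose
# get("platformio", "test_dir") resolves to the project's test directory):
#
#   class _StubConfig:
#       def get(self, section, option):
#           return "test"
#
#   print(get_test_names(_StubConfig()))  # e.g. ['embedded', 'native'] or ['*']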
| platformio/platformio-core | platformio/commands/test/helpers.py | Python | apache-2.0 | 1,029 |
from importlib import import_module
from django.conf import settings
# Default settings
BOOTSTRAP3_DEFAULTS = {
"css_url": {
"url": "https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css",
"integrity": "sha384-HSMxcRTRxnN+Bdg0JdbxYKrThecOKuH5zCYotlSAcp1+c8xmyTe9GYg1l9a69psu",
"crossorigin": "anonymous",
},
"theme_url": None,
"javascript_url": {
"url": "https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js",
"integrity": "sha384-aJ21OjlMXNL5UyIl/XNwTMqvzeRMZH2w8c5cRVpzpU8Y5bApTppSuUkhZXN0VxHd",
"crossorigin": "anonymous",
},
"jquery_url": "//code.jquery.com/jquery.min.js",
"javascript_in_head": False,
"include_jquery": False,
"horizontal_label_class": "col-md-3",
"horizontal_field_class": "col-md-9",
"set_placeholder": True,
"required_css_class": "",
"error_css_class": "has-error",
"success_css_class": "has-success",
"formset_renderers": {"default": "bootstrap3.renderers.FormsetRenderer"},
"form_renderers": {"default": "bootstrap3.renderers.FormRenderer"},
"field_renderers": {
"default": "bootstrap3.renderers.FieldRenderer",
"inline": "bootstrap3.renderers.InlineFieldRenderer",
},
}
def get_bootstrap_setting(name, default=None):
"""Read a setting."""
# Start with a copy of default settings
bootstrap3 = BOOTSTRAP3_DEFAULTS.copy()
# Override with user settings from settings.py
bootstrap3.update(getattr(settings, "BOOTSTRAP3", {}))
# Update use_i18n
bootstrap3["use_i18n"] = i18n_enabled()
return bootstrap3.get(name, default)
def jquery_url():
"""Return the full url to jQuery file to use."""
return get_bootstrap_setting("jquery_url")
def javascript_url():
"""Return the full url to the Bootstrap JavaScript file."""
return get_bootstrap_setting("javascript_url")
def css_url():
"""Return the full url to the Bootstrap CSS file."""
return get_bootstrap_setting("css_url")
def theme_url():
"""Return the full url to the theme CSS file."""
return get_bootstrap_setting("theme_url")
def i18n_enabled():
"""Return the projects i18n setting."""
return getattr(settings, "USE_I18N", False)
def get_renderer(renderers, **kwargs):
layout = kwargs.get("layout", "")
path = renderers.get(layout, renderers["default"])
mod, cls = path.rsplit(".", 1)
return getattr(import_module(mod), cls)
def get_formset_renderer(**kwargs):
renderers = get_bootstrap_setting("formset_renderers")
return get_renderer(renderers, **kwargs)
def get_form_renderer(**kwargs):
renderers = get_bootstrap_setting("form_renderers")
return get_renderer(renderers, **kwargs)
def get_field_renderer(**kwargs):
renderers = get_bootstrap_setting("field_renderers")
return get_renderer(renderers, **kwargs)
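# Illustrative override (hypothetical project settings.py): any key from
# BOOTSTRAP3_DEFAULTS above can be replaced through a BOOTSTRAP3 dict, e.g.
#
#   BOOTSTRAP3 = {
#       "horizontal_label_class": "col-md-2",
#       "horizontal_field_class": "col-md-10",
#       "set_placeholder": False,
#   }
#
# after which get_bootstrap_setting("set_placeholder") returns False.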
| dyve/django-bootstrap3 | src/bootstrap3/bootstrap.py | Python | bsd-3-clause | 2,894 |
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
#self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) # previous stride is 2
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(14)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
#x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
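# Minimal smoke test (illustrative). Note that this variant drops the usual
# 3x3 max-pool after conv1 and averages over a 14x14 feature map, so a
# standard 224x224 input reduces to 1x1 before the fully connected layer.
if __name__ == '__main__':
    import torch
    model = resnet50(pretrained=False, num_classes=10)
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])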
| hooram/ownphotos-backend | wideresnet.py | Python | mit | 6,597 |
# -*- coding: utf-8 -*-
# © 2013-2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Partner relations",
"version": "9.0.1.1.1",
"author": "Therp BV,Camptocamp,Odoo Community Association (OCA)",
"complexity": "normal",
"category": "Customer Relationship Management",
"license": "AGPL-3",
"depends": [
'base',
],
"demo": [
"data/demo.xml",
],
"data": [
"views/res_partner_relation_all.xml",
'views/res_partner.xml',
'views/res_partner_relation_type.xml',
'views/menu.xml',
'security/ir.model.access.csv',
],
"auto_install": False,
"installable": True,
}
| be-cloud-be/horizon-addons | partner-contact/partner_multi_relation/__openerp__.py | Python | agpl-3.0 | 726 |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.openstack.common import log as logging
from trove.instance.models import DBInstance
LOG = logging.getLogger(__name__)
class Account(object):
"""Shows all trove instance ids owned by an account."""
def __init__(self, id, instance_ids):
self.id = id
self.instance_ids = instance_ids
@staticmethod
def load(context, id):
db_infos = DBInstance.find_all(tenant_id=id, deleted=False)
instance_ids = []
for db_info in db_infos:
instance_ids.append(db_info.id)
return Account(id, instance_ids)
class AccountsSummary(object):
def __init__(self, accounts):
self.accounts = accounts
@classmethod
def load(cls):
# TODO(pdmars): This should probably be changed to a more generic
# database filter query if one is added, however, this should suffice
# for now.
db_infos = DBInstance.find_all(deleted=False)
tenant_ids_for_instances = [db_info.tenant_id for db_info in db_infos]
tenant_ids = set(tenant_ids_for_instances)
LOG.debug("All tenants with instances: %s" % tenant_ids)
accounts = []
for tenant_id in tenant_ids:
num_instances = tenant_ids_for_instances.count(tenant_id)
accounts.append({'id': tenant_id, 'num_instances': num_instances})
return cls(accounts)
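# Illustrative call pattern (not part of the original module, only a sketch): both
# loaders are entry points for the account admin extension, so a hypothetical API
# controller holding a request `context` and a `tenant_id` would use them roughly as:
#
#     account = Account.load(context, tenant_id)   # ids of that tenant's instances
#     summary = AccountsSummary.load()             # per-tenant instance counts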
| CMSS-BCRDB/RDSV1.0 | trove/extensions/account/models.py | Python | apache-2.0 | 2,015 |
#!/usr/bin/env python
from datetime import datetime
from ocad_primitives import *
class ocad_project:
def __init__(self, data, title):
self.data = data
self.lastChanged = ""
self.listIndex = 0
self.updateDate()
self.title = title # TODO : get only filename
self.ontology = ontology()
self.layout = layout()
def addToList(self):
self.updateDate()
l = self.data.builder.get_object("recentProjects")
l.append([self.title, self.lastChanged])
self.listIndex = len(l)-1
def delFromList(self):
l = self.data.builder.get_object("recentProjects")
l.remove(l.get_iter(self.listIndex))
def updateDate(self):
self.lastChanged = datetime.now().strftime("%d.%m.%Y %H:%M")
def save(self):
p_file = "../Projects/" + self.title + ".ocp"
f = open(".ocad.tmp", "w") # remember
f.write(p_file)
f.close()
f = open(p_file, "w")
f.write(self.title + "\n")
f.write(self.ontology.generic_path + "\n")
f.write(self.ontology.specific_path + "\n")
f.write(self.layout.path + "\n")
f.write(self.lastChanged)
f.close()
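# Illustrative note (not part of the original file): save() above writes a small
# line-oriented .ocp text file -- title, generic ontology path, specific ontology
# path, layout path, and the last-changed timestamp, one per line (the values
# below are hypothetical):
#
#     MyProject
#     /home/user/ontologies/generic.owl
#     /home/user/ontologies/specific.owl
#     /home/user/layouts/default.layout
#     07.03.2017 14:32
#
# It also refreshes the hidden .ocad.tmp file, which only stores the path of the
# most recently saved project file.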
| Victor-Haefner/ontocad | src/ocad_project.py | Python | gpl-3.0 | 1,067 |
import argparse
_ascii = None
_hex = None
_fn = None
def main():
global _ascii
global _hex
global _fn
print _ascii
print _hex
print _fn
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='convert HEX < === > ASCII')
    parser.add_argument('-ascii', dest='ascii', help='ASCII string to convert')
    parser.add_argument('-hex', dest='hex', help='treat the input as HEX', action='store_true')
    parser.add_argument('-f', type=str, required=False, metavar='file', dest='fn', help='file name')
    # parse once and unpack; `-hex` is a store_true flag, so it defaults to False, not None
    args = parser.parse_args()
    _ascii = args.ascii
    _hex = args.hex
    _fn = args.fn
    if _ascii is None and not _hex:
        print 'no arguments input'
        print _fn
    else:
        main()
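# Illustrative invocations (not part of the original script; the file name and
# argument values are hypothetical):
#
#     python hex2ascii.py -ascii "Hello" -f dump.txt
#     python hex2ascii.py -hex -f dump.txt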
| softtyphoon/tz | tools/hex2ascii.py | Python | gpl-2.0 | 697 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorArray: a dynamically sized array of Tensors.
@@TensorArray
"""
# Mixture of pep8 and non-pep8 names, so disable pylint bad-name
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use
# TensorArray object accesses many of the hidden generated ops, but is
# in fact built to wrap these methods.
# pylint: disable=protected-access
class TensorArray(object):
"""Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
This class is meant to be used with dynamic iteration primitives such as
`while_loop` and `map_fn`. It supports gradient back-propagation via special
"flow" control flow dependencies.
"""
def __init__(self,
dtype,
size=None,
dynamic_size=None,
clear_after_read=None,
tensor_array_name=None,
handle=None,
flow=None,
infer_shape=True,
element_shape=None,
colocate_with_first_write_call=True,
name=None):
"""Construct a new TensorArray or wrap an existing TensorArray handle.
A note about the parameter `name`:
The name of the `TensorArray` (even if passed in) is uniquified: each time
a new `TensorArray` is created at runtime it is assigned its own name for
the duration of the run. This avoids name collisions if a `TensorArray`
is created within a `while_loop`.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values after reading them. This disables read-many
semantics, but allows early release of memory.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`.
infer_shape: (optional, default: True) If True, shape inference
is enabled. In this case, all elements must have the same shape.
element_shape: (optional, default: None) A `TensorShape` object specifying
the shape constraints of each of the elements of the TensorArray.
Need not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the
device context available during its initialization.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor.
"""
if handle is not None and tensor_array_name:
raise ValueError(
"Cannot construct with both handle and tensor_array_name")
if handle is not None and not isinstance(handle, ops.Tensor):
raise TypeError("Handle must be a Tensor")
if handle is None and size is None:
raise ValueError("Size must be provided if handle is not provided")
if handle is not None and size is not None:
raise ValueError("Cannot provide both a handle and size "
"at the same time")
if handle is not None and element_shape is not None:
raise ValueError("Cannot provide both a handle and element_shape "
"at the same time")
if handle is not None and dynamic_size is not None:
raise ValueError("Cannot provide both a handle and dynamic_size "
"at the same time")
if handle is not None and clear_after_read is not None:
raise ValueError("Cannot provide both a handle and clear_after_read "
"at the same time")
if clear_after_read is None:
clear_after_read = True
dynamic_size = dynamic_size or False
self._dtype = dtype
# Used to keep track of what tensors the TensorArray should be
# colocated with. We choose to colocate the TensorArray with the
# first tensor written to it.
self._colocate_with_first_write_call = colocate_with_first_write_call
if colocate_with_first_write_call:
self._colocate_with = []
else:
self._colocate_with = None
# Record the current static shape for the array elements. The element
# shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes check for
# shape equality.
if element_shape is None:
self._infer_shape = infer_shape
self._element_shape = []
else:
self._infer_shape = True
self._element_shape = [tensor_shape.TensorShape(element_shape)]
with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
if handle is not None:
self._handle = handle
if flow is None:
raise ValueError("flow must not be None if handle is not None.")
self._flow = flow
else:
# Construct the TensorArray with an empty device. The first
# write into the TensorArray from a Tensor with a set device
# will retroactively set the device value of this op.
def create():
return gen_data_flow_ops._tensor_array_v3(
dtype=dtype,
size=size,
element_shape=element_shape,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name,
name=scope)
if colocate_with_first_write_call:
with ops.device(None), ops.colocate_with(None, ignore_existing=True):
self._handle, self._flow = create()
else:
self._handle, self._flow = create()
@property
def flow(self):
"""The flow `Tensor` forcing ops leading to this TensorArray state."""
return self._flow
@property
def dtype(self):
"""The data type of this TensorArray."""
return self._dtype
@property
def handle(self):
"""The reference to the TensorArray."""
return self._handle
def _merge_element_shape(self, shape):
"""Changes the element shape of the array given a shape to merge with.
Args:
shape: A `TensorShape` object to merge with.
Raises:
ValueError: if the provided shape is incompatible with the current
element shape of the `TensorArray`.
"""
if self._element_shape:
if not shape.is_compatible_with(self._element_shape[0]):
raise ValueError(
"Inconsistent shapes: saw %s but expected %s "
"(and infer_shape=True)" % (shape, self._element_shape[0]))
self._element_shape[0] = self._element_shape[0].merge_with(shape)
else:
self._element_shape.append(shape)
@contextlib.contextmanager
def _maybe_colocate_with(self, value):
"""Colocate operations with an internal colocation group or `value`.
Args:
value: `Tensor`, the tensor to try to colocate with.
Yields:
Does not yield anything, but the new context is a colocation context.
If no internal colocation group is set, colocate with `value` and set
the internal colocation group to be value.
"""
if not self._colocate_with_first_write_call:
yield
else:
if not self._colocate_with:
self._colocate_with.append(value)
with ops.colocate_with(self._colocate_with[0]):
yield
def identity(self):
"""Returns a TensorArray with the same content and properties.
Returns:
A new TensorArray object with flow that ensures the control dependencies
from the contexts will become control dependencies for writes, reads, etc.
      Use this object for all subsequent operations.
"""
flow = array_ops.identity(self._flow)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow,
infer_shape=self._infer_shape,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
return ta
def grad(self, source, flow=None, name=None):
# tensor_array_grad requires a flow input when forward
# TensorArrays are dynamically sized. This forces the creation
# of the grad TensorArray only once the final forward array's size
# is fixed.
if flow is None:
flow = self.flow
with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
with ops.colocate_with(self._handle):
g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
handle=self._handle, source=source, flow_in=flow, name=name)
with ops.control_dependencies([g_handle]):
flow = array_ops.identity(flow, name="gradient_flow")
g = TensorArray(
dtype=self._dtype,
handle=g_handle,
flow=flow,
infer_shape=self._infer_shape,
colocate_with_first_write_call=False)
g._element_shape = self._element_shape
return g
def read(self, index, name=None):
"""Read the value at location `index` in the TensorArray.
Args:
index: 0-D. int32 tensor with the index to read from.
name: A name for the operation (optional).
Returns:
The tensor at index `index`.
"""
value = gen_data_flow_ops._tensor_array_read_v3(
handle=self._handle,
index=index,
flow_in=self._flow,
dtype=self._dtype,
name=name)
if self._element_shape:
value.set_shape(self._element_shape[0].dims)
return value
@tf_should_use.should_use_result
def write(self, index, value, name=None):
"""Write `value` into index `index` of the TensorArray.
Args:
index: 0-D. int32 scalar with the index to write to.
value: N-D. Tensor of type `dtype`. The Tensor to write to this index.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if there are more writers than specified.
"""
with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
flow_out = gen_data_flow_ops._tensor_array_write_v3(
handle=self._handle,
index=index,
value=value,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape:
ta._merge_element_shape(value.get_shape())
return ta
def stack(self, name=None):
"""Return the values in the TensorArray as a stacked `Tensor`.
All of the values must have been written and their shapes must all match.
If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray stacked into one tensor.
"""
with ops.colocate_with(self._handle):
with ops.name_scope(name, "TensorArrayStack", [self._handle]):
return self.gather(math_ops.range(0, self.size()), name=name)
def gather(self, indices, name=None):
"""Return selected values in the TensorArray as a packed `Tensor`.
    All of the selected values must have been written and their shapes
must all match.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
name: A name for the operation (optional).
Returns:
      The tensors in the `TensorArray` selected by `indices`, packed into one tensor.
"""
if self._element_shape:
element_shape = self._element_shape[0]
else:
element_shape = tensor_shape.TensorShape(None)
value = gen_data_flow_ops._tensor_array_gather_v3(
handle=self._handle,
indices=indices,
flow_in=self._flow,
dtype=self._dtype,
name=name,
element_shape=element_shape)
if self._element_shape and self._element_shape[0].dims is not None:
value.set_shape([None] + self._element_shape[0].dims)
return value
def concat(self, name=None):
"""Return the values in the TensorArray as a concatenated `Tensor`.
All of the values must have been written, their ranks must match, and
    their shapes must all match for all dimensions except the first.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray concatenated into one tensor.
"""
if self._element_shape and self._element_shape[0].dims is not None:
element_shape_except0 = (
tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
else:
element_shape_except0 = tensor_shape.TensorShape(None)
value, _ = gen_data_flow_ops._tensor_array_concat_v3(
handle=self._handle,
flow_in=self._flow,
dtype=self._dtype,
name=name,
element_shape_except0=element_shape_except0)
if self._element_shape and self._element_shape[0].dims is not None:
value.set_shape([None] + self._element_shape[0].dims[1:])
return value
@tf_should_use.should_use_result
def unstack(self, value, name=None):
"""Unstack the values of a `Tensor` in the TensorArray.
If input value shapes have rank-`R`, then the output TensorArray will
contain elements whose shapes are rank-`(R-1)`.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the unstack occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
num_elements = array_ops.shape(value)[0]
return self.scatter(
indices=math_ops.range(0, num_elements), value=value, name=name)
@tf_should_use.should_use_result
def scatter(self, indices, value, name=None):
"""Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the scatter occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArrayScatter",
[self._handle, value, indices]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
flow_out = gen_data_flow_ops._tensor_array_scatter_v3(
handle=self._handle,
indices=indices,
value=value,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape:
val_shape = flow_out.op.inputs[2].get_shape()
element_shape = tensor_shape.unknown_shape()
if val_shape.dims is not None:
element_shape = tensor_shape.TensorShape(val_shape.dims[1:])
ta._merge_element_shape(element_shape)
return ta
@tf_should_use.should_use_result
def split(self, value, lengths, name=None):
"""Split the values of a `Tensor` into the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
lengths: 1-D. int32 vector with the lengths to use when splitting
`value` along its first dimension.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the split occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArraySplit",
[self._handle, value, lengths]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
lengths_64 = math_ops.to_int64(lengths)
flow_out = gen_data_flow_ops._tensor_array_split_v3(
handle=self._handle,
value=value,
lengths=lengths_64,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape:
val_shape = flow_out.op.inputs[1].get_shape()
clengths = tensor_util.constant_value(flow_out.op.inputs[2])
element_shape = tensor_shape.unknown_shape()
if val_shape.dims is not None:
if clengths is not None and clengths.max() == clengths.min():
element_shape = tensor_shape.TensorShape([clengths[0]] +
val_shape.dims[1:])
ta._merge_element_shape(element_shape)
return ta
def size(self, name=None):
"""Return the size of the TensorArray."""
return gen_data_flow_ops._tensor_array_size_v3(
handle=self._handle, flow_in=self.flow, name=name)
@tf_should_use.should_use_result
def close(self, name=None):
"""Close the current TensorArray."""
return gen_data_flow_ops._tensor_array_close_v3(
handle=self._handle, name=name)
# pylint: enable=protected-access
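# Illustrative usage (not part of the original module): a minimal, hedged sketch of
# the `while_loop` pattern described in the TensorArray class docstring, written
# against the public TF 1.x graph-mode API (`tf.TensorArray`, `tf.while_loop`,
# `tf.Session`) that wraps this implementation.
if __name__ == "__main__":
  import tensorflow as tf
  ta = tf.TensorArray(dtype=tf.float32, size=5)
  def body(i, ta):
    # each write returns a new TensorArray object carrying the updated flow tensor
    return i + 1, ta.write(i, tf.cast(i * i, tf.float32))
  _, ta_final = tf.while_loop(lambda i, ta: i < 5, body, [tf.constant(0), ta])
  stacked = ta_final.stack()  # shape [5]: the written elements packed into one tensor
  with tf.Session() as sess:
    print(sess.run(stacked))  # [ 0.  1.  4.  9. 16.]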
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/ops/tensor_array_ops.py | Python | apache-2.0 | 19,978 |