# repo: mwweinberg/china-daily-email | path: combine_rfa_qz.py | license: mit
from bs4 import BeautifulSoup
import urllib
#csv is for the csv writer
import csv
#this will hold the output
holder = {}
#opens the input doc
txt = open("qz-rfa.csv")
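#assumed input format (not documented here): one article URL per row,
#in the first column, e.g. an rfa.org or qz.com story link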
#is the contents of the doc
#inputs = txt.read()
#opens the output doc
output_txt = open("output.txt", "w")
print txt
def headliner(url):
#iterate through the urls
parsed_urls = csv.reader(url)
    for row in parsed_urls:
        #the URL is in the first column of each row
        row_contents = row[0]
        print row_contents
if "rfa" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Radio Free Asia: '
headline = soup.find_all('title')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
            #creates the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "storytext"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
                #the .encode call fixes unicode encoding issues
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
"""
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write(str(article_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
"""
if "qz" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Quartz: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
            #creates the body text
            #This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "item-body"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
                #the .encode call fixes unicode encoding issues
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
"""
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(headline_text))
output_txt.write("\n")
output_txt.write(str(article_text))
output_txt.write("\n")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
output_txt.write("\r")
"""
else:
print "not a story from a known source"
headliner(txt)
#this is just for debugging
print holder
#iterates through the headlines in holder and writes them to the doc
#this is the TOC
for head, body in holder.items():
output_txt.write(str(head))
output_txt.write("\r")
output_txt.write("\r")
#iterates through the headlines and body in holder and writes them to doc
#this is the body of the email
for head, body in holder.items():
output_txt.write("\r")
output_txt.write(str(head))
output_txt.write("\r")
output_txt.write("\r")
output_txt.write(str(body))
output_txt.write("\r")
txt.close()
output_txt.close()

# repo: eddiejessup/ahoy | path: ahoy/dc_dx_measurers.py | license: bsd-3-clause
from __future__ import print_function, division
from abc import ABCMeta, abstractmethod
import numpy as np
from ciabatta.meta import make_repr_str
from ahoy.ring_buffer import CylinderBuffer
from ahoy import measurers, c_measurers
def get_K(t, dt, t_rot_0):
A = 0.5
ts = np.arange(0.0, t, dt)
gs = ts / t_rot_0
K = np.exp(-gs) * (1.0 - A * (gs + (gs ** 2) / 2.0))
trunc_scale = np.abs(K[K >= 0.0].sum() / K[K < 0.0].sum())
K[K < 0.0] *= trunc_scale
norm_const = np.sum(K * -ts * dt)
K /= norm_const
return K
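
def _check_K_normalisation():
    # Minimal sketch, not part of the original module: builds the response
    # kernel for assumed parameters and verifies the normalisation
    # sum(K * -t * dt) == 1 that get_K enforces, which is what makes the
    # integral transform in TemporalDcDxMeasurer a direct estimate of dc/dx.
    t, dt, t_rot_0 = 10.0, 0.01, 1.0  # assumed values for illustration
    K = get_K(t, dt, t_rot_0)
    ts = np.arange(0.0, t, dt)
    print(np.sum(K * -ts * dt))  # should print 1.0 (up to float error)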
class DcDxMeasurer(measurers.Measurer):
__metaclass__ = ABCMeta
@abstractmethod
def get_dc_dxs(self):
return
class SpatialDcDxMeasurer(DcDxMeasurer):
def __init__(self, directions, grad_c_measurer):
self.directions = directions
self.grad_c_measurer = grad_c_measurer
def get_dc_dxs(self):
grad_c = self.grad_c_measurer.get_grad_cs()
return np.sum(self.directions.u * grad_c, axis=-1)
def __repr__(self):
fs = [('grad_c_measurer', self.grad_c_measurer)]
return make_repr_str(self, fs)
class TemporalDcDxMeasurer(DcDxMeasurer):
def __init__(self, c_measurer, v_0, dt_mem, t_mem, t_rot_0,
time):
self.c_measurer = c_measurer
self.v_0 = v_0
self.dt_mem = dt_mem
self.t_mem = t_mem
cs = self.c_measurer.get_cs()
n = cs.shape[0]
self.K_dt = get_K(self.t_mem, self.dt_mem, t_rot_0) * self.dt_mem
self.c_mem = CylinderBuffer(n, self.K_dt.shape[0])
self.time = time
# Optimisation, only calculate dc_dx when c memory is updated.
self.dc_dx_cache = np.zeros([n])
self.t_last_update = 0.0
def _iterate(self):
cs = self.c_measurer.get_cs()
self.c_mem.update(cs)
def _get_dc_dxs(self):
return self.c_mem.integral_transform(self.K_dt) / self.v_0
def iterate(self):
t_now = self.time.t
if t_now - self.t_last_update > 0.99 * self.dt_mem:
self._iterate()
self.dc_dx_cache = self._get_dc_dxs()
self.t_last_update = t_now
# TODO: This is bad, it both returns a value *and* has side-effects.
# Iterating the measurer and getting the value should be distinct.
def get_dc_dxs(self):
self.iterate()
return self.dc_dx_cache
def __repr__(self):
fs = [('c_measurer', self.c_measurer), ('v_0', self.v_0),
('dt_mem', self.dt_mem), ('t_mem', self.t_mem),
('t_last_update', self.t_last_update)]
return make_repr_str(self, fs)
def dc_dx_factory(temporal_chemo_flag,
ds=None,
ps=None, v_0=None, dt_mem=None, t_mem=None, t_rot_0=None, time=None,
c_field_flag=None, c_field=None):
if temporal_chemo_flag:
return temporal_dc_dx_factory(ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag, c_field)
else:
return spatial_dc_dx_factory(ds, c_field_flag, c_field, ps)
def spatial_dc_dx_factory(ds, c_field_flag=None, c_field=None, ps=None):
if not c_field_flag:
grad_c_measurer = c_measurers.ConstantGradCMeasurer(ds.n, ds.dim)
else:
grad_c_measurer = c_measurers.FieldGradCMeasurer(c_field, ps)
return SpatialDcDxMeasurer(ds, grad_c_measurer)
def temporal_dc_dx_factory(ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag=None, c_field=None):
if not c_field_flag:
c_measurer = c_measurers.LinearCMeasurer(ps)
else:
c_measurer = c_measurers.FieldCMeasurer(c_field, ps)
return TemporalDcDxMeasurer(c_measurer, v_0, dt_mem, t_mem, t_rot_0, time)

# repo: Bleyddyn/malpi | path: exp/test.py | license: mit
from time import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
from malpi.cnn import *
from malpi.data_utils import get_CIFAR10_data
from malpi.solver import Solver
from optparse import OptionParser
from malpi.fast_layers import *
def plot_solver(solver):
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
def getCIFAR10(verbose=True):
data = get_CIFAR10_data(num_training=49000)
if verbose:
for k, v in data.iteritems():
print '%s: ' % k, v.shape
return data
def log( message, name='test' ):
logFileName = name + ".log"
fmt = '%Y-%m-%d-%H-%M-%S'
datestr = datetime.datetime.now().strftime(fmt)
with open(logFileName,'a') as outf:
outf.write(datestr + ": " + message + "\n")
def hyperparameterGenerator( oneRun = False ):
variations = np.array([0.9,1.0,1.1])
if oneRun:
reguls = [3.37091767808e-05]
lrs = [0.0002006801544726]
else:
reguls = np.array([3.37091767808e-05]) * variations
lrs = np.array([0.0002006801544726]) * variations
#reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01]
#lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2]
#reguls = np.append([3.37091767808e-05],reguls)
#lrs = np.append([0.000182436504066],lrs)
decays = [1.0]
for reg in reguls:
for lr in lrs:
for decay in decays:
hparams = { "reg": reg, "lr": lr, "lr_decay":decay, "epochs":6, "batch_size":50, "update":"adam" }
yield hparams
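
def _preview_hyperparameters():
    # Illustrative sketch, not in the original script: with oneRun=False the
    # generator yields 3 regs x 3 lrs x 1 decay = 9 hyper-parameter dicts,
    # each varying reg and lr by +/-10% around the hand-tuned values above.
    for hparams in hyperparameterGenerator(oneRun=False):
        print(hparams)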
def train():
name = "ThreeLayerTest2"
# layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32", "fc-10"]
# layer_params = [{'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
# {'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
# {'filter_size':3},
# {'relu':False}]
layers = ["conv-8", "maxpool", "conv-16", "maxpool", "conv-32", "fc-10"]
layer_params = [{'filter_size':3, 'stride':1, 'pad':1 }, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
{'filter_size':3}, {'pool_stride':2, 'pool_width':2, 'pool_height':2},
{'filter_size':3},
{'relu':False}]
log( "%s = %s" % (name, str(layers)), name )
log( " %s" % (str(layer_params,)), name )
data = getCIFAR10(verbose=False)
model_name = name + ".pickle"
val_accs = []
best_solver = None
best_val_acc = 0.0
best_model = load_malpi( model_name, verbose=False)
if best_model:
best_val_acc = best_model.validation_accuracy
for hparams in hyperparameterGenerator(oneRun=False):
model = MalpiConvNet(layers, layer_params, reg=hparams['reg'], dtype=np.float16, verbose=False)
model.hyper_parameters = hparams
solver = Solver(model, data,
num_epochs=hparams['epochs'], batch_size=hparams['batch_size'],
lr_decay=hparams['lr_decay'],
update_rule=hparams['update'],
optim_config={
'learning_rate': hparams['lr'],
},
verbose=True, print_every=50)
log( "Started training model: %s" % (name,), name=name )
log( " Hyper-parameters: %s" % (str(hparams),), name=name )
solver.train()
log( " Validation Accuracy: %f" % (solver.best_val_acc,) , name=name )
log( "Finished training", name=name )
val_accs.append(solver.best_val_acc)
if solver.best_val_acc > best_val_acc:
best_val_acc = solver.best_val_acc
best_model = model
best_solver = solver
log( "", name=name )
best_model.name = name
best_model.validation_accuracy = best_val_acc
best_model.save(model_name)
#plot_solver(best_solver)
print val_accs
# print('\a') # Sound a bell
# print('\a')
# print('\a')
def classify(data):
model = load_malpi('SimpleTest1.pickle')
scores = model.loss(data)
print scores
def testload():
model = load_malpi('SimpleTest1.pickle')
data = getCIFAR10(verbose=False)
solver = Solver(model, data)
train_acc = solver.check_accuracy(data["X_train"], data["y_train"], num_samples=1000)
val_acc = solver.check_accuracy(data["X_val"], data["y_val"])
print "train acc: %f; val_acc: %f" % (train_acc,val_acc)
def testIM2COL():
conv_param = {'filter_size':3, 'stride':1, 'pad':1 }
x = np.zeros((1,3,32,32))
w = np.zeros((8, 3, 3, 3))
b = np.zeros(8)
x = x.astype(np.float32)
w = w.astype(np.float32)
b = b.astype(np.float32)
conv_forward_im2col(x, w, b, conv_param)
#Try: Conv-64, Conv-64, maxpool, conv-128, conv-128, maxpool, conv-256, conv-256, maxpool, conv-512, conv-512, maxpool, conv-512, conv-512, maxpool, FC-4096, FC-4096, FC-1000, softmax
def describeModel( name ):
model = load_malpi(name+'.pickle')
# if not hasattr(model, 'input_dim'):
# model.input_dim = {}
model.describe()
# model.save(name+'.pickle')
def getOptions():
parser = OptionParser()
parser.add_option("-d","--describe",dest="name",help="Describe a model saved in a pickle file: <name>.pickle");
(options, args) = parser.parse_args()
return (options, args)
if __name__ == "__main__":
(options, args) = getOptions()
if options.name:
describeModel(options.name)
else:
train()
#testIM2COL()

# repo: tomis007/pyboi | path: pyboi/processor/z80.py | license: mit
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, PickleType
from ..base import Base
from ctypes import c_int8
from enum import Enum
import pickle
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name='z80')
class CpuState(Base):
"""
SQLAlchemy base class to save cpustate.
...
Attributes
----------
id : int
primary key for database
savename : string
game save name
gbregisters : pickle
pickled list of the cpu registers A-F
stack_ptr : int
the stack pointer
program_ctr : int
the program counter
"""
__tablename__ = 'cpuState'
id = Column(Integer, primary_key=True)
savename = Column(String)
gbregisters = Column(PickleType)
stack_ptr = Column(Integer)
program_ctr = Column(Integer)
def __repr__(self):
return "<CPU_STATE(savename=%r>" % self.savename
class Z80():
"""
An implementation of the gameboy's ~z80 (similar) cpu.
...
Attributes
----------
reg : list of ints
registers A-F in the z80 cpu
pc : int
program counter
sp : int
stack pointer
mem : Memory
memory object for this processor's memory
opcodes : Dictionary
function dictionary for dispatching the opcodes
"""
def __init__(self, mem):
"""
__init__ function
...
Refer to Z80 class documentation for attribute info.
"""
self.count = 0
self.reg = [0 for _ in range(8)]
# register index constants
self.A = 0
self.B = 1
self.C = 2
self.D = 3
self.E = 4
self.F = 5
self.H = 6
self.L = 7
# register enums
self.reg_pairs = Enum('RegPairs', 'HL BC DE AF')
self.load_vals = Enum('ImmediateByte', 'N NN SP')
self.HL = self.reg_pairs.HL
self.BC = self.reg_pairs.BC
self.DE = self.reg_pairs.DE
self.AF = self.reg_pairs.AF
self.N = self.load_vals.N
self.NN = self.load_vals.NN
self.SP = self.load_vals.SP
self.flags = Enum('Flags', 'Z N H C')
#pc/sp
self.pc = 0x100
self.sp = 0xfffe
self.interrupt_enable = False
self.mem = mem
#timers
self.div_clock = 0
self.tima_clock = 0
self.opcodes = {
0x76: lambda: self.halt(),
0xcb: lambda: self.extended_opcode(),
0xf3: lambda: self.disable_interrupts(),
0xd9: lambda: self.ret_interrupts(),
0x00: lambda: self.NOP(),
0x06: lambda: self.ld_byte_n(self.B),
0x0e: lambda: self.ld_byte_n(self.C),
0x16: lambda: self.ld_byte_n(self.D),
0x1e: lambda: self.ld_byte_n(self.E),
0x26: lambda: self.ld_byte_n(self.H),
0x2e: lambda: self.ld_byte_n(self.L),
0x7f: lambda: self.ld_r1_r2(self.A, self.A),
0x78: lambda: self.ld_r1_r2(self.A, self.B),
0x79: lambda: self.ld_r1_r2(self.A, self.C),
0x7a: lambda: self.ld_r1_r2(self.A, self.D),
0x7b: lambda: self.ld_r1_r2(self.A, self.E),
0x7c: lambda: self.ld_r1_r2(self.A, self.H),
0x7d: lambda: self.ld_r1_r2(self.A, self.L),
0x7e: lambda: self.ld_r1_r2(self.A, self.HL),
0x40: lambda: self.ld_r1_r2(self.B, self.B),
0x41: lambda: self.ld_r1_r2(self.B, self.C),
0x42: lambda: self.ld_r1_r2(self.B, self.D),
0x43: lambda: self.ld_r1_r2(self.B, self.E),
0x44: lambda: self.ld_r1_r2(self.B, self.H),
0x45: lambda: self.ld_r1_r2(self.B, self.L),
0x46: lambda: self.ld_r1_r2(self.B, self.HL),
0x48: lambda: self.ld_r1_r2(self.C, self.B),
0x49: lambda: self.ld_r1_r2(self.C, self.C),
0x4a: lambda: self.ld_r1_r2(self.C, self.D),
0x4b: lambda: self.ld_r1_r2(self.C, self.E),
0x4c: lambda: self.ld_r1_r2(self.C, self.H),
0x4d: lambda: self.ld_r1_r2(self.C, self.L),
0x4e: lambda: self.ld_r1_r2(self.C, self.HL),
0x50: lambda: self.ld_r1_r2(self.D, self.B),
0x51: lambda: self.ld_r1_r2(self.D, self.C),
0x52: lambda: self.ld_r1_r2(self.D, self.D),
0x53: lambda: self.ld_r1_r2(self.D, self.E),
0x54: lambda: self.ld_r1_r2(self.D, self.H),
0x55: lambda: self.ld_r1_r2(self.D, self.L),
0x56: lambda: self.ld_r1_r2(self.D, self.HL),
0x58: lambda: self.ld_r1_r2(self.E, self.B),
0x59: lambda: self.ld_r1_r2(self.E, self.C),
0x5a: lambda: self.ld_r1_r2(self.E, self.D),
0x5b: lambda: self.ld_r1_r2(self.E, self.E),
0x5c: lambda: self.ld_r1_r2(self.E, self.H),
0x5d: lambda: self.ld_r1_r2(self.E, self.L),
0x5e: lambda: self.ld_r1_r2(self.E, self.HL),
0x60: lambda: self.ld_r1_r2(self.H, self.B),
0x61: lambda: self.ld_r1_r2(self.H, self.C),
0x62: lambda: self.ld_r1_r2(self.H, self.D),
0x63: lambda: self.ld_r1_r2(self.H, self.E),
0x64: lambda: self.ld_r1_r2(self.H, self.H),
0x65: lambda: self.ld_r1_r2(self.H, self.L),
0x66: lambda: self.ld_r1_r2(self.H, self.HL),
0x68: lambda: self.ld_r1_r2(self.L, self.B),
0x69: lambda: self.ld_r1_r2(self.L, self.C),
0x6a: lambda: self.ld_r1_r2(self.L, self.D),
0x6b: lambda: self.ld_r1_r2(self.L, self.E),
0x6c: lambda: self.ld_r1_r2(self.L, self.H),
0x6d: lambda: self.ld_r1_r2(self.L, self.L),
0x6e: lambda: self.ld_r1_r2(self.L, self.HL),
0x70: lambda: self.ld_r1_r2(self.HL, self.B),
0x71: lambda: self.ld_r1_r2(self.HL, self.C),
0x72: lambda: self.ld_r1_r2(self.HL, self.D),
0x73: lambda: self.ld_r1_r2(self.HL, self.E),
0x74: lambda: self.ld_r1_r2(self.HL, self.H),
0x75: lambda: self.ld_r1_r2(self.HL, self.L),
0x36: lambda: self.ld_r1_r2(self.HL, self.N),
0x0a: lambda: self.load_a(self.BC),
0x1a: lambda: self.load_a(self.DE),
0xfa: lambda: self.load_a(self.NN),
0x3e: lambda: self.load_a(self.N),
0x47: lambda: self.write_a(self.B),
0x4f: lambda: self.write_a(self.C),
0x57: lambda: self.write_a(self.D),
0x5f: lambda: self.write_a(self.E),
0x67: lambda: self.write_a(self.H),
0x6f: lambda: self.write_a(self.L),
0x02: lambda: self.write_a(self.BC),
0x12: lambda: self.write_a(self.DE),
0x77: lambda: self.write_a(self.HL),
0xea: lambda: self.write_a(self.NN),
0xf2: lambda: self.load_a_c(store=False),
0xe2: lambda: self.load_a_c(store=True),
0x3a: lambda: self.load_a_hl(dec=True, load=True),
0x32: lambda: self.load_a_hl(dec=True, load=False),
0x2a: lambda: self.load_a_hl(dec=False, load=True),
0x22: lambda: self.load_a_hl(dec=False, load=False),
0xe0: lambda: self.a_n(True),
0xf0: lambda: self.a_n(False),
0x01: lambda: self.ld_nn(self.BC, set_sp=False),
0x11: lambda: self.ld_nn(self.DE, set_sp=False),
0x21: lambda: self.ld_nn(self.HL, set_sp=False),
0x31: lambda: self.ld_nn(self.sp, set_sp=True),
0xf9: lambda: self.ld_sp_hl(),
0xf8: lambda: self.ldhl_sp(),
0x08: lambda: self.ld_nn_sp(),
0xf5: lambda: self.push_nn(self.A, self.F),
0xc5: lambda: self.push_nn(self.B, self.C),
0xd5: lambda: self.push_nn(self.D, self.E),
0xe5: lambda: self.push_nn(self.H, self.L),
0xf1: lambda: self.pop_nn(self.A, self.F),
0xc1: lambda: self.pop_nn(self.B, self.C),
0xd1: lambda: self.pop_nn(self.D, self.E),
0xe1: lambda: self.pop_nn(self.H, self.L),
0x87: lambda: self.add_a_n(self.A, add_carry=False),
0x80: lambda: self.add_a_n(self.B, add_carry=False),
0x81: lambda: self.add_a_n(self.C, add_carry=False),
0x82: lambda: self.add_a_n(self.D, add_carry=False),
0x83: lambda: self.add_a_n(self.E, add_carry=False),
0x84: lambda: self.add_a_n(self.H, add_carry=False),
0x85: lambda: self.add_a_n(self.L, add_carry=False),
0x86: lambda: self.add_a_n(self.HL, add_carry=False),
0xc6: lambda: self.add_a_n(self.N, add_carry=False),
0x8f: lambda: self.add_a_n(self.A, add_carry=True),
0x88: lambda: self.add_a_n(self.B, add_carry=True),
0x89: lambda: self.add_a_n(self.C, add_carry=True),
0x8a: lambda: self.add_a_n(self.D, add_carry=True),
0x8b: lambda: self.add_a_n(self.E, add_carry=True),
0x8c: lambda: self.add_a_n(self.H, add_carry=True),
0x8d: lambda: self.add_a_n(self.L, add_carry=True),
0x8e: lambda: self.add_a_n(self.HL, add_carry=True),
0xce: lambda: self.add_a_n(self.N, add_carry=True),
0x97: lambda: self.sub_a_n(self.A, sub_carry=False),
0x90: lambda: self.sub_a_n(self.B, sub_carry=False),
0x91: lambda: self.sub_a_n(self.C, sub_carry=False),
0x92: lambda: self.sub_a_n(self.D, sub_carry=False),
0x93: lambda: self.sub_a_n(self.E, sub_carry=False),
0x94: lambda: self.sub_a_n(self.H, sub_carry=False),
0x95: lambda: self.sub_a_n(self.L, sub_carry=False),
0x96: lambda: self.sub_a_n(self.HL, sub_carry=False),
0xd6: lambda: self.sub_a_n(self.N, sub_carry=False),
0x9f: lambda: self.sub_a_n(self.A, sub_carry=True),
0x98: lambda: self.sub_a_n(self.B, sub_carry=True),
0x99: lambda: self.sub_a_n(self.C, sub_carry=True),
0x9a: lambda: self.sub_a_n(self.D, sub_carry=True),
0x9b: lambda: self.sub_a_n(self.E, sub_carry=True),
0x9c: lambda: self.sub_a_n(self.H, sub_carry=True),
0x9d: lambda: self.sub_a_n(self.L, sub_carry=True),
0x9e: lambda: self.sub_a_n(self.HL, sub_carry=True),
0xde: lambda: self.sub_a_n(self.N, sub_carry=True),
0xa7: lambda: self.and_n(self.A),
0xa0: lambda: self.and_n(self.B),
0xa1: lambda: self.and_n(self.C),
0xa2: lambda: self.and_n(self.D),
0xa3: lambda: self.and_n(self.E),
0xa4: lambda: self.and_n(self.H),
0xa5: lambda: self.and_n(self.L),
0xa6: lambda: self.and_n(self.HL),
0xe6: lambda: self.and_n(self.N),
0xb7: lambda: self.or_n(self.A, exclusive_or=False),
0xb0: lambda: self.or_n(self.B, exclusive_or=False),
0xb1: lambda: self.or_n(self.C, exclusive_or=False),
0xb2: lambda: self.or_n(self.D, exclusive_or=False),
0xb3: lambda: self.or_n(self.E, exclusive_or=False),
0xb4: lambda: self.or_n(self.H, exclusive_or=False),
0xb5: lambda: self.or_n(self.L, exclusive_or=False),
0xb6: lambda: self.or_n(self.HL, exclusive_or=False),
0xf6: lambda: self.or_n(self.N, exclusive_or=False),
0xaf: lambda: self.or_n(self.A, exclusive_or=True),
0xa8: lambda: self.or_n(self.B, exclusive_or=True),
0xa9: lambda: self.or_n(self.C, exclusive_or=True),
0xaa: lambda: self.or_n(self.D, exclusive_or=True),
0xab: lambda: self.or_n(self.E, exclusive_or=True),
0xac: lambda: self.or_n(self.H, exclusive_or=True),
0xad: lambda: self.or_n(self.L, exclusive_or=True),
0xae: lambda: self.or_n(self.HL, exclusive_or=True),
0xee: lambda: self.or_n(self.N, exclusive_or=True),
0xbf: lambda: self.cp_n(self.A),
0xb8: lambda: self.cp_n(self.B),
0xb9: lambda: self.cp_n(self.C),
0xba: lambda: self.cp_n(self.D),
0xbb: lambda: self.cp_n(self.E),
0xbc: lambda: self.cp_n(self.H),
0xbd: lambda: self.cp_n(self.L),
0xbe: lambda: self.cp_n(self.HL),
0xfe: lambda: self.cp_n(self.N),
0x3c: lambda: self.inc_n(self.A),
0x04: lambda: self.inc_n(self.B),
0x0c: lambda: self.inc_n(self.C),
0x14: lambda: self.inc_n(self.D),
0x1c: lambda: self.inc_n(self.E),
0x24: lambda: self.inc_n(self.H),
0x2c: lambda: self.inc_n(self.L),
0x34: lambda: self.inc_n(self.HL),
0x3d: lambda: self.dec_n(self.A),
0x05: lambda: self.dec_n(self.B),
0x0d: lambda: self.dec_n(self.C),
0x15: lambda: self.dec_n(self.D),
0x1d: lambda: self.dec_n(self.E),
0x25: lambda: self.dec_n(self.H),
0x2d: lambda: self.dec_n(self.L),
0x35: lambda: self.dec_n(self.HL),
0x09: lambda: self.add_hl(self.B, self.C, add_sp=False),
0x19: lambda: self.add_hl(self.D, self.E, add_sp=False),
0x29: lambda: self.add_hl(self.H, self.L, add_sp=False),
0x39: lambda: self.add_hl(self.B, self.C, add_sp=True),
0xe8: lambda: self.add_sp_n(),
0x03: lambda: self.inc_nn(self.B, self.C, inc_sp=False),
0x13: lambda: self.inc_nn(self.D, self.E, inc_sp=False),
0x23: lambda: self.inc_nn(self.H, self.L, inc_sp=False),
0x33: lambda: self.inc_nn(self.B, self.C, inc_sp=True),
0x0b: lambda: self.dec_nn(self.B, self.C, dec_sp=False),
0x1b: lambda: self.dec_nn(self.D, self.E, dec_sp=False),
0x2b: lambda: self.dec_nn(self.H, self.L, dec_sp=False),
0x3b: lambda: self.dec_nn(self.B, self.C, dec_sp=True),
0xc3: lambda: self.jump_nn(),
0xc2: lambda: self.jump_cc(False, self.flags.Z, immmediate_jump=False),
0xca: lambda: self.jump_cc(True, self.flags.Z, immmediate_jump=False),
0xd2: lambda: self.jump_cc(False, self.flags.C, immmediate_jump=False),
0xda: lambda: self.jump_cc(True, self.flags.C, immmediate_jump=False),
0xe9: lambda: self.jump_hl(),
0x18: lambda: self.jump_n(),
0x20: lambda: self.jump_cc(False, self.flags.Z, immmediate_jump=True),
0x28: lambda: self.jump_cc(True, self.flags.Z, immmediate_jump=True),
0x30: lambda: self.jump_cc(False, self.flags.C, immmediate_jump=True),
0x38: lambda: self.jump_cc(True, self.flags.C, immmediate_jump=True),
0x27: lambda: self.dec_adjust(),
0x2f: lambda: self.complement_a(),
0x3f: lambda: self.complement_cf(),
0x37: lambda: self.set_cf(),
0x07: lambda: self.rotate_l_a_c(),
0x17: lambda: self.rotate_l_a(),
0x0f: lambda: self.rotate_r_a_c(),
0x1f: lambda: self.rotate_r_a(),
0xcd: lambda: self.call(),
0xc4: lambda: self.call_cc(self.flags.Z, False),
0xcc: lambda: self.call_cc(self.flags.Z, True),
0xd4: lambda: self.call_cc(self.flags.C, False),
0xdc: lambda: self.call_cc(self.flags.C, True),
0xc9: lambda: self.ret(),
0xc0: lambda: self.ret_cc(self.flags.Z, False),
0xc8: lambda: self.ret_cc(self.flags.Z, True),
0xd0: lambda: self.ret_cc(self.flags.C, False),
0xd8: lambda: self.ret_cc(self.flags.C, True),
0x10: lambda: self.stop(),
0xc7: lambda: self.restart(0x00),
0xcf: lambda: self.restart(0x08),
0xd7: lambda: self.restart(0x10),
0xdf: lambda: self.restart(0x18),
0xe7: lambda: self.restart(0x20),
0xef: lambda: self.restart(0x28),
0xf7: lambda: self.restart(0x30),
0xff: lambda: self.restart(0x38),
0xfb: lambda: self.enable_interrupts()
}
self.ext_opcodes = {
0x3f: lambda: self.srl_n(self.A, False),
0x38: lambda: self.srl_n(self.B, False),
0x39: lambda: self.srl_n(self.C, False),
0x3a: lambda: self.srl_n(self.D, False),
0x3b: lambda: self.srl_n(self.E, False),
0x3c: lambda: self.srl_n(self.H, False),
0x3d: lambda: self.srl_n(self.L, False),
0x3e: lambda: self.srl_n(self.HL, False),
0x2f: lambda: self.srl_n(self.A, True),
0x28: lambda: self.srl_n(self.B, True),
0x29: lambda: self.srl_n(self.C, True),
0x2a: lambda: self.srl_n(self.D, True),
0x2b: lambda: self.srl_n(self.E, True),
0x2c: lambda: self.srl_n(self.H, True),
0x2d: lambda: self.srl_n(self.L, True),
0x2e: lambda: self.srl_n(self.HL, True),
0x1f: lambda: self.rr_n(self.A),
0x18: lambda: self.rr_n(self.B),
0x19: lambda: self.rr_n(self.C),
0x1a: lambda: self.rr_n(self.D),
0x1b: lambda: self.rr_n(self.E),
0x1c: lambda: self.rr_n(self.H),
0x1d: lambda: self.rr_n(self.L),
0x1e: lambda: self.rr_n(self.HL),
0x37: lambda: self.swap(self.A),
0x30: lambda: self.swap(self.B),
0x31: lambda: self.swap(self.C),
0x32: lambda: self.swap(self.D),
0x33: lambda: self.swap(self.E),
0x34: lambda: self.swap(self.H),
0x35: lambda: self.swap(self.L),
0x36: lambda: self.swap(self.HL),
0x27: lambda: self.sla_n(self.A),
0x20: lambda: self.sla_n(self.B),
0x21: lambda: self.sla_n(self.C),
0x22: lambda: self.sla_n(self.D),
0x23: lambda: self.sla_n(self.E),
0x24: lambda: self.sla_n(self.H),
0x25: lambda: self.sla_n(self.L),
0x26: lambda: self.sla_n(self.HL),
0x07: lambda: self.rotate_n_lc(self.A),
0x00: lambda: self.rotate_n_lc(self.B),
0x01: lambda: self.rotate_n_lc(self.C),
0x02: lambda: self.rotate_n_lc(self.D),
0x03: lambda: self.rotate_n_lc(self.E),
0x04: lambda: self.rotate_n_lc(self.H),
0x05: lambda: self.rotate_n_lc(self.L),
0x06: lambda: self.rotate_n_lc(self.HL),
0x17: lambda: self.rotate_l_n(self.A),
0x10: lambda: self.rotate_l_n(self.B),
0x11: lambda: self.rotate_l_n(self.C),
0x12: lambda: self.rotate_l_n(self.D),
0x13: lambda: self.rotate_l_n(self.E),
0x14: lambda: self.rotate_l_n(self.H),
0x15: lambda: self.rotate_l_n(self.L),
0x16: lambda: self.rotate_l_n(self.HL),
0x0f: lambda: self.rrc_n(self.A),
0x08: lambda: self.rrc_n(self.B),
0x09: lambda: self.rrc_n(self.C),
0x0a: lambda: self.rrc_n(self.D),
0x0b: lambda: self.rrc_n(self.E),
0x0c: lambda: self.rrc_n(self.H),
0x0d: lambda: self.rrc_n(self.L),
0x0e: lambda: self.rrc_n(self.HL),
0x47: lambda: self.bit_br(0, self.A),
0x40: lambda: self.bit_br(0, self.B),
0x41: lambda: self.bit_br(0, self.C),
0x42: lambda: self.bit_br(0, self.D),
0x43: lambda: self.bit_br(0, self.E),
0x44: lambda: self.bit_br(0, self.H),
0x45: lambda: self.bit_br(0, self.L),
0x46: lambda: self.bit_br(0, self.HL),
0x4f: lambda: self.bit_br(1, self.A),
0x48: lambda: self.bit_br(1, self.B),
0x49: lambda: self.bit_br(1, self.C),
0x4a: lambda: self.bit_br(1, self.D),
0x4b: lambda: self.bit_br(1, self.E),
0x4c: lambda: self.bit_br(1, self.H),
0x4d: lambda: self.bit_br(1, self.L),
0x4e: lambda: self.bit_br(1, self.HL),
0x57: lambda: self.bit_br(2, self.A),
0x50: lambda: self.bit_br(2, self.B),
0x51: lambda: self.bit_br(2, self.C),
0x52: lambda: self.bit_br(2, self.D),
0x53: lambda: self.bit_br(2, self.E),
0x54: lambda: self.bit_br(2, self.H),
0x55: lambda: self.bit_br(2, self.L),
0x56: lambda: self.bit_br(2, self.HL),
0x5f: lambda: self.bit_br(3, self.A),
0x58: lambda: self.bit_br(3, self.B),
0x59: lambda: self.bit_br(3, self.C),
0x5a: lambda: self.bit_br(3, self.D),
0x5b: lambda: self.bit_br(3, self.E),
0x5c: lambda: self.bit_br(3, self.H),
0x5d: lambda: self.bit_br(3, self.L),
0x5e: lambda: self.bit_br(3, self.HL),
0x67: lambda: self.bit_br(4, self.A),
0x60: lambda: self.bit_br(4, self.B),
0x61: lambda: self.bit_br(4, self.C),
0x62: lambda: self.bit_br(4, self.D),
0x63: lambda: self.bit_br(4, self.E),
0x64: lambda: self.bit_br(4, self.H),
0x65: lambda: self.bit_br(4, self.L),
0x66: lambda: self.bit_br(4, self.HL),
0x6f: lambda: self.bit_br(5, self.A),
0x68: lambda: self.bit_br(5, self.B),
0x69: lambda: self.bit_br(5, self.C),
0x6a: lambda: self.bit_br(5, self.D),
0x6b: lambda: self.bit_br(5, self.E),
0x6c: lambda: self.bit_br(5, self.H),
0x6d: lambda: self.bit_br(5, self.L),
0x6e: lambda: self.bit_br(5, self.HL),
0x77: lambda: self.bit_br(6, self.A),
0x70: lambda: self.bit_br(6, self.B),
0x71: lambda: self.bit_br(6, self.C),
0x72: lambda: self.bit_br(6, self.D),
0x73: lambda: self.bit_br(6, self.E),
0x74: lambda: self.bit_br(6, self.H),
0x75: lambda: self.bit_br(6, self.L),
0x76: lambda: self.bit_br(6, self.HL),
0x7f: lambda: self.bit_br(7, self.A),
0x78: lambda: self.bit_br(7, self.B),
0x79: lambda: self.bit_br(7, self.C),
0x7a: lambda: self.bit_br(7, self.D),
0x7b: lambda: self.bit_br(7, self.E),
0x7c: lambda: self.bit_br(7, self.H),
0x7d: lambda: self.bit_br(7, self.L),
0x7e: lambda: self.bit_br(7, self.HL),
0xc7: lambda: self.set_b_r(self.A, 0, 1),
0xc0: lambda: self.set_b_r(self.B, 0, 1),
0xc1: lambda: self.set_b_r(self.C, 0, 1),
0xc2: lambda: self.set_b_r(self.D, 0, 1),
0xc3: lambda: self.set_b_r(self.E, 0, 1),
0xc4: lambda: self.set_b_r(self.H, 0, 1),
0xc5: lambda: self.set_b_r(self.L, 0, 1),
0xc6: lambda: self.set_b_r(self.HL, 0, 1),
0xcf: lambda: self.set_b_r(self.A, 1, 1),
0xc8: lambda: self.set_b_r(self.B, 1, 1),
0xc9: lambda: self.set_b_r(self.C, 1, 1),
0xca: lambda: self.set_b_r(self.D, 1, 1),
0xcb: lambda: self.set_b_r(self.E, 1, 1),
0xcc: lambda: self.set_b_r(self.H, 1, 1),
0xcd: lambda: self.set_b_r(self.L, 1, 1),
0xce: lambda: self.set_b_r(self.HL, 1, 1),
0xd7: lambda: self.set_b_r(self.A, 2, 1),
0xd0: lambda: self.set_b_r(self.B, 2, 1),
0xd1: lambda: self.set_b_r(self.C, 2, 1),
0xd2: lambda: self.set_b_r(self.D, 2, 1),
0xd3: lambda: self.set_b_r(self.E, 2, 1),
0xd4: lambda: self.set_b_r(self.H, 2, 1),
0xd5: lambda: self.set_b_r(self.L, 2, 1),
0xd6: lambda: self.set_b_r(self.HL, 2, 1),
0xdf: lambda: self.set_b_r(self.A, 3, 1),
0xd8: lambda: self.set_b_r(self.B, 3, 1),
0xd9: lambda: self.set_b_r(self.C, 3, 1),
0xda: lambda: self.set_b_r(self.D, 3, 1),
0xdb: lambda: self.set_b_r(self.E, 3, 1),
0xdc: lambda: self.set_b_r(self.H, 3, 1),
0xdd: lambda: self.set_b_r(self.L, 3, 1),
0xde: lambda: self.set_b_r(self.HL, 3, 1),
0xe7: lambda: self.set_b_r(self.A, 4, 1),
0xe0: lambda: self.set_b_r(self.B, 4, 1),
0xe1: lambda: self.set_b_r(self.C, 4, 1),
0xe2: lambda: self.set_b_r(self.D, 4, 1),
0xe3: lambda: self.set_b_r(self.E, 4, 1),
0xe4: lambda: self.set_b_r(self.H, 4, 1),
0xe5: lambda: self.set_b_r(self.L, 4, 1),
0xe6: lambda: self.set_b_r(self.HL, 4, 1),
0xef: lambda: self.set_b_r(self.A, 5, 1),
0xe8: lambda: self.set_b_r(self.B, 5, 1),
0xe9: lambda: self.set_b_r(self.C, 5, 1),
0xea: lambda: self.set_b_r(self.D, 5, 1),
0xeb: lambda: self.set_b_r(self.E, 5, 1),
0xec: lambda: self.set_b_r(self.H, 5, 1),
0xed: lambda: self.set_b_r(self.L, 5, 1),
0xee: lambda: self.set_b_r(self.HL, 5, 1),
0xf7: lambda: self.set_b_r(self.A, 6, 1),
0xf0: lambda: self.set_b_r(self.B, 6, 1),
0xf1: lambda: self.set_b_r(self.C, 6, 1),
0xf2: lambda: self.set_b_r(self.D, 6, 1),
0xf3: lambda: self.set_b_r(self.E, 6, 1),
0xf4: lambda: self.set_b_r(self.H, 6, 1),
0xf5: lambda: self.set_b_r(self.L, 6, 1),
0xf6: lambda: self.set_b_r(self.HL, 6, 1),
0xff: lambda: self.set_b_r(self.A, 7, 1),
0xf8: lambda: self.set_b_r(self.B, 7, 1),
0xf9: lambda: self.set_b_r(self.C, 7, 1),
0xfa: lambda: self.set_b_r(self.D, 7, 1),
0xfb: lambda: self.set_b_r(self.E, 7, 1),
0xfc: lambda: self.set_b_r(self.H, 7, 1),
0xfd: lambda: self.set_b_r(self.L, 7, 1),
0xfe: lambda: self.set_b_r(self.HL, 7, 1),
0x87: lambda: self.set_b_r(self.A, 0, 0),
0x80: lambda: self.set_b_r(self.B, 0, 0),
0x81: lambda: self.set_b_r(self.C, 0, 0),
0x82: lambda: self.set_b_r(self.D, 0, 0),
0x83: lambda: self.set_b_r(self.E, 0, 0),
0x84: lambda: self.set_b_r(self.H, 0, 0),
0x85: lambda: self.set_b_r(self.L, 0, 0),
0x86: lambda: self.set_b_r(self.HL, 0, 0),
0x8f: lambda: self.set_b_r(self.A, 1, 0),
0x88: lambda: self.set_b_r(self.B, 1, 0),
0x89: lambda: self.set_b_r(self.C, 1, 0),
0x8a: lambda: self.set_b_r(self.D, 1, 0),
0x8b: lambda: self.set_b_r(self.E, 1, 0),
0x8c: lambda: self.set_b_r(self.H, 1, 0),
0x8d: lambda: self.set_b_r(self.L, 1, 0),
0x8e: lambda: self.set_b_r(self.HL, 1, 0),
0x97: lambda: self.set_b_r(self.A, 2, 0),
0x90: lambda: self.set_b_r(self.B, 2, 0),
0x91: lambda: self.set_b_r(self.C, 2, 0),
0x92: lambda: self.set_b_r(self.D, 2, 0),
0x93: lambda: self.set_b_r(self.E, 2, 0),
0x94: lambda: self.set_b_r(self.H, 2, 0),
0x95: lambda: self.set_b_r(self.L, 2, 0),
0x96: lambda: self.set_b_r(self.HL, 2, 0),
0x9f: lambda: self.set_b_r(self.A, 3, 0),
0x98: lambda: self.set_b_r(self.B, 3, 0),
0x99: lambda: self.set_b_r(self.C, 3, 0),
0x9a: lambda: self.set_b_r(self.D, 3, 0),
0x9b: lambda: self.set_b_r(self.E, 3, 0),
0x9c: lambda: self.set_b_r(self.H, 3, 0),
0x9d: lambda: self.set_b_r(self.L, 3, 0),
0x9e: lambda: self.set_b_r(self.HL, 3, 0),
0xa7: lambda: self.set_b_r(self.A, 4, 0),
0xa0: lambda: self.set_b_r(self.B, 4, 0),
0xa1: lambda: self.set_b_r(self.C, 4, 0),
0xa2: lambda: self.set_b_r(self.D, 4, 0),
0xa3: lambda: self.set_b_r(self.E, 4, 0),
0xa4: lambda: self.set_b_r(self.H, 4, 0),
0xa5: lambda: self.set_b_r(self.L, 4, 0),
0xa6: lambda: self.set_b_r(self.HL, 4, 0),
0xaf: lambda: self.set_b_r(self.A, 5, 0),
0xa8: lambda: self.set_b_r(self.B, 5, 0),
0xa9: lambda: self.set_b_r(self.C, 5, 0),
0xaa: lambda: self.set_b_r(self.D, 5, 0),
0xab: lambda: self.set_b_r(self.E, 5, 0),
0xac: lambda: self.set_b_r(self.H, 5, 0),
0xad: lambda: self.set_b_r(self.L, 5, 0),
0xae: lambda: self.set_b_r(self.HL, 5, 0),
0xb7: lambda: self.set_b_r(self.A, 6, 0),
0xb0: lambda: self.set_b_r(self.B, 6, 0),
0xb1: lambda: self.set_b_r(self.C, 6, 0),
0xb2: lambda: self.set_b_r(self.D, 6, 0),
0xb3: lambda: self.set_b_r(self.E, 6, 0),
0xb4: lambda: self.set_b_r(self.H, 6, 0),
0xb5: lambda: self.set_b_r(self.L, 6, 0),
0xb6: lambda: self.set_b_r(self.HL, 6, 0),
0xbf: lambda: self.set_b_r(self.A, 7, 0),
0xb8: lambda: self.set_b_r(self.B, 7, 0),
0xb9: lambda: self.set_b_r(self.C, 7, 0),
0xba: lambda: self.set_b_r(self.D, 7, 0),
0xbb: lambda: self.set_b_r(self.E, 7, 0),
0xbc: lambda: self.set_b_r(self.H, 7, 0),
0xbd: lambda: self.set_b_r(self.L, 7, 0),
0xbe: lambda: self.set_b_r(self.HL, 7, 0)
}
def save_state(self, name, session):
"""
Save the cpu state into the SQLAlchemy session session.
...
Parameters
----------
name : string
name to associate with the save
session : A SQLAlchemy Session object
session to save the state in
Returns
-------
Human readable error message, or None on success
"""
pickledregisters = pickle.dumps(self.reg)
cpu_state = CpuState(savename=name, stack_ptr=self.sp,
program_ctr=self.pc,
gbregisters=pickledregisters)
session.add(cpu_state)
session.commit()
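    # Illustrative usage (hypothetical; these names are not defined in this
    # module):
    #   engine = create_engine('sqlite:///saves.db')
    #   Base.metadata.create_all(engine)
    #   session = sessionmaker(bind=engine)()
    #   cpu = Z80(mem)
    #   cpu.save_state('slot1', session)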
def init_boot(self):
"""
Initializes the cpu for running the bootstrap "bios".
"""
self.pc = 0
self.sp = 0
def execute_boot_opcode(self, num=1):
"""
Executes an opcode of the booting sequence, takes
????? instructions to complete.
Reads instructions with mem.read_bios() instead
of from normal memory.
Returns
-------
int
number of clock cycles taken
"""
if self.pc >= 0x100:
log.info("BIOS COMPLETE!")
self.dump_registers()
quit()
opcode = self.mem.read_bios(self.pc)
self.pc += 1
try:
#log.info("executing: " + hex(opcode) + " @ " + hex(self.pc - 1))
cycles = self.opcodes[opcode]()
except KeyError:
log.critical('INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
cycles = 0
return cycles
def execute_opcode(self, num=1):
"""
Executes num number of opcode instructions.
...
Parameters
----------
num : int
number of opcode instructions to execute
Returns
-------
int
number of clock cycles taken to execute
"""
opcode = self.mem.read(self.pc)
self.pc += 1
try:
cycles = self.opcodes[opcode]()
except KeyError:
log.critical('INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
quit()
cycles += self.check_interrupts()
self.update_timers(cycles)
return cycles
def extended_opcode(self):
"""
Extended opcodes.
Returns
-------
int
number of cycles taken
"""
opcode = self.mem.read(self.pc)
self.pc += 1
try:
cycles = self.ext_opcodes[opcode]()
except KeyError:
log.critical('EXTENDED INVALID OPCODE ' + hex(opcode) + ' @ ' + hex(self.pc))
quit()
return cycles
def check_interrupts(self):
"""
Checks to see if any interrupts need to be serviced
"""
if not self.interrupt_enable:
return 0
ie = self.mem.read(0xffff)
ir = self.mem.read(0xff0f)
for bit in range(5):
if self.is_set(ie, bit) and self.is_set(ir, bit):
self.push_pc()
self.pc = 0x40 + (bit << 3)
ir &= ~(1 << bit)
self.mem.write(ir & 0xff, 0xff0f)
self.interrupt_enable = False
return 20
return 0
def update_timers(self, cycles):
"""
Updates the timers, requests interrupts if needed.
"""
self.div_clock += cycles
if self.div_clock >= 256:
self.div_clock = self.div_clock % 256
self.mem.inc_div()
tima_ctrl = self.mem.read(0xff07)
if tima_ctrl & 0x4 == 0:
self.tima_clock = 0
else:
self.tima_clock += cycles
rate = self.get_tima_rate(tima_ctrl)
if self.tima_clock >= rate:
self.tima_clock = self.tima_clock % rate
self.mem.inc_tima()
def get_tima_rate(self, ctrl):
"""
Gets the increment rate from tima ctrl.
"""
speed = ctrl & 0x3
if speed == 0:
return 1024
elif speed == 1:
return 16
elif speed == 2:
return 64
else:
return 256
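    # For reference: the two low bits of TAC (0xff07) select the TIMA rate in
    # CPU clock cycles per increment: 00 -> 1024 (4096 Hz), 01 -> 16
    # (262144 Hz), 10 -> 64 (65536 Hz), 11 -> 256 (16384 Hz), matching the
    # mapping above.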
def request_interrupt(self, num):
"""
Request an interrupt to be serviced by cpu
num == 0 - VBlank
1 - LCD STAT
2 - Timer
3 - Serial
4 - Joypad
"""
ir = self.mem.read(0xff0f)
ir |= 1 << num
self.mem.write(ir, 0xff0f)
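    # Example: request_interrupt(0) sets bit 0 of IF (0xff0f); if IE (0xffff)
    # bit 0 is also set and interrupts are enabled, check_interrupts() will
    # push the pc and jump to the VBlank vector 0x40 on the next step.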
def NOP(self):
""" No operation """
return 4
def ld_byte_n(self, reg_index):
"""
Load a byte from memory into register.
Byte is located at pc.
...
Parameters
----------
reg_index : int
index of reg to load
"""
self.reg[reg_index] = self.mem.read(self.pc)
self.pc += 1
return 8
def ld_r1_r2(self, r1, r2):
"""
Put value r2 into r1.
r1,r2 = A,B,C,D,E,H,L,(HL)
...
Parameters
----------
r1 : int
index of r1
r2 : int
index of r2
"""
if r2 != self.HL and r1 != self.HL:
self.reg[r1] = self.reg[r2]
return 4
elif r2 == self.HL:
self.reg[r1] = self.mem.read(self.get_reg(self.H, self.L))
return 8
elif r2 == self.N:
self.mem.write(self.mem.read(self.pc), self.get_reg(self.H, self.L))
self.pc += 1
return 12
else:
self.mem.write(self.reg[r2], self.get_reg(self.H, self.L))
return 8
def load_a(self, src):
"""
Put value src into A.
src = (BC/DE/nn), n
...
Parameters
----------
src
which src to load into a
"""
if src == self.BC:
self.reg[self.A] = self.mem.read(self.get_reg(self.B, self.C))
return 8
elif src == self.DE:
self.reg[self.A] = self.mem.read(self.get_reg(self.D, self.E))
return 8
elif src == self.NN:
self.reg[self.A] = self.mem.read(self.mem.read_word(self.pc))
self.pc += 2
return 16
else: #self.N
self.reg[self.A] = self.mem.read(self.pc)
self.pc += 1
return 8
def write_a(self, dest):
"""
Put value A into dest.
...
Parameters
----------
dest : A-L, (BC/DE/HL/nn)
place to store A
"""
if dest == self.BC:
self.mem.write(self.reg[self.A], self.get_reg(self.B, self.C))
return 8
elif dest == self.DE:
self.mem.write(self.reg[self.A], self.get_reg(self.D, self.E))
return 8
elif dest == self.HL:
self.mem.write(self.reg[self.A], self.get_reg(self.H, self.L))
return 8
elif dest == self.NN:
self.mem.write(self.reg[self.A], self.mem.read_word(self.pc))
self.pc += 2
return 16
else:
self.reg[dest] = self.reg[self.A]
return 4
def load_a_c(self, store=False):
"""
Load A, (C) - put value at 0xff00 + regC into A, or
Put A into address 0xff00 + regC
...
Parameters
----------
store : bool
False - Put value 0xff00 + regC into A
True - store A at 0xff00 + regC
Returns
-------
int
num of cycles
"""
if store:
self.mem.write(self.reg[self.A], self.reg[self.C] + 0xff00)
else:
self.reg[self.A] = self.mem.read(self.reg[self.C] + 0xff00)
return 8
def load_a_hl(self, dec, load):
"""
Store/load A in (HL), or (HL) in A, increment/decrement HL.
...
Parameters
----------
dec : bool
Decrement register HL if True, increments if False
load : bool
Load value at (HL) into A if true
Store A at (HL) if false
Returns
-------
int
num of cycles
"""
if load:
self.reg[self.A] = self.mem.read(self.get_reg(self.H, self.L))
else:
self.mem.write(self.reg[self.A], self.get_reg(self.H, self.L))
HL_val = self.get_reg(self.H, self.L)
HL_val += -1 if dec else 1
self.set_reg(self.H, self.L, HL_val)
return 8
def a_n(self, store):
"""
Store/load A in memory address 0xff00 + n
Parameters
----------
store : bool
if true writes, if false loads
Returns
-------
int
num of cycles
"""
offset = self.mem.read(self.pc)
self.pc += 1
if store:
self.mem.write(self.reg[self.A], offset + 0xff00)
else:
#print('address: ' + hex(offset+ 0xff00) + ' ' + hex(self.mem.read(offset + 0xff00)))
self.reg[self.A] = self.mem.read(offset + 0xff00)
return 12
def ld_nn(self, dest, set_sp=False):
"""
Put value nn into dest.
Dest = BC/DE/HL/SP
Parameters
----------
        dest : int
            destination register pair (defined in class constants);
            ignored when set_sp is True
        set_sp : bool
            if True, loads the word into the stack pointer instead of
            into a register pair
Returns
-------
int
num of cycles
"""
word = self.mem.read_word(self.pc)
self.pc += 2
if set_sp:
self.sp = word
return 12
elif dest == self.BC:
r1 = self.B
r2 = self.C
elif dest == self.DE:
r1 = self.D
r2 = self.E
elif dest == self.HL:
r1 = self.H
r2 = self.L
self.set_reg(r1, r2, word)
return 12
def ld_sp_hl(self):
"""
Put HL into sp.
Returns
-------
int
number of cycles
"""
self.sp = self.get_reg(self.H, self.L)
return 8
def ldhl_sp(self):
"""
Put sp + n effective address into HL.
n = one byte signed value
Flags:
Z/N - Reset
H/C - Set/Reset according to operation
"""
#interpret as signed byte
n = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.set_reg(self.H, self.L, self.sp + n)
self.reset_flags()
if (self.sp & 0xf) + (n & 0xf) > 0xf:
self.set_flag(self.flags.H)
if (self.sp & 0xff) + (n & 0xff) > 0xff:
self.set_flag(self.flags.C)
return 12
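    # Worked example: with sp = 0xfff8 and n = 0x08, HL becomes 0x0000 and
    # both H ((0x8 + 0x8) > 0xf) and C ((0xf8 + 0x08) > 0xff) are set.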
def ld_nn_sp(self):
"""
Put sp at address nn (two byte immediate address).
Returns
-------
int
number of clock cycles
"""
address = self.mem.read_word(self.pc)
self.pc += 2
self.mem.write(self.sp & 0xff, address)
self.mem.write((self.sp & 0xff00) >> 8, address + 1)
return 20
def push_nn(self, r1, r2):
"""
Push register pair r1r2 onto stack.
Decrement sp twice.
Parameters
----------
r1, r2
register pair r1r2
"""
self.sp -= 1
self.mem.write(self.reg[r1], self.sp)
self.sp -= 1
self.mem.write(self.reg[r2], self.sp)
return 16
def pop_nn(self, r1, r2):
"""
Pop two bytes off stack into register pair r1r2.
Increment sp twice.
Parameters
----------
r1
reg1
r2
reg2
"""
self.reg[r2] = self.mem.read(self.sp)
if r2 == self.F:
self.reg[r2] &= 0xf0
self.sp += 1
self.reg[r1] = self.mem.read(self.sp)
self.sp += 1
return 12
def set_reg(self, r1, r2, word):
"""
set register pair r1r2 to 16 bit word.
Parameters
----------
r1,r2 : ints
indexes of r1 r2 registers to set
r1 = H, r2 = L sets pair HL
"""
self.reg[r1] = (word & 0xff00) >> 8
self.reg[r2] = word & 0xff
def get_reg(self, r1, r2):
"""
Access register r1r2 - combination of r1 and r1 registers.
For example get_reg(H,L) accesses register HL
...
Returns
-------
int
value of HL register
"""
return ((self.reg[r1] << 8) | self.reg[r2])
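    # Example: with reg[H] = 0x12 and reg[L] = 0x34, get_reg(H, L) returns
    # 0x1234; set_reg(H, L, 0xbeef) stores 0xbe in H and 0xef in L.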
def set_flag(self, flag):
"""
Sets Flag flag in the F register.
Parameters
----------
flag : Flag enum
which flag to set
"""
if flag == self.flags.Z:
self.reg[self.F] |= 0x80
elif flag == self.flags.H:
self.reg[self.F] |= 0x20
elif flag == self.flags.C:
self.reg[self.F] |= 0x10
elif flag == self.flags.N:
self.reg[self.F] |= 0x40
def reset_flag(self, flag):
"""
Resets Flag flag in the F register.
Parameters
----------
flag : Flag enum
which flag to reset
"""
if flag == self.flags.Z:
self.reg[self.F] &= 0x70
elif flag == self.flags.H:
self.reg[self.F] &= 0xd0
elif flag == self.flags.C:
self.reg[self.F] &= 0xe0
elif flag == self.flags.N:
self.reg[self.F] &= 0xb0
def flag_set(self, flag):
"""
Returns True if flag is set
False if not
Parameters
----------
flag : Flag enum
which flag to check
Returns
-------
bool
True if set, False if not
"""
if flag == self.flags.Z:
return self.reg[self.F] & 0x80 != 0
elif flag == self.flags.H:
return self.reg[self.F] & 0x20 != 0
elif flag == self.flags.C:
return self.reg[self.F] & 0x10 != 0
elif flag == self.flags.N:
return self.reg[self.F] & 0x40 != 0
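    # Flag layout in F (matching the masks above): bit 7 = Z, bit 6 = N,
    # bit 5 = H, bit 4 = C; bits 3-0 are always zero on the hardware, which
    # is why pop_nn masks F with 0xf0.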
def add_a_n(self, src, add_carry=False):
"""
Add n to A (and carry if add_carry is true).
Flags:
Z - Set if zero
N - Reset
H - Set if carry from bit 3
C - Set if carry from bit 7
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
clock cycles taken
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
carry_bit = 1 if add_carry and self.flag_set(self.flags.C) else 0
self.reg[self.A] = (a_reg + carry_bit + val) & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) + (val & 0xf) + carry_bit > 0xf:
self.set_flag(self.flags.H)
if a_reg + val + carry_bit > 0xff:
self.set_flag(self.flags.C)
return 8 if src == self.N or src == self.HL else 4
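    # Worked example: A = 0x3a plus 0xc6 gives 0x100, so A becomes 0x00 and
    # Z, H (0xa + 0x6 > 0xf) and C (sum > 0xff) are all set.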
def sub_a_n(self, src, sub_carry=False):
"""
Subtract n from A (n + carry if sub_carry is true)
Flags:
Z - Set if 0
N - Set
H - Set if no borrow from bit 4
C - Set if no borrow
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
number of cylces elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
carry_bit = 1 if sub_carry and self.flag_set(self.flags.C) else 0
self.reg[self.A] = (a_reg - val - carry_bit) & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) < (val & 0xf) + carry_bit:
self.set_flag(self.flags.H)
if a_reg < val + carry_bit:
self.set_flag(self.flags.C)
self.set_flag(self.flags.N)
return 8 if src == self.N or src == self.HL else 4
def and_n(self, src):
"""
Logically AND n with A, result in A
Flags:
Z - Set if result is 0
N/C - Reset
H - Set
Parameters
----------
src
source A-L, (HL), or n
Returns
-------
int
number of cycles elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: #src is index of A-L
val = self.reg[src]
self.reg[self.A] = val & a_reg & 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
self.set_flag(self.flags.H)
return 8 if src == self.N or src == self.HL else 4
def or_n(self, src, exclusive_or=False):
"""
Logically OR or XOR n with A, result in A.
Flags:
Z - Set if 0
N/H/C - Reset
Parameters
----------
src
source A-L, (HL), or n
exclusive_or
if True uses exclusive OR not OR
Returns
-------
int
number of cycles elapsed
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: # src is index of A-L
val = self.reg[src]
#
# if exclusive_or:
# print("data: " + hex(val))
# print("reg a: " + hex(a_reg))
# print((a_reg ^ val) & 0xff)
self.reg[self.A] = (a_reg ^ val) if exclusive_or else (a_reg | val)
self.reg[self.A] &= 0xff
self.reset_flags()
if self.reg[self.A] == 0:
self.set_flag(self.flags.Z)
        return 8 if src == self.HL or src == self.N else 4
def cp_n(self, src):
"""
        Compare A with n (A - n subtraction but results aren't saved).
Flags:
Z - Set if 0
N - Set
H - Set if no borrow from bit 4
C - Set if no borrow (if A is less than n)
Parameters
----------
src
A-L, (HL), N
Returns
-------
int
number of clock cycles
"""
a_reg = self.reg[self.A]
if src == self.N:
val = self.mem.read(self.pc)
self.pc += 1
elif src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
else: # src is index
val = self.reg[src]
self.reset_flags()
self.set_flag(self.flags.N)
if val == a_reg:
self.set_flag(self.flags.Z)
if (a_reg & 0xf) < (val & 0xf):
self.set_flag(self.flags.H)
if a_reg < val:
self.set_flag(self.flags.C)
return 8 if src == self.N or src == self.HL else 4
def inc_n(self, src):
"""
Increment register n
Flags:
Z - Set if 0
N - Reset
H - Set if carry from bit 3
C - Not affected
Parameters
----------
src
A-L, (HL)
Returns
-------
int
number of cycles
"""
if src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
#log.debug('INC HL: address:' + hex(self.get_reg(self.H, self.L)))
#log.debug('OLD VAL: ' + hex(val))
old_val = val
self.mem.write((val + 1) & 0xff, self.get_reg(self.H, self.L))
val = (val + 1) & 0xff
#log.debug('NEW VAL: ' + hex((val + 1) & 0xff))
else: # src is index
old_val = self.reg[src]
val = (self.reg[src] + 1) & 0xff
self.reg[src] = val
self.reset_flag(self.flags.Z)
if val == 0:
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.N)
self.reset_flag(self.flags.H)
if old_val & 0xf == 0xf:
self.set_flag(self.flags.H)
return 12 if src == self.HL else 4
def dec_n(self, src):
"""
Decrement register n.
Flags:
Z - Set if 0
N - Set
H - Set if no borrow from bit 4
C - Not affected
Parameters
----------
src
A-L, (HL)
Returns
-------
int
number of cycles
"""
if src == self.HL:
val = self.mem.read(self.get_reg(self.H, self.L))
self.mem.write((val - 1) & 0xff, self.get_reg(self.H, self.L))
val = (val - 1) & 0xff
else: # src is index
val = (self.reg[src] - 1) & 0xff
self.reg[src] = val
self.set_flag(self.flags.Z) if val == 0 \
else self.reset_flag(self.flags.Z)
self.set_flag(self.flags.N)
        self.set_flag(self.flags.H) if val & 0xf == 0xf \
            else self.reset_flag(self.flags.H)
return 12 if src == self.HL else 4
def add_hl(self, r1, r2, add_sp=False):
"""
Add n to HL.
Flags:
Z - Not affected
N - Reset
H - Set if carry from bit 11
C - Set if carry from bit 15
Parameters
----------
r1, r2
register index for HL, BC, DE
add_sp : bool
            if True, adds to SP instead of the register pair
Returns
-------
int
cycles taken
"""
hl = self.get_reg(self.H, self.L)
val = self.sp if add_sp else self.get_reg(r1, r2)
self.set_reg(self.H, self.L, (val + hl) & 0xffff)
self.reset_flag(self.flags.N)
if (val & 0xfff) + (hl & 0xfff) > 0xfff:
self.set_flag(self.flags.H)
else:
self.reset_flag(self.flags.H)
if val + hl > 0xffff:
self.set_flag(self.flags.C)
else:
self.reset_flag(self.flags.C)
return 8
def add_sp_n(self):
"""
Adds an immediate signed byte to sp.
Flags:
Z, N - Reset
H, C - Set/Reset according to operation
NOTE: Specifications vague if this is 8 or
16 bit flag addition behavior
Returns
-------
int
cycles taken
"""
# read as a signed byte
val = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.reset_flags()
if (self.sp & 0xf) + (val & 0xf) > 0xf:
self.set_flag(self.flags.H)
if (self.sp & 0xff) + (val & 0xff) > 0xff:
self.set_flag(self.flags.C)
self.sp += val
self.sp &= 0xffff
return 16
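    # Worked example: with sp = 0xfff8 and immediate byte 0xf8 (read as -8),
    # sp becomes 0xfff0; H and C are set from the unsigned low-nibble and
    # low-byte additions (0x8 + 0x8 > 0xf, 0xf8 + 0xf8 > 0xff).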
def inc_nn(self, r1, r2, inc_sp=False):
"""
Increment register pair r1r2.
Parameters
----------
r1r2
register pair r1r2
inc_sp : Boolean
if True increments SP not r1r2
Returns
-------
int
clock cycles taken
"""
if inc_sp:
self.sp += 1
self.sp &= 0xffff
else:
val = self.get_reg(r1, r2)
self.set_reg(r1, r2, (val + 1) & 0xffff)
return 8
def dec_nn(self, r1, r2, dec_sp=False):
"""
Decrement register pair r1r2
Parameters
----------
r1r2
register pair r1r2
dec_sp : boolean
if True decrements SP not r1r2
Returns
-------
int
clock cycles taken
"""
if dec_sp:
self.sp -= 1
self.sp &= 0xffff
else:
val = self.get_reg(r1, r2)
self.set_reg(r1, r2, (val - 1) & 0xffff)
return 8
def jump_nn(self):
"""
Jump to nn.
"""
val = self.mem.read_word(self.pc)
self.pc = val
return 12
def jump_cc(self, isSet, flag, immmediate_jump=False):
"""
Jump to address n if flag and isSet match
        Parameters
        ----------
        isSet : bool
            expected flag state for the jump to be taken
        flag : Flag enum
            which flag to test
        immmediate_jump : bool
            if True, a relative jump by a signed byte offset (JR);
            if False, an absolute jump to a two byte address (JP)
Returns
-------
int
number of cycles
"""
if self.flag_set(flag) == isSet:
return self.jump_n() if immmediate_jump else self.jump_nn()
if not immmediate_jump:
self.pc += 1 #two byte jump address so skip it
self.pc += 1
return 12
def jump_hl(self):
"""
Jump to address in HL
Returns
-------
int
cycles taken
"""
self.pc = self.get_reg(self.H, self.L)
return 4
def jump_n(self):
"""
Add n to current address and jump to it.
Returns
-------
int
cycles taken
"""
val = c_int8(self.mem.read(self.pc)).value
self.pc += 1
self.pc += val
return 8
def dec_adjust(self):
"""
Decimal adjust reg A to a representation of Binary Coded Decimal.
Flags
Z - Set if A is zero
N - Not affected
H - Reset
C - Set or reset
referenced GB programming manual page 110 and
github.com/gekkio/mooneye-gb
Returns
-------
int
clock cycles taken
"""
carry = False
a_reg = self.reg[self.A]
if not self.flag_set(self.flags.N):
if self.flag_set(self.flags.C) or a_reg > 0x99:
a_reg += 0x60
carry = True
a_reg &= 0xff
if self.flag_set(self.flags.H) or a_reg & 0x0f > 0x09:
a_reg += 0x06
a_reg &= 0xff
elif self.flag_set(self.flags.C):
carry = True
a_reg += 0x9a if self.flag_set(self.flags.H) else 0xa0
a_reg &= 0xff
elif self.flag_set(self.flags.H):
a_reg += 0xfa
a_reg &= 0xff
self.reset_flag(self.flags.H)
self.reset_flag(self.flags.Z)
if a_reg == 0:
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.C)
if carry:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg
return 4
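    # Worked example: after an ADD that leaves A = 0x7d (e.g. 0x45 + 0x38,
    # N/H/C clear), the low nibble 0xd exceeds 0x09, so 0x06 is added and
    # A becomes 0x83, the correct BCD result of 45 + 38.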
def complement_a(self):
"""
Complements register A (toggles all bits).
Flags
N/H - Set
C/Z - Not affected
Returns
-------
int
number of cycles taken
"""
self.reg[self.A] ^= 0xff
self.set_flag(self.flags.N)
self.set_flag(self.flags.H)
return 4
def complement_cf(self):
"""
Complements the carry flag (toggles it).
Flags
Z - Not affected
H/N - Reset
C - Toggles
Returns
-------
int
cycles taken
"""
if self.flag_set(self.flags.C):
self.reset_flag(self.flags.C)
else:
self.set_flag(self.flags.C)
self.reset_flag(self.flags.N)
self.reset_flag(self.flags.H)
return 4
def set_cf(self):
"""
Sets the carry flag.
Flags
Z - Not affected
H/N - Reset
C - Set
Returns
-------
int
cycles taken
"""
self.set_flag(self.flags.C)
self.reset_flag(self.flags.H)
self.reset_flag(self.flags.N)
return 4
def rotate_l_a_c(self):
"""
Rotates A left, old bit 7 to carry flag.
Flags
            Z - Reset
N/H - Reset
C - Contains old bit 7 data
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
msb = (a_reg & 0x80) >> 7
a_reg <<= 1
a_reg |= msb
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rotate_n_lc(self, src):
"""
Rotates n left, old bit 7 to carry flag.
Flags
Z - Set if 0
N/H - Reset
C - Old bit 7 data
...
Parameters
-----------
src
A-L, HL
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
msb = (data & 0x80) >> 7
data <<= 1
data |= msb
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
if data == 0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data & 0xff, self.get_reg(self.H, self.L))
return 16
else:
self.reg[src] = data & 0xff
return 8
def rotate_l_n(self, src):
"""
Rotates n left through carry flag.
src - A-HL
Flags
Z - set if 0
N/H - reset
C - old bit 7 data
...
Returns
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
msb = ((data & 0x80) >> 7) & 1
carry_in = 1 if self.flag_set(self.flags.C) else 0
data = (data << 1 | carry_in) & 0xff
self.reset_flags()
if msb == 1:
self.set_flag(self.flags.C)
if data == 0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
return 16
else:
self.reg[src] = data & 0xff
return 8
def rotate_l_a(self):
"""
Rotate A left through carry flag.
Flags
Z/N/H - Reset
C - Old bit 7 data
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
a_reg <<= 1
if self.flag_set(self.flags.C):
a_reg |= 1 # set lsb to C
self.reset_flags()
if a_reg & 0x100 == 0x100:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rotate_r_a_c(self):
"""
Rotates A right, old bit 0 to carry flag.
Flags:
C - Old bit 0
Z/H/N - Reset
Returns
-------
int
clock cycles taken
"""
a_reg = self.reg[self.A]
lsb = a_reg & 0x1
a_reg >>= 1
a_reg |= lsb << 7
self.reset_flags()
if lsb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
    # Note: RRCA/RRA reset the Z flag; it is never set, even on a zero result.
def rotate_r_a(self):
"""
Rotate A right through carry flag.
Flags
C - Old bit 0
Z/H/N - Reset
Returns
-------
int
cycles taken
"""
a_reg = self.reg[self.A]
lsb = a_reg & 0x1
a_reg >>= 1
if self.flag_set(self.flags.C):
a_reg |= 0x80
self.reset_flags()
if lsb == 1:
self.set_flag(self.flags.C)
self.reg[self.A] = a_reg & 0xff
return 4
def rr_n(self, src):
"""
Rotate n right through Carry Flag
n = A-L, (HL)
Flags
Z - set if 0
N/H - Reset
C - Old bit 0
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
        carry_in = 1 if self.flag_set(self.flags.C) else 0
        data = (data >> 1) | (carry_in << 7)
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb != 0:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
def rrc_n(self, src):
"""
Rotate n right. Old bit 0 to carry flag
Flags
Z - Set if 0
N/H - Reset
C - Old bit 0 data
Returns
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
data = (data >> 1) | (lsb << 7)
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb == 1:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 8 if src != self.HL else 16
#TODO
def stop(self):
self.pc += 1
log.critical("IMPLEMENT STOP")
return 0
#TODO
def disable_interrupts(self):
self.interrupt_enable = False
return 4
#TODO
def enable_interrupts(self):
self.interrupt_enable = True
return 4
def call(self):
"""
Push address of next instruction onto stack and then jump to address
nn.
Returns
-------
int
cycles taken
"""
address = self.mem.read_word(self.pc)
self.pc += 2
self.push_pc()
self.pc = address
return 12
def call_cc(self, flag, isSet):
"""
Call address n if isSet and flag match
Returns
-------
int
cycles taken
"""
if self.flag_set(flag) == isSet:
return 12 + self.call()
else:
self.pc += 2
return 12
def ret(self):
"""
        Pops two bytes from the stack and jumps to that address.
Returns
-------
int
cycles taken
"""
self.pc = self.mem.read_word(self.sp)
self.sp += 2
return 8
def ret_cc(self, flag, isSet):
"""
Return if isSet and flag match
Returns
-------
int
cycles taken
"""
if self.flag_set(flag) == isSet:
return 12 + self.ret()
else:
return 8
def push_pc(self):
"""
Pushes current program counter value to the stack
MSB first
"""
self.sp -= 1
self.mem.write((self.pc & 0xff00) >> 8, self.sp)
self.sp -= 1
self.mem.write((self.pc & 0xff), self.sp)
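    # Illustrative stack layout: with pc == 0x1234 and sp == 0xfffe,
    # push_pc() leaves sp == 0xfffc, mem[0xfffd] == 0x12 (MSB) and
    # mem[0xfffc] == 0x34 (LSB).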
def dump_registers(self):
"""
Prints the current cpu registers and their values to the screen.
"""
print("A: ", hex(self.reg[self.A]))
print("B: ", hex(self.reg[self.B]))
print("C: ", hex(self.reg[self.C]))
print("D: ", hex(self.reg[self.D]))
print("E: ", hex(self.reg[self.E]))
print("F: ", hex(self.reg[self.F]))
print("H: ", hex(self.reg[self.H]))
print("L: ", hex(self.reg[self.L]))
print("PC: ", hex(self.pc))
print("SP: ", hex(self.sp))
def reset_flags(self):
""" Resets all Flags to 0. """
self.reg[self.F] = 0
def sla_n(self, src):
"""
Shift src left into carry, LSB of n set to 0.
Flags
Z - Set if 0
H/N - Reset
C - old bit 7 data
Parameters
----------
src
A-L, (HL)
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
        msb = data & 0x80
data <<= 1
data &= 0xff
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if msb != 0:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 8 if src != self.HL else 16
def srl_n(self, src, signed):
"""
        Shift n right into Carry. MSB preserved if signed = True (arithmetic
        shift, SRA), else set to 0 (logical shift, SRL).
n : A-L, (HL)
Flags:
Z - set if 0
N/H - Reset
C - Old bit 0 data
Parameters
----------
src
register to shift
        signed
            if True the MSB is preserved (arithmetic shift, SRA); if False
            the MSB is cleared (logical shift, SRL)
Returns
-------
int
cycles taken
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lsb = data & 0x1
if signed:
bit7 = data & 0x80
data >>= 1
data |= bit7
else:
data >>= 1
data &= 0x7f
self.reset_flags()
if data == 0:
self.set_flag(self.flags.Z)
if lsb == 1:
self.set_flag(self.flags.C)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
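    # Example contrast (illustrative) for a register holding 0x81:
    #   signed=True  (SRA) -> 0xc0, C = 1  (bit 7 preserved)
    #   signed=False (SRL) -> 0x40, C = 1  (bit 7 cleared)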
def swap(self, src):
"""
Swaps the upper and lower nibbles of n.
n = A-L/(HL)
Flags
Z - Set if 0
N/H/C - Reset
Parameters
----------
src
to swap A-L/(HL)
Returns
-------
int
clock cycles
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
lower_nibble = data & 0xf
data = ((data & 0xf0) >> 4) | (lower_nibble << 4)
self.reset_flags()
if data == 0x0:
self.set_flag(self.flags.Z)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
else:
self.reg[src] = data
return 16 if src == self.HL else 8
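    # Example (illustrative): a register holding 0xab becomes 0xba after
    # swap(); a register holding 0x00 stays 0x00 and the Z flag is set.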
def bit_br(self, bit, reg):
"""
Tests bit b in register r.
Flags:
Z - Set if 0
N - reset
H - set
C - not affected
Returns
-------
int
number of cycles
"""
if reg == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[reg]
self.reset_flag(self.flags.Z)
if not self.is_set(data, bit):
self.set_flag(self.flags.Z)
self.reset_flag(self.flags.N)
self.set_flag(self.flags.H)
return 8 if reg == self.HL else 4
def set_b_r(self, src, bit, new_bit):
"""
Sets bit bit in src to new_bit.
"""
if src == self.HL:
data = self.mem.read(self.get_reg(self.H, self.L))
else:
data = self.reg[src]
data = self.set_bit(data, bit, new_bit)
if src == self.HL:
self.mem.write(data, self.get_reg(self.H, self.L))
return 8
else:
self.reg[src] = data
return 4
def set_bit(self, num, bit, new_bit):
"""
Sets bit bit in num to new_bit.
"""
if new_bit == 1:
return num | 1 << bit
else:
return num & ~(1 << bit)
def is_set(self, num, bit):
"""
Tests if bit bit is set in num.
Returns
-------
True if 1
False if 0
"""
return ((num >> bit) & 0x1) == 0x1
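    # Examples (illustrative):
    #   set_bit(0b1000, 0, 1) -> 0b1001
    #   set_bit(0b1001, 3, 0) -> 0b0001
    #   is_set(0b1001, 0)     -> True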
def restart(self, offset):
"""
Pushes current address onto the stack, and then jumps to
0x0 + offset.
"""
self.push_pc()
self.pc = offset
return 16
#TODO
def ret_interrupts(self):
"""
Returns and enables interrupts
"""
self.interrupt_enable = True
return self.ret()
def halt(self):
"""
TODO
"""
return 4
| mit | -9,042,496,380,038,771,000 | 29.895788 | 98 | 0.479938 | false |
skosukhin/spack | var/spack/repos/builtin/packages/notmuch/package.py | 1 | 1675 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Notmuch(AutotoolsPackage):
"""Notmuch is a mail indexer.
    Essentially, it is a very thin front end on top of Xapian.
"""
homepage = "https://notmuchmail.org/"
url = "https://notmuchmail.org/releases/notmuch-0.23.7.tar.gz"
version('0.23.7', '1ad339b6d0c03548140434c7bcdf0624')
depends_on('zlib')
depends_on('talloc')
depends_on('[email protected]:')
depends_on('xapian-core')
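    # Usage note (illustrative): once this recipe is in a Spack repo, the
    # package can be installed with `spack install notmuch`.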
| lgpl-2.1 | -5,391,594,835,763,923,000 | 38.880952 | 78 | 0.666269 | false |
gpoulin/pybeem | beem/ui/ui.py | 1 | 2119 | import os
import json
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
from PyQt4 import QtGui, QtCore
signal = QtCore.pyqtSignal
slot = QtCore.pyqtSlot
property = QtCore.pyqtProperty
from beem.io import grid_from_3ds
from beem.experiment import Grid
from beem.ui.graph import contourplot
_pref = dict()
def select_file(folder = None, filter = None):
filename = QtGui.QFileDialog.getOpenFileNames(directory = folder,
filter = filter)
return filename
def open3ds(filename = None, folder = None):
"""Return a Grid object from 3ds files
"""
    if filename is None:
filename = select_file(folder = folder, filter = '3ds (*.3ds)')
if len(filename)==0:
return None
    grids = None
    for f in filename:
        g = grid_from_3ds(f)
        grids = g if grids is None else grids + g
    return grids
def fast_analysis(filename = None):
"""Do the default analysis on 3ds files
"""
    grid = open3ds(filename)
    if grid is None:
return None
grid.normal_fit()
grid.update_dict()
grid.fit()
contourplot(grid.extract_good())
return grid
def find_config():
"""Return the location of the config and
create folder to store it if needed
"""
if os.name == 'posix':
folder = os.path.expanduser('~/.pybeem')
elif os.name == 'nt':
folder = os.path.expandvars('%APPDATA%/pybeem')
else:
raise Exception("Don't know where to save config. OS unknown")
if not os.path.exists(folder):
os.makedirs(folder)
return folder + '/pybeem.conf'
def save_pref(filename = None):
    if filename is None:
filename = find_config()
fid = open(filename,'w')
json.dump(_pref,fid)
fid.close()
def load_pref(filename = None):
global _pref
    if filename is None:
filename = find_config()
if os.path.exists(filename):
fid = open(filename,'r')
_pref.update(json.load(fid))
fid.close()
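# Usage sketch (illustrative; 'last_folder' is a hypothetical key):
#   load_pref()                      # read pybeem.conf if it exists
#   _pref['last_folder'] = '/tmp'
#   save_pref()                      # persist the updated preferences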
| gpl-3.0 | 6,990,025,243,627,736,000 | 23.356322 | 71 | 0.622935 | false |
kapadia/geoblend | benchmark/benchmark_vector.py | 1 | 2033 |
import os
import benchmark
import numpy as np
import rasterio as rio
from skimage.morphology import disk
from geoblend.vector import create_vector
class Benchmark_Vector_Small(benchmark.Benchmark):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
fixtures = os.path.join(directory, '..', 'tests', 'fixtures')
srcpath = os.path.join(fixtures, 'source.tif')
refpath = os.path.join(fixtures, 'reference.tif')
with rio.open(srcpath) as src:
self.source = src.read(1).astype(np.float64)
with rio.open(refpath) as ref:
self.reference = ref.read(1).astype(np.float64)
# Create a non-rectangular mask
d = disk(60)
dim = 121 # disk dimension is 121x121
d2 = 0.5 * dim
height, width = self.source.shape
h2, w2 = 0.5 * height, 0.5 * width
y0, y1 = int(h2 - d2), int(h2 + d2)
x0, x1 = int(w2 - d2), int(w2 + d2)
self.mask = np.zeros_like(self.source, dtype=np.uint8)
self.mask[y0:y1, x0:x1] = d
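        # The 121x121 disk is pasted into a window centered on the image,
        # so the benchmark exercises a circular (non-rectangular) mask.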
def test_cython(self):
vector = create_vector(self.source, self.reference, self.mask)
class Benchmark_Vector_Large(benchmark.Benchmark):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
fixtures = os.path.join(directory, '..', 'tests', 'fixtures')
srcpath = os.path.join(fixtures, '20150805_090528_0823_analytic', '20150805_090528_0823_analytic.tif')
refpath = os.path.join(fixtures, '20150805_090528_0823_analytic', 'resampled', 'reference.tif')
with rio.open(srcpath) as src:
self.source = src.read(1).astype(np.float64)
self.mask = src.read(4).astype(np.uint8)
with rio.open(refpath) as ref:
self.reference = ref.read(1).astype(np.float64)
def test_cython(self):
vector = create_vector(self.source, self.reference, self.mask)
if __name__ == '__main__':
benchmark.main(format='markdown', each=10) | mit | -4,203,776,271,449,375,000 | 30.292308 | 110 | 0.618298 | false |
stochasticHydroTools/RigidMultiblobsWall | single_sphere/single_sphere_rejection.py | 1 | 1547 | '''This script will display a histogram of a single sphere's height when next to a wall using sphere.py.
1,000,000 heights will be generated by iterating over n_steps, and written to a text file: rejection_locations.txt
On top of the histogram is a plot of the analytical GB distribution
Prints the time taken for all the calculations'''
import numpy as np
import time
import sphere as s
outFile = 'rejection_locations.txt'
# constants listed for convenience, none here are changed from what is in sphere.py
s.A = 0.265*np.sqrt(3./2.)
s.VISCOSITY = 8.9e-4
s.WEIGHT = 1.*0.0000000002*(9.8*1e6)
s.KT = 300.*1.3806488e-5
s.REPULSION_STRENGTH = 7.5 * s.KT
s.DEBYE_LENGTH = 0.5*s.A
sample_state = [0., 0., 1.1] # the position of a single sphere
n_steps = 1000000 # the number of height positions to be generated
f = open(outFile, 'w')
start_time = time.time()
# generate appropriate normalization constant
partition_steps = 10000 # number of samples generated for Z
partitionZ = s.generate_partition(partition_steps)
for i in range(n_steps):
# get a position from rejection function
sample_state = s.single_sphere_rejection(partitionZ)
# send that position to the data file
f.write(str(sample_state[2]) + '\n')
f.close()
end_time = time.time() - start_time
print(end_time) # should take somewhere around 80 seconds for one million heights
num_points = 100000
x, y = s.analytical_distribution(num_points) # calculate points for the analytical curve
s.plot_distribution(outFile, x, y, n_steps) # generate histogram and analytical curve
| gpl-3.0 | 848,121,353,568,755,100 | 34.976744 | 114 | 0.74596 | false |
googleinterns/cloud-monitoring-notification-delivery-integration-sample-code | jira_integration_example/tests/jira_notification_handler_integration_test.py | 1 | 11299 | # Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import functools
from copy import deepcopy
import pytest
from google.cloud import monitoring_v3
from google.api_core import exceptions
from google.api_core import retry
from jira import JIRA
import main
from tests import constants
@retry.Retry(predicate=retry.if_exception_type(exceptions.GoogleAPICallError), deadline=10)
def short_retry(callable_function, *args):
return callable_function(*args)
@retry.Retry(predicate=retry.if_exception_type(AssertionError), deadline=180)
def long_retry(callable_function, *args):
return callable_function(*args)
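# Usage sketch (illustrative): wrap flaky calls in these helpers, e.g.
#   descriptor = short_retry(metric_client.get_metric_descriptor, name)
#   long_retry(assert_jira_issue_is_created)
# so transient API errors (10 s budget) and not-yet-consistent assertions
# (180 s budget) are retried instead of failing the test immediately.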
@pytest.fixture
def config():
return main.app.config
@pytest.fixture
def jira_client(config):
# setup
oauth_dict = {'access_token': config['JIRA_ACCESS_TOKEN'],
'access_token_secret': config['JIRA_ACCESS_TOKEN_SECRET'],
'consumer_key': config['JIRA_CONSUMER_KEY'],
'key_cert': config['JIRA_KEY_CERT']}
jira_client = JIRA(config['JIRA_URL'], oauth=oauth_dict)
yield jira_client
# tear down
test_issues = jira_client.search_issues('summary~"test condition"')
for issue in test_issues:
issue.delete()
@pytest.fixture
def metric_descriptor(config, request):
def create_metric_descriptor(metric_name):
# setup
metric_client = monitoring_v3.MetricServiceClient()
gcp_project_path = metric_client.project_path(config['PROJECT_ID'])
test_metric_descriptor = deepcopy(constants.TEST_METRIC_DESCRIPTOR_TEMPLATE)
test_metric_descriptor['type'] = test_metric_descriptor['type'].format(METRIC_NAME=metric_name)
metric_descriptor = metric_client.create_metric_descriptor(
gcp_project_path,
test_metric_descriptor)
metric_descriptor = short_retry(metric_client.get_metric_descriptor, metric_descriptor.name)
# tear down (addfinalizer is called after the test finishes execution)
request.addfinalizer(functools.partial(metric_client.delete_metric_descriptor, metric_descriptor.name))
return metric_descriptor
return create_metric_descriptor
@pytest.fixture
def notification_channel(config):
# setup
notification_channel_client = monitoring_v3.NotificationChannelServiceClient()
gcp_project_path = notification_channel_client.project_path(config['PROJECT_ID'])
test_notification_channel = constants.TEST_NOTIFICATION_CHANNEL_TEMPLATE
test_notification_channel['labels']['topic'] = constants.TEST_NOTIFICATION_CHANNEL_TEMPLATE['labels']['topic'].format(PROJECT_ID=config['PROJECT_ID'])
notification_channel = notification_channel_client.create_notification_channel(
gcp_project_path,
test_notification_channel)
notification_channel = short_retry(notification_channel_client.get_notification_channel,
notification_channel.name)
yield notification_channel
# tear down
notification_channel_client.delete_notification_channel(notification_channel.name)
@pytest.fixture
def alert_policy(config, notification_channel, request):
def create_alert_policy(alert_policy_name, metric_name):
# setup
policy_client = monitoring_v3.AlertPolicyServiceClient()
gcp_project_path = policy_client.project_path(config['PROJECT_ID'])
test_alert_policy = deepcopy(constants.TEST_ALERT_POLICY_TEMPLATE)
test_alert_policy['notification_channels'].append(notification_channel.name)
test_alert_policy['display_name'] = alert_policy_name
test_alert_policy['user_labels']['metric'] = metric_name
metric_path = constants.METRIC_PATH.format(METRIC_NAME=metric_name)
test_alert_policy['conditions'][0]['condition_threshold']['filter'] = test_alert_policy['conditions'][0]['condition_threshold']['filter'].format(METRIC_PATH=metric_path)
alert_policy = policy_client.create_alert_policy(
gcp_project_path,
test_alert_policy)
alert_policy = short_retry(policy_client.get_alert_policy, alert_policy.name)
# tear down (addfinalizer is called after the test finishes execution)
request.addfinalizer(functools.partial(policy_client.delete_alert_policy, alert_policy.name))
return alert_policy
return create_alert_policy
def append_to_time_series(config, metric_name, point_value):
client = monitoring_v3.MetricServiceClient()
gcp_project_path = client.project_path(config['PROJECT_ID'])
series = monitoring_v3.types.TimeSeries()
series.metric.type = constants.METRIC_PATH.format(METRIC_NAME=metric_name)
series.resource.type = constants.RESOURCE_TYPE
series.resource.labels['instance_id'] = constants.INSTANCE_ID
series.resource.labels['zone'] = constants.ZONE
point = series.points.add()
point.value.double_value = point_value
now = time.time()
point.interval.end_time.seconds = int(now)
point.interval.end_time.nanos = int(
(now - point.interval.end_time.seconds) * 10**9)
client.create_time_series(gcp_project_path, [series])
def test_open_close_ticket(config, metric_descriptor, notification_channel, alert_policy, jira_client):
# Sanity check that the test fixtures were initialized with values that the rest of the test expects
metric_descriptor = metric_descriptor('integ-test-metric')
alert_policy = alert_policy('integ-test-policy', 'integ-test-metric')
assert metric_descriptor.type == constants.TEST_METRIC_DESCRIPTOR_TEMPLATE['type'].format(METRIC_NAME='integ-test-metric')
assert notification_channel.display_name == constants.TEST_NOTIFICATION_CHANNEL_TEMPLATE['display_name']
assert alert_policy.display_name == 'integ-test-policy'
assert alert_policy.notification_channels[0] == notification_channel.name
def assert_jira_issue_is_created():
# Search for all issues where the status is 'unresolved' and
        # the integ-test-metric custom field is set to the Cloud Monitoring project ID
project_id = config['PROJECT_ID']
query_string = f'description~"custom/integ-test-metric for {project_id}" and status=10000'
created_monitoring_issues = jira_client.search_issues(query_string)
assert len(created_monitoring_issues) == 1
def assert_jira_issue_is_resolved():
# Search for all issues where the status is 'resolved' and
        # the integ-test-metric custom field is set to the Cloud Monitoring project ID
project_id = config['PROJECT_ID']
query_string = f'description~"custom/integ-test-metric for {project_id}" and status={config["CLOSED_JIRA_ISSUE_STATUS"]}'
resolved_monitoring_issues = jira_client.search_issues(query_string)
assert len(resolved_monitoring_issues) == 1
# trigger incident and check jira issue created
append_to_time_series(config, 'integ-test-metric', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE + 1)
long_retry(assert_jira_issue_is_created) # issue status id for "To Do"
# resolve incident and check jira issue resolved
append_to_time_series(config, 'integ-test-metric', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE)
long_retry(assert_jira_issue_is_resolved)
def test_open_resolve_multiple_tickets(config, metric_descriptor, notification_channel, alert_policy, jira_client):
# Sanity check that the test fixtures were initialized with values that the rest of the test expects
metric_descriptor_1 = metric_descriptor('integ-test-metric-1')
alert_policy_1 = alert_policy('integ-test-policy-1', 'integ-test-metric-1')
metric_descriptor_2 = metric_descriptor('integ-test-metric-2')
alert_policy_2 = alert_policy('integ-test-policy-2', 'integ-test-metric-2')
assert notification_channel.display_name == constants.TEST_NOTIFICATION_CHANNEL_TEMPLATE['display_name']
assert metric_descriptor_1.type == constants.TEST_METRIC_DESCRIPTOR_TEMPLATE['type'].format(METRIC_NAME='integ-test-metric-1')
assert alert_policy_1.display_name == 'integ-test-policy-1'
assert alert_policy_1.notification_channels[0] == notification_channel.name
assert metric_descriptor_2.type == constants.TEST_METRIC_DESCRIPTOR_TEMPLATE['type'].format(METRIC_NAME='integ-test-metric-2')
assert alert_policy_2.display_name == 'integ-test-policy-2'
assert alert_policy_2.notification_channels[0] == notification_channel.name
def assert_jira_issues_are_created(metric_names):
# Search for all issues where the status is 'unresolved' and
        # the metric custom field is set to the Cloud Monitoring project ID
project_id = config['PROJECT_ID']
for metric_name in metric_names:
query_string = f'description~"custom/{metric_name} for {project_id}" and status=10000' # issue status for To Do
created_monitoring_issues = jira_client.search_issues(query_string)
assert len(created_monitoring_issues) == 1
def assert_jira_issues_are_resolved(metric_names):
# Search for all issues where the status is 'resolved' and
        # the metric custom field is set to the Cloud Monitoring project ID
project_id = config['PROJECT_ID']
for metric_name in metric_names:
query_string = f'description~"custom/{metric_name} for {project_id}" and status={config["CLOSED_JIRA_ISSUE_STATUS"]}'
resolved_monitoring_issues = jira_client.search_issues(query_string)
assert len(resolved_monitoring_issues) == 1
# trigger incident for integ-test-policy-1 and check jira issue created
append_to_time_series(config, 'integ-test-metric-1', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE + 1)
long_retry(assert_jira_issues_are_created, ['integ-test-metric-1'])
# trigger incident for integ-test-policy-2 and check issues for policy 1 and 2 exist
append_to_time_series(config, 'integ-test-metric-2', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE + 1)
long_retry(assert_jira_issues_are_created, ['integ-test-metric-1', 'integ-test-metric-2'])
# resolve incident for integ-test-policy-1 and check jira issue resolved for policy 1, unresolved for 2
append_to_time_series(config, 'integ-test-metric-1', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE)
long_retry(assert_jira_issues_are_resolved, ['integ-test-metric-1'])
long_retry(assert_jira_issues_are_created, ['integ-test-metric-2'])
# resolve incident for integ-test-policy-2 and check both jira issues are resolved
append_to_time_series(config, 'integ-test-metric-2', constants.TRIGGER_NOTIFICATION_THRESHOLD_DOUBLE)
long_retry(assert_jira_issues_are_resolved, ['integ-test-metric-1', 'integ-test-metric-2'])
| apache-2.0 | -1,554,068,126,540,217,900 | 47.286325 | 177 | 0.721568 | false |
abmantz/lmc | setup.py | 1 | 3790 | """A setuptools based setup module.
See (and based on):
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='lmc',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2.1',
description='Logarithmantic Monte Carlo',
long_description=long_description,
# The project's main homepage.
url='https://github.com/abmantz/lmc',
# Author details
author='Adam Mantz',
author_email='[email protected]',
# Choose your license
license='LGPL-3.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
# What does your project relate to?
#keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['examples']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
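# Usage note (illustrative): with this file at the repository root, the
# package can be installed locally with `pip install .`.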
| lgpl-3.0 | 3,584,288,068,886,303,000 | 34.092593 | 94 | 0.665963 | false |
icoderaven/slytherin_dagger | src/utils.py | 1 | 1217 | #!/usr/bin/env python
import math
import numpy as np
#----------------------------------------------------------------------
#converts angles in degrees to radians
#----------------------------------------------------------------------
def deg_to_rad(angle):
return angle*math.pi/180.0
#----------------------------------------------------------------------
#converts angles in radians to degrees
#----------------------------------------------------------------------
def rad_to_deg(angle):
return angle*180.0/math.pi
#----------------------------------------------------------------------
#converts ROS Point/Vector3 object to a numpy array
#----------------------------------------------------------------------
def convert_position_to_array(position):
pos = np.zeros(3)
pos[0] = position.x
pos[1] = position.y
pos[2] = position.z
return pos
#----------------------------------------------------------------------
#converts ROS Quaternion object to a numpy array
#----------------------------------------------------------------------
def convert_orientation_to_array(orientation):
q = np.zeros(4)
q[0] = orientation.x
q[1] = orientation.y
q[2] = orientation.z
q[3] = orientation.w
return q
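#----------------------------------------------------------------------
#usage sketch (illustrative; assumes a ROS geometry_msgs Pose `pose`)
#   pos = convert_position_to_array(pose.position)      # array([x, y, z])
#   q = convert_orientation_to_array(pose.orientation)  # array([x, y, z, w])
#----------------------------------------------------------------------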
| bsd-3-clause | 7,963,987,814,229,428,000 | 32.805556 | 72 | 0.380444 | false |
i-am-Q/my_py | mypa.py | 1 | 12022 | #mypa.py
import speech_recognition as sr #imports Speech_Recognition Modules
from AppKit import NSSpeechSynthesizer #imports Speech Synthesizer for tts
from random import randint #imports randint function
import time #imports time modules to pause actions
import sys #imports system functions
import json #imports json
import urllib
#import pyaudio #imports pyaudio to play audio
#import wave #imports .wav conversion
r = sr.Recognizer() #used to shorten recognizer
cont = True #used for while loops
playAgain = True
nssp = NSSpeechSynthesizer #used to shorten Apple's native speech synthesizer
ve = nssp.alloc().init() #allocates and initializes the speech synthesizer
voice = 'com.apple.speech.synthesis.voice.alex' #voice chosen for speech synthesizer
ve.setVoice_(voice) #sets appropriate voice
key = '19915fc68b895c6e' #api key for wunderground
def wakeUp(): #main function, will be used to "wake up" similar to "hey siri"
while cont: #ensures that the loop runs continuously
        iSaid = listen() #listens for my input
if iSaid == 'hey bro': #'hey bro' is the wake up command. This statement checks to see if user wants to do something
            talking('How can I help you sir',2) #talking function called
selection()
else: #if nothing is said that matches up prints warning and plays sound
print 'no go' #warning
print('\a')
def preSelection(userText, compare): #figures out which commands are being given based on a list of key words
try:
myCommand = [userText.find(word) != -1 for word in compare] #creates a true or false array based on what was said
comInterpreted = [i for i, x in enumerate(myCommand) if x] #extracts position of only true responses
return comInterpreted #sends back the response
except AttributeError:
print AttributeError #response when nothing is said
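# Example (illustrative): with broCommands = ['high','low','quit',...],
# preSelection('play the high low game', broCommands) returns [0, 1],
# since only the key words at indices 0 and 1 occur in the spoken text.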
def selection():
while True:
iSaid = listen() #listen function called
broCommands = ['high','low','quit','current','weather','forecast','rock','paper','scissors','outside'] #key words to look out for
findTruth = preSelection(iSaid, broCommands)
print findTruth
        if (findTruth == [0,1]): #'high low' command to start hi low game
            playHiLow() #starts hi low game
        elif (findTruth == [2]): #'quit' command to terminate program
break #quits program
elif (findTruth == [3,4]) or (findTruth == [4,9]): #'weather' command to get current weather
currentWeather() #gets weather
        elif (findTruth == [4,5]): #'forecast' command to get four-day forecast
forecast() #gets forecast
elif (findTruth == [6,7,8]): #'rps' command to play rps
rpsGame() #plays rps game
def talking(text,talkTime): #(text) are the words that will be said. (talkTime) is the amount of time needed to complete sentence. This function will say the words and ensure that the program pauses so it does not hear itself
while ve.isSpeaking: #loop runs while the comp is talking
ve.startSpeakingString_(text) #says the text that is passed to the function
time.sleep(talkTime) #takes a pause while the computer speaks
break
def listen(): #listens to what is being said
with sr.Microphone() as source: #determines which mic to use
print '-'*50
print ''
print 'Adjusting for background noise' #preps to take background noise
r.adjust_for_ambient_noise(source) #listens for ambient noise
print("Set minimum energy threshold to {}".format(r.energy_threshold)) #displays ambient noise adjustment
        print 'I am listening' #preps to listen to user
audio = r.listen(source) #listens to user
try:
myWords = r.recognize(audio) #turns captured audio into text
print('This time you said:' + myWords) #prints what you said
print ''
return myWords #returns the text so that it can continue to be used
except LookupError: #warns user that the audio could not be interpereted
talking('I am sorry, I could not understand what you said',3)
def playHiLow(): #Higher or Lower Game
playAgain = True #defines play again state
while playAgain: #loop to play game
numGuess = 0 #on a new game, number of guesses restarts
compNum = randint(1,10) #computer picks number
print compNum #DELETE LATER display computers number
talking('I have picked a number between one and ten. Can you guess what it is?',5) #let user know game has started
while True: #starts loop for current game
playerGuess = listen() #listens for players guess
if playerGuess == 'sex': #checks for the number 6 (has difficulty understanding difference between 6 and sex)
playerGuess = '6' #turns 'sex' into string '6'
try: #checks to see if it can make an integer
playerGuess = int(playerGuess) #turns number string into int
except ValueError: #if it can not turn into a string act like it did not understand
talking('I am sorry, I could not understand what you said',3) #proclaim ignorance
if playerGuess == compNum: #checks for a winning condition
numGuess += 1 #adds final count to number of guesses
text = 'Congratulations! You won in %i guesses!' %(numGuess) #congratulates the winner
talking(text,4) #says congratulations
talking('Do you want to play again. Yes or no?',2) #asks to play again
reDo = listen() #listens for user response
if reDo == 'yes': #checks if new game is to be played
break #breaks current loop to start a new game
else: #if anything else is said, assume a quit
playAgain = False #signal to end the entire game
break #break current loop
elif playerGuess < compNum: #check if players guess is below computers number
talking('Guess higher',1) #tell user to guess higher
numGuess += 1 #add to guess count
elif playerGuess > compNum: #check if players guess is above computers guess
talking('Guess lower',1) #tell user to guess lower
numGuess += 1
def getZIP():
url = 'http://ipinfo.io/json'
f = urllib.urlopen(url)
json_string = f.read()
parsed_json = json.loads(json_string)
zip = parsed_json['postal']
city = parsed_json['city']
state = parsed_json['region']
data = [zip,city,state]
return data
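# example return value (illustrative): ['15213', 'Pittsburgh', 'Pennsylvania']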
def currentWeather(): #current weather function
    zip = getZIP() #looks up ZIP code, city, and state from the machine's IP address
text = 'getting weather information on ' + zip[1] + ',' + zip[2]
talking(text, 4)
    url = 'http://api.wunderground.com/api/' + key + '/geolookup/conditions/q/' + zip[0] + '.json' #goes to wunderground api (query by ZIP, consistent with forecast())
f = urllib.urlopen(url) #gets data
json_string = f.read() #reads data
parsed_json = json.loads(json_string) #parses data
city = parsed_json['location']['city']
state = parsed_json['location']['state']
weather = parsed_json['current_observation']['weather']
temperature_string = parsed_json['current_observation']['temp_f']
temperature_string = str(temperature_string)
feelslike_string = parsed_json['current_observation']['feelslike_f']
weatherText = 'Weather in ' + city + ', ' + state + ': ' + weather.lower() + '. The temperature is ' + temperature_string + ' but it feels like ' + feelslike_string + '.'
talking(weatherText, 10)
f.close()
def forecast(): #four day forecast
    zip = getZIP() #looks up ZIP code, city, and state from the machine's IP address
text = 'getting weather information on ' + zip[1] + ',' + zip[2]
talking(text, 4)
url = 'http://api.wunderground.com/api/' + key + '/geolookup/forecast/q/' + zip[0] + '.json' #goes to wunderground api
f = urllib.urlopen(url) #gets data
json_string = f.read() #reads data
parsed_json = json.loads(json_string) #parses data
for day in parsed_json['forecast']['simpleforecast']['forecastday']: #loop to anounce forecast
x = day['date']['day'] #day is an intiger
y = str(x) #convert intiger to string
forecastText = 'The weather for ' + day['date']['weekday'] + ', ' + day['date']['monthname'] + ' ' + y + ' will be ' + day['conditions'] + ' with a high of ' + day['high']['fahrenheit'] + ' degrees fahrenheit and a low of ' + day['low']['fahrenheit'] + ' degrees farenheit'
talking(forecastText, 10)
f.close()
class rpsGame:
def __init__(self): #play RPS
compScore = 0
playerScore = 0
tieGame = 0
player = 0
playing = True
validity = True
talking('Lets play a game of Rock, Paper, Scissors', 3) #lets player know that the game is starting
while playing : #starts loop to play game
while validity: #starts loop for player selection
iSaid = listen() #listens for player response
broCommands = ['rock','paper','scissors','quit','Rock'] #key words to look out for
playerHand = preSelection(iSaid, broCommands)
if (playerHand == [0]) or (playerHand == [4]):
player = 1
break
elif playerHand == [1]:
player = 2
break
elif playerHand == [2]:
player = 3
break
elif playerHand == [3]:
player = 4
break
else:
print 'Invalid Choice'
if player ==4: #quits game
if playerScore > compScore:
text = 'final score, player %i, computer %i, Congratulations you win' % (playerScore, compScore)
elif playerScore < compScore:
text = 'final score, player %i, computer %i, Computer wins' % (playerScore, compScore)
else :
text = 'final score, player %i, computer %i, tie game' % (playerScore, compScore)
talking(text, 6)
break
else: #starts to determine a winner
comp = self.compHand() #gets a "hand" for computer
result = self.playHand(comp, player) #gets a result
playerChoice = self.interpret(player) #turns numbers into readable text
                compChoice = self.interpret(comp)
print '\nYou played %s and the computer played %s' % (playerChoice, compChoice)
talking(result, 2)
print ''
print '-'*34
if result == 'Computer wins!':
compScore += 1
elif result == 'Player wins!':
playerScore += 1
elif result == 'Tie game':
tieGame += 1
print 'Player: %i Computer: %i Tie Games: %i' % (playerScore, compScore, tieGame)
print '-'*34
print ''
def compHand(self): #needed for rps game
compVal = randint(1,3)
return compVal
def interpret(self,num): #needed for rps game
if num == 1:
talking('Rock', 1)
return 'Rock'
elif num == 2:
talking('Paper', 1)
return 'Paper'
elif num == 3:
talking('Scissors', 1)
return 'Scissors'
def playHand(self,comp, player): #needed for rps game
if comp == player:
return 'Tie game'
if (comp == 1 and player == 3) or (comp == 2 and player == 1) or (comp == 3 and player == 2):
return 'Computer wins!'
else:
return 'Player wins!'
"""
if myWords == "run": #looks for 'run' command to run the hi low game
print 'got it'
import hi_lowGame
elif myWords == "game": #looks for 'game' command to run rps game
print 'starting game'
import rps_game
else: #lets user know that the command does not do anything
print 'not a command'
import random
"""
wakeUp() | gpl-2.0 | 7,865,665,472,232,642,000 | 45.242308 | 278 | 0.619198 | false |
google-research/google-research | eeg_modelling/eeg_viewer/lookup_test.py | 1 | 2435 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
import re
from absl.testing import absltest
from eeg_modelling.eeg_viewer import lookup
CHANNEL_KEYS = ['eeg_channel/EEG feat_1-REF/samples',
'eeg_channel/EEG feat_2/samples']
CHANNEL_MATCHERS = [
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/samples'),
re.compile(r'eeg_channel/POL (EKG\w+)/samples'),
re.compile(r'eeg_channel/(\w+)/samples'),
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/resampled_samples'),
re.compile(r'(seizure_bin)ary_per_sec'),
]
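# The matchers map raw channel keys to shorthand names used by the viewer,
# e.g. 'eeg_channel/EEG feat_1-REF/samples' <-> 'FEAT_1' (illustrative; see
# the tests below).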
class LookupTest(absltest.TestCase):
def setUp(self):
super(LookupTest, self).setUp()
self.test_lookup = lookup.Lookup(CHANNEL_KEYS, CHANNEL_MATCHERS)
def testGetKeyFromIndex(self):
self.assertEqual('eeg_channel/EEG feat_1-REF/samples',
self.test_lookup.GetKeyFromIndex(0))
self.assertEqual('eeg_channel/EEG feat_2/samples',
self.test_lookup.GetKeyFromIndex(1))
def testGetKeyFromIndexReturnsNone(self):
self.assertIsNone(self.test_lookup.GetKeyFromIndex(3))
def testGetIndexFromShorthand(self):
self.assertEqual('0', self.test_lookup.GetIndexFromShorthand('FEAT_1'))
self.assertEqual('1', self.test_lookup.GetIndexFromShorthand('FEAT_2'))
def testGetIndexFromShorthandReturnsNone(self):
self.assertIsNone(self.test_lookup.GetIndexFromShorthand('FEAT_3'))
def testGetShorthandFromKey(self):
self.assertEqual('FEAT_1', self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_1-REF/samples'))
self.assertEqual('FEAT_2', self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_2/samples'))
def testGetShorthandFromKeyReturnsNone(self):
self.assertIsNone(self.test_lookup.GetShorthandFromKey(
'eeg_channel/EEG feat_3/samples'))
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 6,273,548,492,056,489,000 | 34.289855 | 75 | 0.714579 | false |
jessica-taylor/quipp2 | src/python/graphbuilder.py | 1 | 16660 | import math
import numpy as np
from numpy import linalg
import itertools
from callhaskell import *
class Queue(object):
def __init__(self, items=None):
if items is None:
self.items = []
else:
self.items = list(reversed(items))
def dequeue(self):
return self.items.pop()
class VarId(object):
def __init__(self, state, id, expfam):
assert isinstance(id, int)
self.state = state
self.id = id
self.expfam = expfam
class FactorId(object):
def __init__(self, state, id):
assert isinstance(id, int)
self.state = state
self.id = id
class RandFunId(object):
def __init__(self, state, id):
assert isinstance(id, int)
self.state = state
self.id = id
class GraphState(object):
def __init__(self):
self.vars = {}
self.var_count = 0
self.rand_funs = []
self.factors = []
self.var_replacements = {}
def new_var(self, expfam):
varid = self.var_count
self.var_count += 1
self.vars[varid] = expfam
return VarId(self, varid, expfam)
def resolve_var(self, varid):
if varid.id in self.var_replacements:
return self.var_replacements[varid.id]
else:
return varid
def unify_vars(self, a, b):
self.var_replacements[b.id] = a
del self.vars[b.id]
return a
def unify_values(self, typ, a, b):
# TODO: this fails for e.g. Maybe
avars = typ.embedded_vars(a)
bvars = typ.embedded_vars(b)
assert len(avars) == len(bvars)
for av, bv in zip(avars, bvars):
self.unify_vars(av, bv)
return a
def new_factor(self, fac_info, args):
facid = len(self.factors)
self.factors.append((fac_info, map(self.resolve_var, args)))
return FactorId(self, facid)
def new_rand_fun(self, arg_exp_fams, res_exp_fam):
rfid = len(self.rand_funs)
self.rand_funs.append((arg_exp_fams, res_exp_fam))
return RandFunId(self, rfid)
def new_sample_from_rand_fun(self, rfid, arg_vars):
(arg_exp_fams, res_exp_fam) = self.rand_funs[rfid.id]
assert len(arg_exp_fams) == len(arg_vars)
v = self.new_var(res_exp_fam)
fac = self.new_factor({'type': 'randFun', 'id': rfid.id}, [v] + arg_vars)
return v
def new_const_factor(self, varid, value):
varid = self.resolve_var(varid)
ef = self.vars[varid.id]
return self.new_factor({'type': 'constant', 'expFam': ef, 'value': value}, [varid])
def new_const_var(self, ef, value):
varid = self.new_var(ef)
fac = self.new_const_factor(varid, value)
return varid
def rand_function(self, arg_types, res_type):
arg_tuple_type = Tuple(*arg_types)
rfids = [self.new_rand_fun(arg_tuple_type.exp_fams(), ef) for ef in res_type.exp_fams()]
def rf(*args):
assert len(args) == len(arg_types)
arg_vars = arg_tuple_type.embedded_vars(tuple(args))
res_vars = [self.new_sample_from_rand_fun(rfid, arg_vars) for rfid in rfids]
return res_type.vars_to_value(Queue(res_vars))
return rf
def to_JSON(self):
def replace_id(varid):
if varid.id in self.var_replacements:
return self.var_replacements[varid.id].id
else:
return varid.id
return {
'vars': [[varid, expfam] for (varid, expfam) in self.vars.items()],
'randFuns': [{'argExpFams': aefs, 'resExpFam': ref} for (aefs, ref) in self.rand_funs],
'factors': [{'factor': facinfo, 'argVarIds': [replace_id(a) for a in args]} for (facinfo, args) in self.factors]
}
"""
Type interface:
t.exp_fams()
Returns a list of exponential family names
t.embedded_vars(value)
Returns a list of var ids in value
t.vars_to_value(vars)
Given a queue of var ids, create a value
t.unfreeze(state, value)
Unfreezes a frozen value
"""
class DoubleValue(object):
def __init__(self, varid):
self.varid = varid
def get_type(self):
return Double
def freeze(self, varvals):
return varvals[self.varid.id]['value']
gaussian_exp_fam = {'type': 'gaussian'}
bernoulli_exp_fam = {'type': 'bernoulli'}
def categorical_exp_fam(n):
return {'type': 'categorical', 'n': n}
class DoubleClass(object):
def exp_fams(self):
return [gaussian_exp_fam]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return DoubleValue(vars.dequeue())
def unfreeze(self, state, value):
return DoubleValue(state.new_const_var(gaussian_exp_fam, value))
def __repr__(self):
return 'Double'
Double = DoubleClass()
class BoolValue(object):
def __init__(self, varid):
self.varid = varid
def get_type(self):
return Bool
def freeze(self, varvals):
return varvals[self.varid.id]['value']
class BoolClass(object):
def exp_fams(self):
return [bernoulli_exp_fam]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return BoolValue(vars.dequeue())
def unfreeze(self, state, value):
return BoolValue(state.new_const_var(bernoulli_exp_fam, value))
def __repr__(self):
return 'Bool'
Bool = BoolClass()
# class TupleValue(object):
#
# def __init__(self, fields):
# self.fields = tuple(fields)
class Tuple(object):
def __init__(self, *types):
self.types = types
def exp_fams(self):
ef = []
for t in self.types:
ef.extend(t.exp_fams())
return ef
def embedded_vars(self, value):
vs = []
for (t, v) in zip(self.types, value):
vs.extend(t.embedded_vars(v))
return vs
def vars_to_value(self, vars):
val = []
for t in self.types:
val.append(t.vars_to_value(vars))
return tuple(val)
def freeze(self, varvals, value):
return tuple([t.freeze(varvals, x) for (t, x) in zip(self.types, value)])
def unfreeze(self, state, value):
return tuple([t.unfreeze(state, v) for (t, v) in zip(self.types, value)])
def __repr__(self):
return repr(self.types)
class CategoricalValue(object):
def __init__(self, varid, n):
self.varid = varid
self.n = n
def get_type(self):
return Categorical(self.n)
def freeze(self, varvals):
return varvals[self.varid.id]['value']
class Categorical(object):
def __init__(self, n):
self.n = n
def exp_fams(self):
return [categorical_exp_fam(self.n)]
def embedded_vars(self, value):
return [value.varid]
def vars_to_value(self, vars):
return CategoricalValue(vars.dequeue(), self.n)
def unfreeze(self, state, value):
return CategoricalValue(state.new_const_var(categorical_exp_fam(self.n), value), self.n)
def __repr__(self):
return 'Categorical(' + str(self.n) + ')'
def get_type(value):
if hasattr(value, 'get_type'):
return value.get_type()
elif isinstance(value, (tuple, list)):
return Tuple(*map(get_type, value))
else:
raise Exception('Unknown value type ' + str(type(value)) + ', value ' + str(value))
def freeze_value(value, varvals):
if hasattr(value, 'freeze'):
return value.freeze(varvals)
elif isinstance(value, (tuple, list)):
return tuple([freeze_value(v, varvals) for v in value])
else:
raise Exception('Unknown value type ' + str(type(value)) + ', value ' + str(value))
def Vector(n, typ):
print 'vector', n, typ, Tuple(*([typ]*n))
return Tuple(*([typ]*n))
Unit = Tuple()
current_graph_state = GraphState()
def rand_function(*ts):
return current_graph_state.rand_function(ts[:-1], ts[-1])
def uniform_categorical(n):
v = current_graph_state.new_var(categorical_exp_fam(n))
current_graph_state.new_factor({'type': 'uniformCategorical', 'n': n}, [v])
return CategoricalValue(v, n)
def normal(mean, stdev):
v = current_graph_state.new_var(gaussian_exp_fam)
current_graph_state.new_factor({'type': 'normal', 'mean': mean, 'stdev': stdev}, [v])
return DoubleValue(v)
def conditioned_network(state, typ, sampler, frozen_samps):
  samples = [sampler() for i in range(len(frozen_samps))]
  for (latent, s), fs in zip(samples, frozen_samps):
    unfrozen = typ.unfreeze(state, fs)
    state.unify_values(get_type(s), s, unfrozen)
  return [latent for (latent, _) in samples]
def condition_on_frozen_samples(graph_state, samples, frozen_samples):
for s,f in zip(samples, frozen_samples):
typ = get_type(s[1])
graph_state.unify_values(typ, s[1], typ.unfreeze(graph_state, f))
return graph_state.to_JSON()
def infer_states_and_parameters(templ):
(state, params) = hs_init_em(templ)
state = hs_infer_state(templ, state, params)
score = hs_score(templ, state, params)
yield (state, params, score)
i = 0
while True:
print 'iter', i
params = hs_infer_params(templ, state, params)
state = hs_infer_state(templ, state, params)
score = hs_score(templ, state, params)
yield (state, params, score)
i += 1
def translate_params_for_fn(params):
if len(params[1][0]) == 0:
probs = [math.exp(x) for x in [0.0] + params[0]]
sum_probs = sum(probs)
return [p/sum_probs for p in probs]
else:
variance = -1.0 / (2 * params[0][1])
factors = [params[0][0]] + params[1][0]
return (variance, [f*variance for f in factors])
def params_to_cluster_centers(params):
d = dict(params)
cluster_centers = []
for i in d:
ps = d[i]
variance = -1.0 / (2 * ps[0][1])
factors = [ps[0][0]] + ps[1][0]
scaled_factors = [f*variance for f in factors]
centers = [scaled_factors[0]] + [x + scaled_factors[0] for x in scaled_factors[1:]]
cluster_centers.append(centers)
return zip(*cluster_centers)
def cluster_centers_error(cs1, cs2):
errs = []
def tup_dist(t1, t2):
return sum((a-b)**2 for (a, b) in zip(t1, t2))
for cs in itertools.permutations(cs1):
errs.append(sum(map(tup_dist, cs, cs2)))
return min(errs)
def cluster_assignment_accuracy(cs1, cs2):
accuracies = []
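  # NOTE: hardcoded to 3 clusters; only permutations of range(3) are tried.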
for perm in itertools.permutations(range(3)):
accuracies.append(float(len([() for (a, b) in zip(cs1, cs2) if a == perm[b]])) / len(cs1))
return max(accuracies)
def translate_params(params):
return [(x, translate_params_for_fn(y)) for (x, y) in params]
def mean(xs):
return sum(xs) / len(xs)
def run_clustering_example(run):
global current_graph_state
n = 100
accs = []
for i in range(100):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
print hs_sample_bayes_net(templ, rand_params)
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
for (state, params, score) in state_params_list:
print 'score', score
cs = params_to_cluster_centers(params)
# if j > 1:
# varvals = state_to_varvals(state)
# state_latents = [freeze_value(samp[0], varvals) for samp in samples]
# acc = cluster_assignment_accuracy(true_latents, state_latents)
# iter_accs.append(acc)
j += 1
accs.append(iter_accs)
print map(mean, zip(*accs))
def params_to_matrix(params):
coords = []
component_variances = []
for (i, ((base_n1, n2), (lin,))) in params:
component_variances.append(-1.0 / (2 * n2))
coords.append([-l/(2 * n2) for l in [base_n1] + lin])
return component_variances, np.matrix(coords)
def matrix_to_gaussian(variances_mat):
variances, mat = variances_mat
mean = mat[:,0]
a = mat[:, 1:]
return (mean, a * a.T + np.diag(variances))
def gaussian_kl(p, q):
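  # Computes KL(p || q) for multivariate normals given as (mean, covariance):
  # KL = 0.5 * [ tr(Sq^-1 Sp) + (mq - mp)^T Sq^-1 (mq - mp)
  #              - n + ln det Sq - ln det Sp ]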
(pm, pv) = p
(qm, qv) = q
n = pm.shape[0]
assert pv.shape[0] == n == qv.shape[0]
return 0.5 * (np.trace(linalg.inv(qv) * pv) + ((qm - pm).T * linalg.inv(qv) * (qm - pm)).item((0,0)) - n + linalg.slogdet(qv)[1] - linalg.slogdet(pv)[1])
def rotation_invariant_dist(A, B):
# min_R ||AR - B||^2
# = min_R tr((AR - B)^T(AR - B))
# = min_R tr(R^TA^T A R - B^T A R - R^T A^T B + B^T B)
# = ||A||^2 + ||B||^2 - 2 max_R tr(R^T A^T B)
#
# A^T B = USV^T
#
# = ||A||^2 + ||B||^2 - 2 max_R tr(R^T USV^T)
# = ||A||^2 + ||B||^2 - 2 max_R tr(V^T R^T US)
# = ||A||^2 + ||B||^2 - 2 max_R tr(S)
# -> R = UV^T
u, s, v = linalg.svd(A.T * B)
r = u * v
# print linalg.norm(A*r - B)**2
return (r, linalg.norm(A)**2 + linalg.norm(B)**2 - 2 * sum(s))
# IDEA: compute Gaussian from factors, KL divergence!
def params_score(templ, params):
(state, _) = hs_init_em(templ)
state = hs_infer_state(templ, state, params, iters=10)
return hs_score(templ, state, params)
def run_factor_analysis_example(run):
global current_graph_state
n = 200
accs = []
for i in range(1):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
rand_mat = params_to_matrix(rand_params)
print rand_mat
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
# print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
# rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
for (state, params, score) in state_params_list:
print 'score', score
guess_mat = params_to_matrix(params)
# cs = params_to_cluster_centers(params)
if j > 1:
print guess_mat
print 'kl', gaussian_kl(matrix_to_gaussian(rand_mat), matrix_to_gaussian(guess_mat))
print 'rid', rotation_invariant_dist(rand_mat[1], guess_mat[1])
j += 1
# accs.append(iter_accs)
# print map(mean, zip(*accs))
def get_transition_matrix(params):
base, mat = params
rows = []
for i in range(1 + len(mat[0])):
if i == 0:
logodds = [0.0] + base
else:
logodds = [0.0] + [b + m[i-1] for (b,m) in zip(base, mat)]
probs = list(map(math.exp, logodds))
sum_probs = sum(probs)
rows.append([p / sum_probs for p in probs])
return rows
def hmm_parameters(params):
return (get_transition_matrix(params[0][1]), get_transition_matrix(params[1][1]))
def matrix_dist(m1, m2):
return linalg.norm(np.matrix(m1) - np.matrix(m2))**2
def permute_rows(perm, mat):
return [mat[i] for i in perm]
def permute_cols(perm, mat):
return [[r[i] for i in perm] for r in mat]
def hmm_parameters_dist(tms1, tms2):
(tm1, om1) = tms1
(tm2, om2) = tms2
perms_and_dists = []
for perm in itertools.permutations(range(len(tm1))):
tm2p = permute_rows(perm, permute_cols(perm, tm2))
om2p = permute_rows(perm, om2)
perms_and_dists.append((perm, matrix_dist(tm1, tm2p) + matrix_dist(om1, om2p)))
return min(perms_and_dists, key=lambda x: x[1])
def run_hmm_example(run):
global current_graph_state
n = 100
accs = []
for i in range(1):
current_graph_state = GraphState()
sampler = run()
samples = [sampler() for i in range(n)]
templ = current_graph_state.to_JSON()
rand_params = hs_rand_template_params(templ)
rand_hmm = hmm_parameters(rand_params)
print rand_hmm
# rand_mat = params_to_matrix(rand_params)
varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
frozen_samples = [freeze_value(samp, varvals) for samp in samples]
true_latents = [x[0] for x in frozen_samples]
# print true_latents
templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
print 'best score', params_score(templ, rand_params)
state_params_list = infer_states_and_parameters(templ)
# rand_cs = params_to_cluster_centers(rand_params)
iter_accs = []
j = 0
prev_state_latents = None
for (state, params, score) in state_params_list:
print 'score', score
# guess_mat = params_to_matrix(params)
# cs = params_to_cluster_centers(params)
if j > 1:
inferred_hmm = hmm_parameters(params)
print inferred_hmm
print hmm_parameters_dist(rand_hmm, inferred_hmm)
varvals = state_to_varvals(state)
state_latents = [freeze_value(samp[0], varvals) for samp in samples]
prev_state_latents = state_latents
j += 1
# accs.append(iter_accs)
# print map(mean, zip(*accs))
| mit | 7,413,421,166,586,676,000 | 28.176883 | 155 | 0.635954 | false |
apetrone/pegasus | targets/iphoneos.py | 1 | 1909 | import os
import logging
from pegasus.models import TargetPlatform, Architecture, ProductType
from pegasus.targets.macosx_common import (
process_params_for_driver,
link_product_dependency,
get_full_product_name,
get_full_product_path,
get_full_symbols_path,
get_product_install_name,
check_source_compiles,
check_source_runs,
scan_sdks_for_platform)
class iPhoneOS(TargetPlatform):
def default_compiler(self):
return "clang"
def default_driver(self):
return "xcode"
def default_architectures(self):
return [
Architecture.armv7,
Architecture.arm64
]
def prevalidate_driver_data(self, data):
pass
def post_load_driver_schema(self, schema):
scan_sdks_for_platform(self, schema, "iPhoneOS")
def supports_fat_binaries(self):
return True
def process_params_for_driver(self, layoutparams, driver, driver_name):
return process_params_for_driver(layoutparams, driver, driver_name)
def link_product_dependency(self, toplevel, dependent):
return link_product_dependency(self, toplevel, dependent)
def get_full_product_name(self, product):
return get_full_product_name(self, product)
def get_full_product_path(self, product):
return get_full_product_path(self, product)
def get_full_symbols_path(self, product):
return get_full_symbols_path(self, product)
def get_product_install_name(self, product):
return get_product_install_name(self, product)
def product_supported(self, product, driver_instance, driver_name):
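        # dynamic libraries cannot be produced for this platform/driver
        # combination, so warn and report the product as unsupported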
unsupported = [ProductType.DynamicLibrary]
if product.output in unsupported:
            logging.warning("Product type \"%s\" is not supported with the current platform/driver configuration." % product.output)
return False
return True
def check_source_compiles(self, source, **kwargs):
return check_source_compiles(source, **kwargs)
def check_source_runs(self, source, **kwargs):
return check_source_runs(source, **kwargs)
| bsd-2-clause | -7,147,181,823,248,737,000 | 26.666667 | 120 | 0.754322 | false |
agilman/flask-template | app/auth.py | 1 | 1738 |
from flask import session, redirect, request, render_template
from app import app
from app.models import *
def getUserFromDb(username,password):
userQuery = Users.query.filter_by(username=username)
if userQuery.count()==0:
return "No such user"
else:
usr = userQuery.first()
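        # NOTE: the stored passwordHash field is compared directly against
        # the submitted value; no hashing is applied here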
if usr.passwordHash==password:
return usr
else:
return "Login failed"
@app.route("/auth/login",methods=["GET","POST"])
def login():
form = request.form
if request.method=="POST":
username = form["username"]
password = form["password"]
dbUser = getUserFromDb(username,password)
if type(dbUser) is str:
return "MSG : BAD LOG IN"
session['userName']=username
session['userId']=dbUser.id
return redirect("/users/"+username)
else:
return render_template("login.html")
@app.route("/auth/register",methods=["GET","POST"])
def register(username=None):
form = request.form
#TODO:
#Make sure unique constraint is satisfied before trying to add to db
if request.method=="POST":
username = form["username"]
password = form["password"]
email = form["email"]
user = Users(username=username, email=email, password=password)
db.session.add(user)
db.session.commit()
session['userName'] = username
session['userId'] = user.id
return redirect("/users/"+username)
else:
return render_template("register.html")
@app.route("/auth/logout")
def logout():
session.pop('userName', None)
session.pop('userId', None)
session.clear()
return redirect("/")
| mit | -1,983,810,625,785,931,800 | 25.738462 | 72 | 0.600115 | false |
NoneGroupTeam/Let-s-Go | webapp/webapp/settings.py | 1 | 3246 | """
Django settings for webapp project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fopfdg5g999+e8&sk39q(%unup0d_b_e#p$jeq#qhw27d=v0#t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost',
'127.0.0.1',
'none.lc4t.me',
'letsgo.lc4t.me'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'corsheaders',
]
MIDDLEWARE_CLASSES = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'corsheaders.middleware.CorsPostCsrfMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
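# WARNING: accepts cross-origin requests from any origin; suitable for
# development only.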
SESSION_COOKIE_DOMAIN = 'localhost'
# SESSION_COOKIE_HTTPONLY = False
ROOT_URLCONF = 'webapp.urls'
AUTH_USER_MODEL = "app.AppUser"
# AUTH_PROFILE_MODULE = 'app.Profile'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3'
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Chongqing'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
TEST_RUNNER = 'webapp.runner.PytestTestRunner'
| gpl-3.0 | 7,738,174,100,558,951,000 | 27.725664 | 78 | 0.687616 | false |
ENCODE-DCC/pyencoded-tools | permissions_qa_scripts/originals/UPLOADS/submit.bam.py | 1 | 4935 | """ Example file submission script
Requires the `aws` command line utility: http://aws.amazon.com/cli/
"""
import hashlib
import json
import os
import requests
import subprocess
import sys
import time
host = 'REPLACEME'
encoded_access_key = 'UISQC32B'
encoded_secret_access_key = 'ikc2wbs27minvwo4'
path = 'test.bam'
my_lab = '/labs/thomas-gingeras/'
my_award = '/awards/U54HG004557/'
# From http://hgwdev.cse.ucsc.edu/~galt/encode3/validatePackage/validateEncode3-latest.tgz
encValData = 'encValData'
assembly = 'hg19'
# ~2s/GB
print("Calculating md5sum.")
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
data = {
"dataset": "TSTNEW",
"file_format": "bam",
"file_size": os.path.getsize(path),
"assembly": "hg19",
"md5sum": md5sum.hexdigest(),
"output_type": "alignments",
"submitted_file_name": path,
"lab": my_lab,
"award": my_award,
}
####################
# Local validation
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
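# gzipped files always begin with the two magic bytes 0x1f 0x8b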
with open(path, 'rb') as f:
    magic_number = f.read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if data['file_format'] in gzip_types:
assert is_gzipped, 'Expected gzipped file'
else:
assert not is_gzipped, 'Expected un-gzipped file'
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
validate_map = {
('bam', None): ['-type=bam', chromInfo],
('bed', 'unknown'): ['-type=bed6+', chromInfo], # if this fails we will drop to bed3+
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'unknown'): ['-type=bigBed6+', chromInfo], # if this fails we will drop to bigBed3+
('bigWig', None): ['-type=bigWig', chromInfo],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('gtf', None): None,
('idat', None): ['-type=idat'],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('rcc', None): ['-type=rcc'],
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((data['file_format'], data.get('file_format_type')))
if validate_args is not None:
print("Validating file.")
try:
subprocess.check_output(['validateFiles'] + validate_args + [path])
except subprocess.CalledProcessError as e:
print(e.output)
raise
####################
# POST metadata
headers = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
print("Submitting metadata.")
r = requests.post(
host + '/file',
auth=(encoded_access_key, encoded_secret_access_key),
data=json.dumps(data),
headers=headers,
)
try:
r.raise_for_status()
except:
print('Submission failed: %s %s' % (r.status_code, r.reason))
print(r.text)
raise
item = r.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
####################
# POST file to S3
creds = item['upload_credentials']
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
| mit | 467,417,177,298,291,700 | 29.091463 | 106 | 0.611145 | false |
novafloss/django-agnocomplete | demo/tests/test_errors.py | 1 | 4942 | from django.test import TestCase
from django.urls import reverse
from django.test import override_settings
from django.core.exceptions import SuspiciousOperation
import mock
from requests.exceptions import Timeout
from agnocomplete import get_namespace
from . import get_json
from . import LoaddataLiveTestCase
from ..autocomplete import (
# Classic search
AutocompletePerson,
# URL Proxies
AutocompleteUrlSimpleAuth,
AutocompleteUrlHeadersAuth,
)
def raise_standard_exception(*args, **kwargs):
raise Exception("Nothing exceptional")
def raise_suspiciousoperation(*args, **kwargs):
raise SuspiciousOperation("You are not allowed to do this")
def raise_timeout(*args, **kwargs):
raise Timeout("Timeout")
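# Mixin driving the error-handling checks: subclasses supply `klass` (the
# autocomplete class under test) and `mock_function` (the failure injected
# into its `items` method), plus the expected HTTP status code.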
class ErrorHandlingTest(object):
expected_status = 500
@property
def klass(self):
raise NotImplementedError("You need a `klass` property")
@property
def mock_function(self):
raise NotImplementedError("You need a `mock_function` property")
@property
def klass_path(self):
return '{}.{}'.format(self.klass.__module__, self.klass.__name__)
@property
def mock_path(self):
paths = [self.klass_path, 'items']
return ".".join(paths)
@property
def url(self):
ac_url_name = get_namespace() + ':agnocomplete'
return reverse(ac_url_name, args=[self.klass.__name__])
def test_errors(self):
with mock.patch(self.mock_path, self.mock_function):
response = self.client.get(
self.url,
data={"q": "nothing important"})
self.assertEqual(response.status_code, self.expected_status)
data = get_json(response, 'errors')
self.assertEqual(len(data), 1)
class ErrorHandlingAutocompletePersonTest(ErrorHandlingTest, TestCase):
klass = AutocompletePerson
mock_function = raise_standard_exception
class ErrorHandlingSuspiciousOperationTest(ErrorHandlingTest, TestCase):
klass = AutocompletePerson
mock_function = raise_suspiciousoperation
expected_status = 400
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxySimpleAuthTest(
ErrorHandlingTest, LoaddataLiveTestCase):
klass = AutocompleteUrlSimpleAuth
mock_function = raise_standard_exception
def test_search_query_wrong_auth(self):
# URL construct
instance = self.klass()
search_url = instance.search_url
klass = self.klass_path
with mock.patch(klass + '.get_search_url') as mock_auto:
mock_auto.return_value = self.live_server_url + search_url
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch(klass + '.get_http_call_kwargs') as mock_headers:
mock_headers.return_value = {
'auth_token': 'BADAUTHTOKEN',
'q': 'person',
}
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 403)
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxyHeadersAuthTest(
ErrorHandlingTest, LoaddataLiveTestCase):
klass = AutocompleteUrlHeadersAuth
mock_function = raise_standard_exception
def test_search_headers_wrong_auth(self):
# URL construct
instance = self.klass()
search_url = instance.search_url
klass = self.klass_path
with mock.patch(klass + '.get_search_url') as mock_auto:
mock_auto.return_value = self.live_server_url + search_url
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch(klass + '.get_http_headers') as mock_headers:
mock_headers.return_value = {
'NOTHING': 'HERE'
}
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 403)
@override_settings(HTTP_HOST='')
class ErrorHandlingURLProxyTimeoutTest(LoaddataLiveTestCase):
klass = AutocompleteUrlHeadersAuth
@property
def klass_path(self):
return '{}.{}'.format(self.klass.__module__, self.klass.__name__)
def test_timeout(self):
# Search using the URL proxy view
search_url = get_namespace() + ':agnocomplete'
with mock.patch('requests.get', raise_timeout):
response = self.client.get(
reverse(
search_url, args=[self.klass.__name__]),
data={'q': "person"}
)
self.assertEqual(response.status_code, 408)
| mit | 7,850,101,734,890,066,000 | 31.300654 | 77 | 0.616754 | false |
yephper/django | tests/middleware_exceptions/tests.py | 1 | 45313 | import sys
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template import engines
from django.template.response import TemplateResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
class TestException(Exception):
pass
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
def __init__(self):
self.process_request_called = False
self.process_view_called = False
self.process_response_called = False
self.process_template_response_called = False
self.process_exception_called = False
def process_request(self, request):
self.process_request_called = True
def process_view(self, request, view_func, view_args, view_kwargs):
self.process_view_called = True
def process_template_response(self, request, response):
self.process_template_response_called = True
return response
def process_response(self, request, response):
self.process_response_called = True
return response
def process_exception(self, request, exception):
self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
def process_request(self, request):
super(RequestMiddleware, self).process_request(request)
return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(ResponseMiddleware, self).process_response(request, response)
return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(TemplateResponseMiddleware, self).process_template_response(request, response)
template = engines['django'].from_string('Template Response Middleware')
return TemplateResponse(request, template)
class ExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(ExceptionMiddleware, self).process_exception(request, exception)
return HttpResponse('Exception Middleware')
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
def process_request(self, request):
super(BadRequestMiddleware, self).process_request(request)
raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(BadResponseMiddleware, self).process_response(request, response)
raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(BadExceptionMiddleware, self).process_exception(request, exception)
raise TestException('Test Exception Exception')
# Sample middleware examples that fail to return an HttpResponse
class NoTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(NoTemplateResponseMiddleware, self).process_template_response(request, response)
class NoResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(NoResponseMiddleware, self).process_response(request, response)
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class BaseMiddlewareExceptionTest(SimpleTestCase):
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def _add_middleware(self, middleware):
self.client.handler._request_middleware.insert(0, middleware.process_request)
self.client.handler._view_middleware.insert(0, middleware.process_view)
self.client.handler._template_response_middleware.append(middleware.process_template_response)
self.client.handler._response_middleware.append(middleware.process_response)
self.client.handler._exception_middleware.append(middleware.process_exception)
def assert_exceptions_handled(self, url, errors, extra_error=None):
try:
self.client.get(url)
except TestException:
            # The test client intentionally re-raises any exception raised
            # during request handling, so proper handling is verified by
            # listening for the got_request_exception signal instead.
pass
except Exception as e:
if type(extra_error) != type(e):
self.fail("Unexpected exception: %s" % e)
self.assertEqual(len(self.exceptions), len(errors))
for i, error in enumerate(errors):
exception, value, tb = self.exceptions[i]
self.assertEqual(value.args, (error, ))
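    # The five booleans map to process_request / process_view /
    # process_template_response / process_response / process_exception.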
def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
self.assertEqual(middleware.process_request_called, request)
self.assertEqual(middleware.process_view_called, view)
self.assertEqual(middleware.process_template_response_called, template_response)
self.assertEqual(middleware.process_response_called, response)
self.assertEqual(middleware.process_exception_called, exception)
class MiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_middleware(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_exception_middleware(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_template_response_error(self):
middleware = TestMiddleware()
self._add_middleware(middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(middleware, True, True, True, True, False)
@override_settings(
MIDDLEWARE_CLASSES=['middleware_exceptions.middleware.ProcessExceptionMiddleware'],
)
def test_exception_in_render_passed_to_process_exception(self):
# Repopulate the list of middlewares since it's already been populated
# by setUp() before the MIDDLEWARE_CLASSES setting got overridden
self.client.handler.load_middleware()
response = self.client.get('/middleware_exceptions/exception_in_render/')
self.assertEqual(response.content, b'Exception caught')
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/',
['Test Template Response Exception']
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead.",
'Test Response Exception'
]
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return "
"an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [
"NoResponseMiddleware.process_response didn't return an HttpResponse object. It returned None instead."
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_no_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = NoTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled(
'/middleware_exceptions/template_response/', [
"NoTemplateResponseMiddleware.process_template_response didn't "
"return an HttpResponse object. It returned None instead."
],
ValueError()
)
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
_missing = object()
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
@override_settings(ROOT_URLCONF=None)
def test_missing_root_urlconf(self):
# Removing ROOT_URLCONF is safe, as override_settings will restore
# the previously defined settings.
del settings.ROOT_URLCONF
with self.assertRaises(AttributeError):
self.client.get("/middleware_exceptions/view/")
class MyMiddleware(object):
def __init__(self):
raise MiddlewareNotUsed
def process_request(self, request):
pass
class MyMiddlewareWithExceptionMessage(object):
def __init__(self):
raise MiddlewareNotUsed('spam eggs')
def process_request(self, request):
pass
@override_settings(
DEBUG=True,
ROOT_URLCONF='middleware_exceptions.urls',
)
class MiddlewareNotUsedTests(SimpleTestCase):
rf = RequestFactory()
def test_raise_exception(self):
request = self.rf.get('middleware_exceptions/view/')
with self.assertRaises(MiddlewareNotUsed):
MyMiddleware().process_request(request)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddleware',
])
def test_log(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
)
@override_settings(MIDDLEWARE_CLASSES=[
'middleware_exceptions.tests.MyMiddlewareWithExceptionMessage',
])
def test_log_custom_message(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 1)
self.assertEqual(
calls[0],
"MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
)
@override_settings(DEBUG=False)
def test_do_not_log_when_debug_is_false(self):
with patch_logger('django.request', 'debug') as calls:
self.client.get('/middleware_exceptions/view/')
self.assertEqual(len(calls), 0)
| bsd-3-clause | 5,373,502,232,765,956,000 | 46.828664 | 117 | 0.680357 | false |
gabinetedigital/videre | videos/models.py | 1 | 2845 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Governo do Estado do Rio Grande do Sul
# Copyright (C) 2011 Lincoln de Sousa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Tag(models.Model):
name = models.CharField(_(u'name'), max_length=300)
def __unicode__(self):
return self.name
class Url(models.Model):
url = models.CharField(_(u'url'), max_length=300)
content_type = models.CharField(_(u'content type'), max_length=128)
video = models.ForeignKey('Video', verbose_name=_(u'video'))
def __unicode__(self):
return self.url
class Video(models.Model):
title = models.CharField(_(u'title'), max_length=200)
creation_date = models.DateTimeField(
_(u'creation date'), default=datetime.now)
event_date = models.DateTimeField(_(u'event date'), blank=True, null=True)
summary = models.TextField(_(u'summary'),)
author = models.CharField(_(u'author'), max_length=200)
license_name = models.CharField(_(u'license name'), max_length=200)
license_link = models.CharField(_(u'license link'), max_length=300)
thumb_url = models.CharField(_(u'thumb url'), max_length=300, blank=True)
tags = models.ManyToManyField('Tag', verbose_name=_(u'tags'),)
def __unicode__(self):
return self.title
def as_dict(self):
""" Returns a dictionary representation of a video object """
date_handler = lambda x: getattr(x, 'isoformat', lambda:None)()
return {
'id': self.id,
'title': self.title,
'creation_date': date_handler(self.creation_date),
'event_date': date_handler(self.event_date),
'summary': self.summary,
'author': self.author,
'license_name': self.license_name,
'license_link': self.license_link,
'thumb_url': self.thumb_url,
'tags': list(self.tags.values_list('name', flat=True)),
'sources': [{
'url': i.url,
'content_type': i.content_type,
} for i in self.url_set.all()],
}
| agpl-3.0 | 8,764,051,673,740,353,000 | 38.513889 | 78 | 0.643234 | false |
thyu/.dotfiles | setup.py | 1 | 2604 | #!/usr/bin/env python3
import shutil
import logging
from pathlib import Path
# setup logging level
logging.basicConfig(level=logging.INFO)
# some global paths
DOTFILES_ROOT = Path(__file__).resolve().parent
SUBMODULES = DOTFILES_ROOT / "submodules"
def setup_vim():
VIMRC_PATH = Path.home() / ".vimrc"
VIM_DIR = Path.home() / ".vim"
VIM_PLUGIN_DIR = VIM_DIR / "vim_plugins"
# remove old files / folders, copy files from .dotfiles
logging.info(f"Remove {VIM_DIR}")
shutil.rmtree(VIM_DIR)
logging.info(f"Remove {VIMRC_PATH}")
VIMRC_PATH.unlink()
# copy default .vimrc file
shutil.copy2(DOTFILES_ROOT/ "vimrc", VIMRC_PATH)
# setup vim-plug plugin management
logging.info("Configuring vim-plug")
AUTOLOAD_DIR = VIM_DIR / "autoload"
AUTOLOAD_DIR.mkdir(parents=True, exist_ok=True)
VIM_PLUG_SOURCE = SUBMODULES / "vim-plug/plug.vim"
shutil.copy2(VIM_PLUG_SOURCE, AUTOLOAD_DIR)
# install themes
logging.info("Installing colorschemes")
shutil.copytree(SUBMODULES / "vim-colorschemes/colors", VIM_DIR / "colors")
# install plugins - we use a separate plugin_config.vim
VIM_DIR.mkdir(parents=True, exist_ok=True)
PLUGIN_CONFIG = VIM_DIR / "plugin_config.vim"
with open(PLUGIN_CONFIG, "w") as f:
# start vim-plug
f.write("call plug#begin('~/.vim/vim_plugins')\n")
# vim-airline
logging.info("Installing vim-airline")
DST_PATH = VIM_PLUGIN_DIR / "vim-airline"
f.write(f"Plug '{DST_PATH}'\n")
shutil.copytree(SUBMODULES / "vim-airline", DST_PATH)
# vim-airline-theme
logging.info("Installing vim-airline-themes")
DST_PATH = VIM_PLUGIN_DIR / "vim-airline-themes"
f.write(f"Plug '{DST_PATH}'\n")
f.write("let g:airline_theme='cool'\n")
shutil.copytree(SUBMODULES / "vim-airline-themes", DST_PATH)
# nerdtree
logging.info("Installing NERDTree")
DST_PATH = VIM_PLUGIN_DIR / "nerdtree"
f.write(f"Plug '{DST_PATH}'\n")
shutil.copytree(SUBMODULES / "nerdtree", DST_PATH)
# goyo
logging.info("Installing Goyo")
DST_PATH = VIM_PLUGIN_DIR / "goyo.vim"
f.write(f"Plug '{DST_PATH}'\n")
shutil.copytree(SUBMODULES / "goyo.vim", DST_PATH)
# end vim-plug
f.write("call plug#end()\n")
def setup_tmux() -> None:
logging.info("Copying tmux.conf")
shutil.copy2(DOTFILES_ROOT/ "tmux.conf", Path.home() / ".tmux.conf")
if __name__ == "__main__":
setup_vim()
setup_tmux()
logging.info("Setup complete!")
| mit | -7,623,812,741,486,072,000 | 30 | 79 | 0.629416 | false |
mitodl/odl-video-service | odl_video/envs_test.py | 1 | 5920 | """Tests for environment variable parsing functions"""
from unittest.mock import patch
import os
import pytest
from odl_video.envs import (
EnvironmentVariableParseException,
get_any,
get_bool,
get_int,
get_key,
get_list_of_str,
get_string,
parse_env,
)
FAKE_ENVIRONS = {
"true": "True",
"false": "False",
"positive": "123",
"negative": "-456",
"zero": "0",
"float": "1.1",
"expression": "123-456",
"none": "None",
"string": "a b c d e f g",
"list_of_int": "[3,4,5]",
"list_of_str": '["x", "y", \'z\']',
"key": (
"-----BEGIN PUBLIC KEY-----\\n"
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCQMjkVo9gogtb8DI2bZyFGvnnN\\n"
"81Q4d0crS4S9UDrxHJU/yrKg1UJAYwhecZdOOQnmWilZg9m25Q4hxx8kETivje11\\n"
"9Pg6aoiaVt59+ThgIIsOgwuDAdZdCBzuR+FfG9tVGrR+ek7AWm3Rp/kJt/6h4jN7\\n"
"/q0txR0v1rqmowS1mQIDAQAB\\n"
"-----END PUBLIC KEY-----\\n"
),
}
def test_get_any():
"""
get_any should parse an environment variable into a bool, int, or a string
"""
expected = {
"true": True,
"false": False,
"positive": 123,
"negative": -456,
"zero": 0,
"float": "1.1",
"expression": "123-456",
"none": "None",
"string": "a b c d e f g",
"list_of_int": "[3,4,5]",
"list_of_str": '["x", "y", \'z\']',
"key": (
"-----BEGIN PUBLIC KEY-----\\n"
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCQMjkVo9gogtb8DI2bZyFGvnnN\\n"
"81Q4d0crS4S9UDrxHJU/yrKg1UJAYwhecZdOOQnmWilZg9m25Q4hxx8kETivje11\\n"
"9Pg6aoiaVt59+ThgIIsOgwuDAdZdCBzuR+FfG9tVGrR+ek7AWm3Rp/kJt/6h4jN7\\n"
"/q0txR0v1rqmowS1mQIDAQAB\\n"
"-----END PUBLIC KEY-----\\n"
),
}
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
for key, value in expected.items():
assert get_any(key, "default") == value
assert get_any("missing", "default") == "default"
def test_get_string():
"""
get_string should get the string from the environment variable
"""
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
for key, value in FAKE_ENVIRONS.items():
assert get_string(key, "default") == value
assert get_string("missing", "default") == "default"
assert get_string("missing", "default") == "default"
def test_get_int():
"""
get_int should get the int from the environment variable, or raise an exception if it's not parseable as an int
"""
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
assert get_int("positive", 1234) == 123
assert get_int("negative", 1234) == -456
assert get_int("zero", 1234) == 0
for key, value in FAKE_ENVIRONS.items():
if key not in ("positive", "negative", "zero"):
with pytest.raises(EnvironmentVariableParseException) as ex:
get_int(key, 1234)
assert ex.value.args[
0
] == "Expected value in {key}={value} to be an int".format(
key=key,
value=value,
)
assert get_int("missing", "default") == "default"
def test_get_bool():
"""
get_bool should get the bool from the environment variable, or raise an exception if it's not parseable as a bool
"""
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
assert get_bool("true", 1234) is True
assert get_bool("false", 1234) is False
for key, value in FAKE_ENVIRONS.items():
if key not in ("true", "false"):
with pytest.raises(EnvironmentVariableParseException) as ex:
get_bool(key, 1234)
assert ex.value.args[
0
] == "Expected value in {key}={value} to be a boolean".format(
key=key,
value=value,
)
assert get_int("missing", "default") == "default"
def test_get_list_of_str():
"""
get_list_of_str should parse a list of strings
"""
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
assert get_list_of_str("list_of_str", ["noth", "ing"]) == ["x", "y", "z"]
for key, value in FAKE_ENVIRONS.items():
if key != "list_of_str":
with pytest.raises(EnvironmentVariableParseException) as ex:
get_list_of_str(key, ["noth", "ing"])
assert ex.value.args[
0
] == "Expected value in {key}={value} to be a list of str".format(
key=key,
value=value,
)
assert get_list_of_str("missing", "default") == "default"
def test_get_key():
"""get_key should parse the string, escape and return a bytestring"""
with patch("odl_video.envs.os", environ=FAKE_ENVIRONS):
assert get_key("foo_key", None) is None
assert get_key("foo_key", "") == b""
assert get_key("key", None) == (
b"-----BEGIN PUBLIC KEY-----\n"
b"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCQMjkVo9gogtb8DI2bZyFGvnnN\n"
b"81Q4d0crS4S9UDrxHJU/yrKg1UJAYwhecZdOOQnmWilZg9m25Q4hxx8kETivje11\n"
b"9Pg6aoiaVt59+ThgIIsOgwuDAdZdCBzuR+FfG9tVGrR+ek7AWm3Rp/kJt/6h4jN7\n"
b"/q0txR0v1rqmowS1mQIDAQAB\n"
b"-----END PUBLIC KEY-----\n"
)
def test_parse_env():
"""ensure that the parse_env function is properly processing env files """
try:
testpath = "testenv.txt"
with open(testpath, "w") as testfile:
testfile.write("FOO_VAR=bar=var\nexport FOO_NUM=42\n")
parse_env(testpath)
assert get_string("FOO_VAR", "") == "bar=var"
assert get_int("FOO_NUM", 0) == 42
finally:
os.remove(testpath)
| bsd-3-clause | 1,943,514,092,109,895,400 | 32.828571 | 117 | 0.555912 | false |
pacificgilly1992/PGrainrate | Backups/Externals/externals1.0.5.py | 1 | 4428 | ############################################################################
# Project: The Lenard effect of preciptation at the RUAO,
# Title: Ensemble processing of the PG, Time and Rain Rate data,
# Author: James Gilmore,
# Email: [email protected].
# Version: 1.0.5
# Date: 07/12/15
############################################################################
#Initialising the python script
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
from lowess import lowess
import sys
############################################################################
#Plotting functions for PG and Rain Rate
def PGRainFull(xlimmax=None, ylimmax=None, outFile=None, fileformat=None, RainRate5mm=None, TimeTip5mm=None, timekeep=None, PG=None, PGtip=None, slope=None, intercept=None, p_value=None, r_value=None, pearson_cor=None, std_err=None, mann_wht=None):
"Plot 3 subplots all of which completment the main focus, i.e. (1) PG vs."
"Rain Rate along with side plots for (2) Rain Rate and (3) PG between the"
"times that charged rain was detected. Statistical information was also "
"added in the remaining quadrant to fill the white space but can easily "
"be removed if neseccary."
plt.clf()
fig = plt.figure()
#plt.suptitle("Raindrop Charge: " + outFile)
pgrain = fig.add_subplot(222)
pgrain.scatter(RainRate5mm, PGtip)
pgrain.set_xlabel("Rain Rate (mm/hr)")
pgrain.set_ylabel("Potential Gradient (V/m)")
pgrain.grid()
pgrain.set_xlim(-.1,xlimmax)
pgrain.set_ylim(-1050, ylimmax)
pgrain.invert_yaxis()
pgrain.plot(np.arange(-.1, xlimmax+0.3, 0.2),np.arange(-.1, xlimmax+0.3, 0.2)*slope+intercept)
PGRainsort = np.array(sorted(zip(RainRate5mm, PGtip)))
eps = sys.float_info.epsilon
pgrain.plot(PGRainsort[:,0], lowess(PGRainsort[:,0]+eps, PGRainsort[:,1]+eps, 1/2))
x0, x1 = pgrain.get_xlim()
y0, y1 = pgrain.get_ylim()
pgrain.set_aspect(np.abs((x1-x0)/(y1-y0)))
#PG Plot
pg = fig.add_subplot(221)
pg.plot(timekeep,PG)
pg.set_xlabel("Time (hrs)")
pg.set_xlim(np.min(TimeTip5mm),np.max(TimeTip5mm))
pg.set_ylim(-1050, ylimmax)
pg.invert_yaxis()
#pg.axes.get_yaxis().set_visible(False)
pg.grid()
x0, x1 = pg.get_xlim()
y0, y1 = pg.get_ylim()
pg.set_aspect(np.abs((x1-x0)/(y1-y0)))
#Rain plot
rain = fig.add_subplot(224)
rain.plot(RainRate5mm,TimeTip5mm)
rain.set_ylabel("Time (hrs)")
rain.set_ylim(np.min(TimeTip5mm),np.max(TimeTip5mm))
rain.set_xlim(-.1,xlimmax)
rain.grid()
x0, x1 = rain.get_xlim()
y0, y1 = rain.get_ylim()
rain.set_aspect(np.abs((x1-x0)/(y1-y0)))
#Info Plot
info = fig.add_subplot(223)
info.axis('off')
info.text(-0.1, .9, '$Year and Day$', fontsize=15)
info.text(-0.1, .75, '$P-Value$: ', fontsize=15)
info.text(-0.1, .6, '$R^2$: ', fontsize=15)
info.text(-0.1, .45, "$Pearson's Cor$: ", fontsize=15)
info.text(-0.1, .3, "$Standard Error$: ", fontsize=15)
info.text(-0.1, .15, "$Mann-Whitney$: ", fontsize=15)
info.text(0.6, .9, outFile, fontsize=15)
info.text(0.6, .75, round(p_value,7), fontsize=15)
info.text(0.6, .6, round(r_value**2,5), fontsize=15)
info.text(0.6, .45, round(pearson_cor[1],5), fontsize=15)
info.text(0.6, .3, round(std_err,5), fontsize=15)
info.text(0.6, .15, round(mann_wht,5), fontsize=15)
x0, x1 = info.get_xlim()
y0, y1 = info.get_ylim()
info.set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.tight_layout(pad=0.4, w_pad=-0.5, h_pad=0.5)
plt.savefig('plots/new/' + outFile + "." + fileformat)
plt.close(fig)
def PGRainSlim(xlimmax=None, ylimmax=None, outFile=None, fileformat=None, RainRate5mm=None, PGtip=None, slope=None, intercept=None):
plt.clf()
fig = plt.figure()
#plt.suptitle("Raindrop Charge: " + outFile)
pgrain = fig.add_subplot(111)
pgrain.scatter(RainRate5mm, PGtip)
pgrain.set_xlabel("Rain Rate (mm/hr)")
pgrain.set_ylabel("Potential Gradient (V/m)")
pgrain.grid()
pgrain.set_xlim(-.1,xlimmax)
pgrain.set_ylim(-200, ylimmax)
pgrain.invert_yaxis()
#pgrain.plot(np.arange(-.1, xlimmax+0.3, 0.2),np.arange(-.1, xlimmax+0.3, 0.2)*slope+intercept)
PGRainsort = np.array(sorted(zip(RainRate5mm, PGtip)))
eps = sys.float_info.epsilon
pgrain.plot(PGRainsort[:,0], lowess(PGRainsort[:,0]+eps, PGRainsort[:,1]+eps, 1/2))
x0, x1 = pgrain.get_xlim()
y0, y1 = pgrain.get_ylim()
pgrain.set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.savefig('plots/new/' + outFile + "." + fileformat)
plt.close(fig)
| gpl-3.0 | -4,713,461,817,470,815,000 | 31.8 | 248 | 0.648374 | false |
javierLiarte/tdd-goat-python | tests/selenium/conftest.py | 1 | 1089 | import pytest
import os
from selenium import webdriver
browsers = {
'firefox': webdriver.Firefox,
'chrome': webdriver.Chrome,
}
@pytest.fixture(scope='session', params=browsers.keys())
def driver(request):
''' driver factory, for allowing more than one browser object in a fixture '''
if 'DISPLAY' not in os.environ:
pytest.skip('Test requires display server (export DISPLAY)')
class DriverFactory(object):
def get(self):
b = browsers[request.param]()
request.addfinalizer(lambda *args: b.quit())
return b
return DriverFactory()
@pytest.fixture
def bf(driver, url):
''' browser factory, for allowing more than one browser object in a fixture '''
class BrowserFactory(object):
def get(self):
b = driver.get()
b.set_window_size(1200, 800)
b.implicitly_wait(3)
b.get(url)
return b
return BrowserFactory()
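# Illustrative use in a test module (test name and assertion are hypothetical):
#   def test_homepage_title(bf):
#       browser = bf.get()
#       assert browser.title  # page was loaded from the --url option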
def pytest_addoption(parser):
parser.addoption('--url', action='store',
default='http://localhost:8111/')
@pytest.fixture(scope='session')
def url(request):
return request.config.option.url | gpl-2.0 | -5,603,045,507,090,966,000 | 25.585366 | 81 | 0.68595 | false |
zijistark/ck2utils | esc/eu4culture_map.py | 1 | 4853 | #!/usr/bin/env python3
from collections import defaultdict
import math
from pathlib import Path
import re
import sys
import urllib.request
import numpy as np
from PIL import Image
import spectra
from ck2parser import rootpath, csv_rows, SimpleParser, Obj
from localpaths import eu4dir
from print_time import print_time
@print_time
def main():
parser = SimpleParser()
parser.basedir = eu4dir
if len(sys.argv) > 1:
parser.moddirs.append(Path(sys.argv[1]))
rgb_number_map = {}
default_tree = parser.parse_file('map/default.map')
provinces_path = parser.file('map/' + default_tree['provinces'].val)
climate_path = parser.file('map/' + default_tree['climate'].val)
max_provinces = default_tree['max_provinces'].val
colors = {
'land': np.uint8((127, 127, 127)),
'sea': np.uint8((68, 107, 163)),
'desert': np.uint8((94, 94, 94))
}
prov_color_lut = np.empty(max_provinces, '3u1')
for row in csv_rows(parser.file('map/' + default_tree['definitions'].val)):
try:
number = int(row[0])
except ValueError:
continue
if number < max_provinces:
rgb = tuple(np.uint8(row[1:4]))
rgb_number_map[rgb] = np.uint16(number)
grouped_cultures = []
for _, tree in parser.parse_files('common/cultures/*'):
for n, v in tree:
cultures = []
for n2, v2 in v:
if (isinstance(v2, Obj) and
not re.match(r'((fe)?male|dynasty)_names', n2.val)):
cultures.append(n2.val)
grouped_cultures.append(cultures)
region_colors = []
for _, tree in parser.parse_files('common/region_colors/*'):
for n, v in tree:
region_colors.append(spectra.rgb(*(n2.val / 255 for n2 in v)))
culture_color = {'noculture': colors['land']}
spherical_code = {
1: [(0, 0, 1)],
2: [(0, 0, 1), (0, 0, -1)],
3: [(1, 0, 0), (-1 / 2, math.sqrt(3) / 2, 0),
(-1 / 2, -math.sqrt(3) / 2, 0)]
}
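    # Unit-sphere point sets: each culture in a group gets its color by
    # offsetting the group's Lab color along one of these directions.
    # Counts above 3 are fetched from Neil Sloane's packing tables below.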
out_of_gamut = 0
for i, cultures in enumerate(grouped_cultures):
group_color = region_colors[i + 1].to('lab').values
num_cultures = len(cultures)
try:
code = spherical_code[num_cultures]
except KeyError:
url_fmt = 'http://neilsloane.com/packings/dim3/pack.3.{}.txt'
url = url_fmt.format(num_cultures)
with urllib.request.urlopen(url) as response:
txt = response.read()
floats = [float(x) for x in txt.split()]
code = list(zip(*[iter(floats)]*3))
spherical_code[num_cultures] = code
for culture, coords in zip(cultures, code):
offset_lab = [a + b * 14 for a, b in zip(group_color, coords)]
color = spectra.lab(*offset_lab)
if color.rgb != color.clamped_rgb:
out_of_gamut += 1
upscaled = [round(x * 255) for x in color.clamped_rgb]
culture_color[culture] = np.uint8(upscaled)
culture_count = sum(len(x) for x in grouped_cultures)
print('Out of gamut: {:.2%}'.format(out_of_gamut / culture_count),
file=sys.stderr)
for path in parser.files('history/provinces/*'):
match = re.match(r'\d+', path.stem)
if not match:
continue
number = int(match.group())
if number >= max_provinces:
continue
properties = {'culture': 'noculture'}
history = defaultdict(list)
for n, v in parser.parse_file(path):
if n.val in properties:
properties[n.val] = v.val
elif isinstance(n.val, tuple) and n.val <= (1444, 11, 11):
history[n.val].extend((n2.val, v2.val) for n2, v2 in v
if n2.val in properties)
properties.update(p2 for _, v in sorted(history.items()) for p2 in v)
prov_color_lut[number] = culture_color[properties['culture']]
for n in parser.parse_file(climate_path)['impassable']:
prov_color_lut[int(n.val)] = colors['desert']
for n in default_tree['sea_starts']:
prov_color_lut[int(n.val)] = colors['sea']
for n in default_tree['lakes']:
prov_color_lut[int(n.val)] = colors['sea']
image = Image.open(str(provinces_path))
a = np.array(image).view('u1,u1,u1')[..., 0]
b = np.vectorize(lambda x: rgb_number_map[tuple(x)], otypes=[np.uint16])(a)
mod = parser.moddirs[0].name.lower() + '_' if parser.moddirs else ''
borders_path = rootpath / (mod + 'eu4borderlayer.png')
borders = Image.open(str(borders_path))
out = Image.fromarray(prov_color_lut[b])
out.paste(borders, mask=borders)
out_path = rootpath / (mod + 'eu4culture_map.png')
out.save(str(out_path))
if __name__ == '__main__':
main()
| gpl-2.0 | 2,957,810,107,190,530,600 | 37.212598 | 79 | 0.571605 | false |
cgwire/zou | zou/app/blueprints/playlists/resources.py | 1 | 9428 | import slugify
from flask import request, send_file as flask_send_file
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from zou.app import config
from zou.app.mixin import ArgsMixin
from zou.app.utils import permissions
from zou.app.services import (
entities_service,
playlists_service,
persons_service,
preview_files_service,
projects_service,
shots_service,
user_service,
)
from zou.app.stores import file_store, queue_store
from zou.app.utils import fs
from zou.utils.movie import EncodingParameters
class ProjectPlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given project. Result is paginated and
can be sorted.
"""
@jwt_required
def get(self, project_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
page = self.get_page()
sort_by = self.get_sort_by()
task_type_id = self.get_text_parameter("task_type_id")
return playlists_service.all_playlists_for_project(
project_id,
for_client=permissions.has_client_permissions(),
page=page,
sort_by=sort_by,
task_type_id=task_type_id
)
class EpisodePlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given episode. The full list is returned
because the number of playlists in an episode is not that big.
"""
@jwt_required
def get(self, project_id, episode_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
sort_by = self.get_sort_by()
task_type_id = self.get_text_parameter("task_type_id")
if episode_id not in ["main", "all"]:
shots_service.get_episode(episode_id)
return playlists_service.all_playlists_for_episode(
project_id,
episode_id,
permissions.has_client_permissions(),
sort_by=sort_by,
task_type_id=task_type_id,
)
class ProjectPlaylistResource(Resource):
@jwt_required
def get(self, project_id, playlist_id):
user_service.block_access_to_vendor()
user_service.check_project_access(project_id)
return playlists_service.get_playlist_with_preview_file_revisions(
playlist_id
)
class EntityPreviewsResource(Resource):
@jwt_required
def get(self, entity_id):
"""
Retrieve all previews related to a given entity. It sends them
]as a dict. Keys are related task type ids and values are arrays
of preview for this task type.
"""
entity = entities_service.get_entity(entity_id)
user_service.check_project_access(entity["project_id"])
return playlists_service.get_preview_files_for_entity(entity_id)
class PlaylistDownloadResource(Resource):
@jwt_required
def get(self, playlist_id, build_job_id):
playlist = playlists_service.get_playlist(playlist_id)
project = projects_service.get_project(playlist["project_id"])
build_job = playlists_service.get_build_job(build_job_id)
user_service.check_project_access(playlist["project_id"])
if build_job["status"] != "succeeded":
return {"error": True, "message": "Build is not finished"}, 400
else:
movie_file_path = fs.get_file_path(
config,
file_store.get_local_movie_path,
file_store.open_movie,
"playlists",
build_job_id,
"mp4",
)
context_name = slugify.slugify(project["name"], separator="_")
if project["production_type"] == "tvshow":
episode_id = playlist["episode_id"]
if episode_id is not None:
episode = shots_service.get_episode(playlist["episode_id"])
episode_name = episode["name"]
elif playlist["is_for_all"]:
episode_name = "all assets"
else:
episode_name = "main pack"
context_name += "_%s" % slugify.slugify(
episode_name, separator="_"
)
attachment_filename = "%s_%s_%s.mp4" % (
slugify.slugify(build_job["created_at"], separator="").replace(
"t", "_"
),
context_name,
slugify.slugify(playlist["name"], separator="_"),
)
return flask_send_file(
movie_file_path,
conditional=True,
mimetype="video/mp4",
as_attachment=True,
attachment_filename=attachment_filename,
)
class BuildPlaylistMovieResource(Resource):
@jwt_required
def get(self, playlist_id):
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_manager_project_access(playlist["project_id"])
project = projects_service.get_project(playlist["project_id"])
(width, height) = preview_files_service.get_preview_file_dimensions(
project
)
fps = preview_files_service.get_preview_file_fps(project)
params = EncodingParameters(width=width, height=height, fps=fps)
shots = [
{"preview_file_id": x.get("preview_file_id")}
for x in playlist["shots"]
]
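        # With a job queue enabled the encode is handed off to a worker (rq);
        # otherwise the movie file is built synchronously in this request.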
if config.ENABLE_JOB_QUEUE:
remote = config.ENABLE_JOB_QUEUE_REMOTE
# remote worker can not access files local to the web app
assert not remote or config.FS_BACKEND in ["s3", "swift"]
current_user = persons_service.get_current_user()
queue_store.job_queue.enqueue(
playlists_service.build_playlist_job,
args=(playlist, shots, params, current_user["email"], remote),
job_timeout=3600,
)
return {"job": "running"}
else:
job = playlists_service.build_playlist_movie_file(
playlist, shots, params, remote=False
)
return {"job": job["status"]}
class PlaylistZipDownloadResource(Resource):
@jwt_required
def get(self, playlist_id):
playlist = playlists_service.get_playlist(playlist_id)
project = projects_service.get_project(playlist["project_id"])
user_service.block_access_to_vendor()
user_service.check_playlist_access(playlist)
zip_file_path = playlists_service.build_playlist_zip_file(playlist)
context_name = slugify.slugify(project["name"], separator="_")
if project["production_type"] == "tvshow":
episode_id = playlist["episode_id"]
if episode_id is not None:
episode = shots_service.get_episode(playlist["episode_id"])
episode_name = episode["name"]
elif playlist["is_for_all"]:
episode_name = "all assets"
else:
episode_name = "main pack"
context_name += "_%s" % slugify.slugify(episode_name, separator="_")
attachment_filename = "%s_%s.zip" % (
context_name,
slugify.slugify(playlist["name"], separator="_"),
)
return flask_send_file(
zip_file_path,
conditional=True,
mimetype="application/zip",
as_attachment=True,
attachment_filename=attachment_filename,
)
class BuildJobResource(Resource):
@jwt_required
def get(self, playlist_id, build_job_id):
user_service.block_access_to_vendor()
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_playlist_access(playlist)
return playlists_service.get_build_job(build_job_id)
@jwt_required
def delete(self, playlist_id, build_job_id):
user_service.block_access_to_vendor()
playlist = playlists_service.get_playlist(playlist_id)
user_service.check_playlist_access(playlist)
playlists_service.remove_build_job(playlist, build_job_id)
return "", 204
class ProjectBuildJobsResource(Resource):
"""
Retrieve all build jobs related to given project.
    It's mainly used for synchronisation purposes.
"""
@jwt_required
def get(self, project_id):
permissions.check_admin_permissions()
projects_service.get_project(project_id)
return playlists_service.get_build_jobs_for_project(project_id)
class ProjectAllPlaylistsResource(Resource, ArgsMixin):
"""
Retrieve all playlists related to given project.
    It's mainly used for synchronisation purposes.
"""
@jwt_required
def get(self, project_id):
permissions.check_admin_permissions()
projects_service.get_project(project_id)
page = self.get_page()
return playlists_service.get_playlists_for_project(project_id, page)
class TempPlaylistResource(Resource, ArgsMixin):
"""
    Build a temporary playlist from the task ids posted in the request body.
"""
@jwt_required
def post(self, project_id):
user_service.check_project_access(project_id)
task_ids = request.json.get("task_ids", [])
return playlists_service.generate_temp_playlist(task_ids) or []
| agpl-3.0 | 1,423,871,682,942,486,500 | 34.443609 | 80 | 0.609037 | false |
trevor/mailman3 | src/mailman/database/schema/mm_20121015000000.py | 1 | 3289 | # Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""3.0b2 -> 3.0b3 schema migrations.
Renamed:
* bans.mailing_list -> bans.list_id
Removed:
* mailinglist.new_member_options
 * mailinglist.send_reminders
"""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'upgrade',
]
from mailman.database.schema.helpers import make_listid, pivot
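# Helper semantics (as used below): make_listid() is assumed to turn a posting
# address such as test@example.com into its List-ID form test.example.com;
# pivot() swaps a *_backup table into place of the original table.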
VERSION = '20121015000000'
def upgrade(database, store, version, module_path):
if database.TAG == 'sqlite':
upgrade_sqlite(database, store, version, module_path)
else:
upgrade_postgres(database, store, version, module_path)
def upgrade_sqlite(database, store, version, module_path):
database.load_schema(
store, version, 'sqlite_{}_01.sql'.format(version), module_path)
results = store.execute("""
SELECT id, mailing_list
FROM ban;
""")
for id, mailing_list in results:
# Skip global bans since there's nothing to update.
if mailing_list is None:
continue
store.execute("""
UPDATE ban_backup SET list_id = '{}'
WHERE id = {};
""".format(make_listid(mailing_list), id))
# Pivot the bans backup table to the real thing.
pivot(store, 'ban')
pivot(store, 'mailinglist')
def upgrade_postgres(database, store, version, module_path):
# Get the old values from the ban table.
results = store.execute('SELECT id, mailing_list FROM ban;')
store.execute('ALTER TABLE ban ADD COLUMN list_id TEXT;')
for id, mailing_list in results:
# Skip global bans since there's nothing to update.
if mailing_list is None:
continue
store.execute("""
UPDATE ban SET list_id = '{0}'
WHERE id = {1};
""".format(make_listid(mailing_list), id))
store.execute('ALTER TABLE ban DROP COLUMN mailing_list;')
store.execute('ALTER TABLE mailinglist DROP COLUMN new_member_options;')
store.execute('ALTER TABLE mailinglist DROP COLUMN send_reminders;')
store.execute('ALTER TABLE mailinglist DROP COLUMN subscribe_policy;')
store.execute('ALTER TABLE mailinglist DROP COLUMN unsubscribe_policy;')
store.execute(
'ALTER TABLE mailinglist DROP COLUMN subscribe_auto_approval;')
store.execute('ALTER TABLE mailinglist DROP COLUMN private_roster;')
store.execute(
'ALTER TABLE mailinglist DROP COLUMN admin_member_chunksize;')
# Record the migration in the version table.
database.load_schema(store, version, None, module_path)
| gpl-3.0 | 3,540,926,000,866,135,600 | 32.561224 | 78 | 0.679842 | false |
sixam/dw6824 | src/ui/stroke.py | 1 | 2694 | import copy
from PyQt4 import QtCore, QtGui
from utils.utils import Utils
sizeX = 1024
sizeY = 768
class Stroke:
"""Basic Stroke"""
    def __init__(self, path=None, width=0, color=None, id='none'):
        # avoid mutable default arguments: a shared default list would be
        # mutated across instances (offsetPosBy edits points in place)
        self.path = path if path is not None else []
        self.width = width
        self.color = color if color is not None else [0, 0, 0, 255]
        if id == 'none':
            self.id = Utils.generateID()
        else:
            self.id = id
    def __str__(self):
        c = self.getBarycenter()
        return "Stroke : {0} - [{1:.2f},{2:.2f}] - c: {3} - pts:{4}".format(
            self.id[0:5], c[0] / sizeX, c[1] / sizeY, self.color, len(self.path))
    def __copy__(self):
        new = Stroke()
        new.path = copy.copy(self.path)
        new.width = copy.copy(self.width)
        new.color = copy.copy(self.color)
        new.id = copy.copy(self.id)
        return new
def __cmp__(self, other):
eq = True
if self.path != other.path:
eq = False
if self.width != other.width:
eq = False
if self.color != other.color:
eq = False
if self.id != other.id:
eq = False
b1 = self.getBarycenter()
b2 = other.getBarycenter()
if b1[0]!=b2[0] or b1[1]!=b2[1]:
eq = False
if eq:
return 0
return -1
def marshall(self):
""" Wraps the stroke data into a RPC-friendly format """
packet = {}
packet['path'] = self.path
packet['width'] = self.width
packet['color'] = self.color
packet['id'] = self.id
return packet
def toPainterPath(self):
""" Transform the strokes to a QT line """
points = self.path
        path = QtGui.QPainterPath(QtCore.QPointF(*points[0]))
        # skip the first point: the path already starts there
        for pt in points[1:]:
            path.lineTo(QtCore.QPointF(*pt))
return path
def getBarycenter(self):
x = 0
y = 0
n = len(self.path)
if n > 0:
for pt in self.path:
x += pt[0]
y += pt[1]
x /= float(n)
y /= float(n)
return [x,y]
def moveTo(self,newpos):
""" Change the stroke position to the supplied location """
c = self.getBarycenter()
offset = [newpos[0]-c[0],newpos[1]-c[1]]
self.offsetPosBy(offset)
def offsetPosBy(self,offset):
""" Change the stroke position by an offset """
if isinstance(offset,QtCore.QPointF):
x = offset.x()
y = offset.y()
else:
x = offset[0]
y = offset[1]
for i,pt in enumerate(self.path):
pt[0] = pt[0] + x
pt[1] = pt[1] + y
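# Minimal usage sketch (coordinates and width are illustrative):
#   s = Stroke(path=[[0, 0], [10, 5], [20, 0]], width=2)
#   s.offsetPosBy([100, 50])        # translate the whole stroke
#   qt_path = s.toPainterPath()     # ready for QPainter.drawPath()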
| bsd-3-clause | 2,516,270,057,821,345,000 | 27.357895 | 140 | 0.489978 | false |
daniyalzade/burgaz | settings.py | 1 | 4442 | import os
from shopify_settings import *
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
try:
from djangoappengine.settings_base import *
USING_APP_ENGINE = True
except ImportError:
USING_APP_ENGINE = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(SITE_ROOT, 'db-development.sqlite3'),
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
}
}
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'static'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#w%yp9_5wnupojr=4o0mwap#!)y=q9ovu=o#xnytga7u5^bf27'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'shopify_app.context_processors.current_shop',
)
if not USING_APP_ENGINE:
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'shopify_app.middleware.LoginProtection',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'home',
'shopify_app',
)
if USING_APP_ENGINE:
INSTALLED_APPS += (
'djangoappengine',
'djangotoolbox',
)
else:
INSTALLED_APPS += (
'django.contrib.sites',
'django.contrib.staticfiles',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | -8,659,885,461,204,517,000 | 30.062937 | 96 | 0.660288 | false |
thijsmie/tantalus | src/tantalus/logic/transaction.py | 1 | 9287 | from tantalus_db.base import db
from tantalus_db.models import Referencing, Transaction, TransactionLine, ServiceLine, Relation, Product, BtwType
from tantalus_db.utility import get_or_none, transactional
from tantalus.logic.rows import transform_collection
from collections import defaultdict
from datetime import datetime
@transactional
def new_transaction(data):
relation = get_or_none(data['relation'], Relation)
if relation is None:
raise Exception("Relation does not exist!")
if relation.numbered_reference:
reference = Referencing.get_reference()
else:
reference = 0
tr = Transaction.query.filter(Transaction.relation == relation).order_by(
Transaction.informal_reference.desc()).first()
if tr is None:
informal_reference = 1
else:
informal_reference = tr.informal_reference + 1
t = Transaction(
reference=reference,
informal_reference=informal_reference,
relation=relation,
deliverydate=datetime.strptime(data["deliverydate"], "%Y-%m-%d").date(),
processeddate=datetime.now().date(),
description=data.get("description", ""),
two_to_one_has_btw=data.get("two_to_one_has_btw", False),
two_to_one_btw_per_row=data.get("two_to_one_btw_per_row", False)
)
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = product.take(int(prd['amount']))
t.one_to_two.append(line)
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=product.value * int(prd['amount']),
btwtype=product.btwtype
)
product.give(line)
t.two_to_one.append(line)
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
rec = transaction_record(t)
t.total = rec["total"]
db.session.add(t)
relation.budget -= rec["total"]
return t
@transactional
def edit_transaction(t, data):
# Easy stuff first
old_total = t.total
t.revision += 1
t.two_to_one_has_btw = data.get("two_to_one_has_btw", t.two_to_one_has_btw)
t.two_to_one_btw_per_row = data.get("two_to_one_btw_per_row", t.two_to_one_btw_per_row)
if "deliverydate" in data:
t.deliverydate = datetime.strptime(data["deliverydate"], "%Y-%m-%d").date()
if "description" in data:
t.description = data["description"]
newsell = []
for prd in data["sell"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
value=int(prd['amount'])*product.value,
prevalue=int(prd['amount'])*product.value,
amount=int(prd['amount']),
product=product,
btwtype=product.btwtype
)
newsell.append(line)
t.one_to_two = transform_collection(t.one_to_two, newsell, True)
newbuy = []
for prd in data["buy"]:
product = get_or_none(prd["id"], Product)
if product is None:
raise Exception("Product with id {} does not exist.".format(prd["id"]))
line = TransactionLine(
product=product,
amount=int(prd['amount']),
prevalue=int(prd['price']),
value=int(prd['amount'])*product.value,
btwtype=product.btwtype
)
newbuy.append(line)
t.two_to_one = transform_collection(t.two_to_one, newbuy, False)
t.services = []
for prd in data["service"]:
btw = prd.get('btw', 0)
btwtype = BtwType.query.filter(BtwType.percentage == btw).first()
if btwtype is None:
btwtype = BtwType(
name=str(btw)+"%",
percentage=btw
)
db.session.add(btwtype)
line = ServiceLine(
service=prd['contenttype'],
amount=int(prd['amount']),
value=int(prd['price']),
btwtype=btwtype
)
t.services.append(line)
record = transaction_record(t)
t.total = record["total"]
db.session.add(t)
t.relation.budget += old_total - t.total
return t
def make_row_record(row):
return {
"contenttype": row.product.contenttype,
"group": row.product.group.name,
"prevalue": row.prevalue,
"value": row.value,
"amount": row.amount,
"btw": row.btwtype.percentage
}
def make_service_record(row):
return {
"contenttype": row.service,
"amount": row.amount,
"prevalue": row.value,
"value": row.value,
"btw": row.btwtype.percentage
}
def transaction_process(transaction):
sellrows = [make_row_record(row) for row in transaction.one_to_two]
buyrows = [make_row_record(row) for row in transaction.two_to_one]
servicerows = [make_service_record(row) for row in transaction.services]
btwtotals = defaultdict(float)
btwvalues = defaultdict(int)
# Current total including btw, btw rounded per invoice
for row in sellrows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
# Current total including btw, btw rounded per invoice
for row in servicerows:
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
btwtotals[row["btw"]] -= btw
btwvalues[row["btw"]] -= row["prevalue"]
row["btwvalue"] = btw
buybtwtotals = defaultdict(float)
for row in buyrows:
if transaction.two_to_one_has_btw:
if transaction.two_to_one_btw_per_row:
# Current total including btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0 / (row["btw"]/100. + 1))
else:
# Current total including btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100. / (row["btw"]/100. + 1)
else:
if transaction.two_to_one_btw_per_row:
# Current total excluding btw, btw rounded per row
btw = round(row["prevalue"] * row["btw"] / 100.0)
btwvalues[row["btw"]] += btw
else:
# Current total excluding btw, btw rounded for full invoice
# We should use decimals here, but floats are good enough for now
btw = row["prevalue"] * row["btw"] / 100.0
btwvalues[row["btw"]] += btw
btwvalues[row["btw"]] += row["prevalue"]
btwtotals[row["btw"]] += btw
buybtwtotals[row["btw"]] += btw
row["btwvalue"] = btw
row["value_exl"] = row["value"] * (1 - row["btw"] / 100.0 / (row["btw"]/100. + 1))
for k, v in btwtotals.items():
btwtotals[k] = int(round(v))
return dict(btwtotals), dict(btwvalues), dict(buybtwtotals), sellrows, buyrows, servicerows
def transaction_record(transaction):
btwtotals, btwvalues, buybtwtotals, sellrows, buyrows, servicerows = transaction_process(transaction)
selltotal = sum(r['prevalue'] for r in sellrows)
buytotal = sum(r['prevalue'] for r in buyrows)
servicetotal = sum(r['prevalue'] for r in servicerows)
total = selltotal - buytotal + servicetotal
if not transaction.two_to_one_has_btw:
total -= sum(buybtwtotals.values())
return {
"reference": str(transaction.reference).zfill(4),
"name": transaction.relation.name + " " + str(transaction.informal_reference).zfill(3),
"sell": sellrows,
"buy": buyrows,
"service": servicerows,
"selltotal": selltotal,
"buytotal": buytotal,
"btwtotals": btwtotals,
"btwvalues": btwvalues,
"btwtotal": sum(btwtotals.values()),
"servicetotal": servicetotal,
"description": transaction.description,
"processeddate": transaction.processeddate,
"deliverydate": transaction.deliverydate,
"total": int(total),
"id": transaction.id,
"revision": transaction.revision,
"lastedit": transaction.time_updated,
"two_to_one_has_btw": transaction.two_to_one_has_btw,
"two_to_one_btw_per_row": transaction.two_to_one_btw_per_row
}
| mit | 3,800,890,605,192,939,000 | 32.527076 | 113 | 0.582535 | false |
jackrzhang/zulip | scripts/lib/clean_node_cache.py | 1 | 2324 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
if False:
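    # imported only for type checking (mypy type comments); never executed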
from typing import Set
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ZULIP_PATH)
from scripts.lib.node_cache import generate_sha1sum_node_modules
from scripts.lib.zulip_tools import get_caches_to_be_purged, \
get_environment, get_recent_deployments, parse_cache_script_args, \
purge_unused_caches
ENV = get_environment()
NODE_MODULES_CACHE_PATH = "/srv/zulip-npm-cache"
if ENV == "travis":
NODE_MODULES_CACHE_PATH = os.path.join(os.environ["HOME"], "zulip-npm-cache")
try:
subprocess.check_output(["/home/travis/zulip-yarn/bin/yarn", '--version'])
except OSError:
print('yarn not found. Most probably we are running static-analysis and '
'hence yarn is not installed. Exiting without cleaning npm cache.')
sys.exit(0)
def get_caches_in_use(threshold_days):
# type: (int) -> Set[str]
setups_to_check = set([ZULIP_PATH, ])
caches_in_use = set()
if ENV == "prod":
setups_to_check |= get_recent_deployments(threshold_days)
if ENV == "dev":
# In dev always include the currently active cache in order
# not to break current installation in case dependencies
# are updated with bumping the provision version.
CURRENT_CACHE = os.path.dirname(os.path.realpath(os.path.join(ZULIP_PATH, "node_modules")))
caches_in_use.add(CURRENT_CACHE)
for setup_dir in setups_to_check:
node_modules_link_path = os.path.join(setup_dir, "node_modules")
if not os.path.islink(node_modules_link_path):
# If 'package.json' file doesn't exist then no node_modules
# cache is associated with this setup.
continue
# The actual cache path doesn't include the /node_modules
caches_in_use.add(os.path.dirname(os.readlink(node_modules_link_path)))
return caches_in_use
def main(args: argparse.Namespace) -> None:
caches_in_use = get_caches_in_use(args.threshold_days)
purge_unused_caches(
NODE_MODULES_CACHE_PATH, caches_in_use, "node modules cache", args)
if __name__ == "__main__":
args = parse_cache_script_args("This script cleans unused zulip npm caches.")
main(args)
| apache-2.0 | 6,640,714,881,982,662,000 | 37.733333 | 99 | 0.673838 | false |
eliangcs/http-prompt | tests/test_interaction.py | 1 | 2286 | import os
import sys
import pexpect
import pytest
from .base import TempAppDirTestCase
from .utils import get_http_prompt_path
from http_prompt import config
class TestInteraction(TempAppDirTestCase):
def setUp(self):
super(TestInteraction, self).setUp()
# Use temporary directory as user config home.
# Will restore it in tearDown().
self.orig_config_home = os.getenv('XDG_CONFIG_HOME')
os.environ['XDG_CONFIG_HOME'] = self.temp_dir
# Make sure pexpect uses the same terminal environment
self.orig_term = os.getenv('TERM')
os.environ['TERM'] = 'screen-256color'
def tearDown(self):
super(TestInteraction, self).tearDown()
os.environ['XDG_CONFIG_HOME'] = self.orig_config_home
if self.orig_term:
os.environ['TERM'] = self.orig_term
else:
os.environ.pop('TERM', None)
def write_config(self, content):
config_path = config.get_user_config_path()
with open(config_path, 'a') as f:
f.write(content)
@pytest.mark.skipif(sys.platform == 'win32',
reason="pexpect doesn't work well on Windows")
@pytest.mark.slow
def test_interaction(self):
bin_path = get_http_prompt_path()
child = pexpect.spawn(bin_path, env=os.environ)
# TODO: Test more interaction
child.sendline('exit')
child.expect_exact('Goodbye!', timeout=20)
child.close()
@pytest.mark.skipif(sys.platform == 'win32',
reason="pexpect doesn't work well on Windows")
@pytest.mark.slow
def test_vi_mode(self):
self.write_config('vi = True\n')
bin_path = get_http_prompt_path()
child = pexpect.spawn(bin_path, env=os.environ)
child.expect_exact('http://localhost:8000> ')
# Enter 'htpie', switch to command mode (ESC),
# move two chars left (hh), and insert (i) a 't'
child.send('htpie')
child.send('\x1b')
child.sendline('hhit')
child.expect_exact('http http://localhost:8000')
# Enter 'exit'
child.send('\x1b')
child.send('i')
child.sendline('exit')
child.expect_exact('Goodbye!', timeout=20)
child.close()
| mit | -1,556,437,619,179,186,700 | 27.936709 | 70 | 0.60105 | false |
MaxMorgenstern/EmeraldAI | EmeraldAI/Logic/Database/SQlite3.py | 1 | 5843 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from cachetools import cached
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Logic.Modules import Global
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.Logger import FileLogger
class SQlite3(object):
__metaclass__ = Singleton
__Database = None
def __init__(self):
self.__Database = self.GetDB(Config().Get("Database", "SQliteDatabase"))
def GetDB(self, database):
return Worker(os.path.join(Global.EmeraldPath, "Data", "SqliteDB", database.rstrip(".sqlite") + ".sqlite"))
@cached(cache={})
def Execute(self, sql, args=None):
return self.ExecuteDB(self.__Database, sql, args)
def ExecuteDB(self, db, sql, args=None):
db.execute(sql, args)
return db.getLastrowid()
@cached(cache={})
def Fetchall(self, sql, args=None):
return self.FetchallDB(self.__Database, sql, args)
def FetchallCacheBreaker(self, sql, args=None):
return self.FetchallDB(self.__Database, sql, args)
def FetchallDB(self, db, sql, args=None):
return db.execute(sql, args)
def Disconnect(self):
self.DisconnectDB(self.__Database)
def DisconnectDB(self, db):
db.close()
###############################################################################
# Copyright (c) 2014 Palantir Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#__author__ = "Shawn Lee"
#__email__ = "[email protected]"
#__license__ = "MIT"
#
# Thread safe sqlite3 interface.
import sqlite3
import threading
import uuid
try:
import queue as Queue # module re-named in Python 3
except ImportError:
import Queue
class Worker(threading.Thread):
def __init__(self, file_name, max_queue_size=100):
threading.Thread.__init__(self, name=__name__)
self.daemon = True
self._sqlite3_conn = sqlite3.connect(
file_name, check_same_thread=False,
detect_types=sqlite3.PARSE_DECLTYPES)
self._sqlite3_conn.text_factory = str
self._sqlite3_cursor = self._sqlite3_conn.cursor()
self._sql_queue = Queue.Queue(maxsize=max_queue_size)
self._results = {}
self._max_queue_size = max_queue_size
self._select_events = {}
self._close_event = threading.Event()
self._close_lock = threading.Lock()
self.start()
def run(self):
execute_count = 0
for token, query, values in iter(self._sql_queue.get, None):
if query:
self._run_query(token, query, values)
execute_count += 1
if (self._sql_queue.empty() or
execute_count == self._max_queue_size):
self._sqlite3_conn.commit()
execute_count = 0
if self._close_event.is_set() and self._sql_queue.empty():
self._sqlite3_conn.commit()
self._sqlite3_conn.close()
return
def _run_query(self, token, query, values):
if query.lower().strip().startswith("select"):
try:
self._sqlite3_cursor.execute(query, values)
self._results[token] = self._sqlite3_cursor.fetchall()
except sqlite3.Error as err:
self._results[token] = (
"Query returned error: %s: %s: %s" % (query, values, err))
finally:
self._select_events.setdefault(token, threading.Event())
self._select_events[token].set()
else:
try:
self._sqlite3_cursor.execute(query, values)
            except sqlite3.Error as err:
                # keep the worker thread alive; report the failing statement
                print("Query returned error: %s: %s: %s" % (query, values, err))
def close(self):
with self._close_lock:
if not self.is_alive():
return "Already Closed"
self._close_event.set()
self._sql_queue.put(("", "", ""), timeout=5)
self.join()
@property
def queue_size(self):
return self._sql_queue.qsize()
def _query_results(self, token):
try:
self._select_events.setdefault(token, threading.Event())
self._select_events[token].wait()
return self._results[token]
finally:
self._select_events[token].clear()
del self._results[token]
del self._select_events[token]
def execute(self, query, values=None):
if self._close_event.is_set():
return "Close Called"
values = values or []
token = str(uuid.uuid4())
self._sql_queue.put((token, query, values), timeout=5)
if query.lower().strip().startswith("select"):
return self._query_results(token)
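    # Minimal usage sketch (table and values are illustrative):
    #   db = Worker("example.sqlite")
    #   db.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT, v TEXT)")
    #   db.execute("INSERT INTO kv VALUES (?, ?)", ("a", "1"))
    #   rows = db.execute("SELECT * FROM kv")   # SELECTs block for results
    #   db.close()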
def getLastrowid(self):
return self._sqlite3_cursor.lastrowid
| apache-2.0 | -8,573,672,131,020,047,000 | 33.988024 | 115 | 0.608249 | false |
Azure/azure-sdk-for-python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/aio/operations/_linked_services_operations.py | 1 | 16517 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LinkedServicesOperations:
"""LinkedServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datafactory.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_factory(
self,
resource_group_name: str,
factory_name: str,
**kwargs
) -> AsyncIterable["_models.LinkedServiceListResponse"]:
"""Lists linked services.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LinkedServiceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datafactory.models.LinkedServiceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LinkedServiceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_factory.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LinkedServiceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_factory.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/linkedservices'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
factory_name: str,
linked_service_name: str,
linked_service: "_models.LinkedServiceResource",
if_match: Optional[str] = None,
**kwargs
) -> "_models.LinkedServiceResource":
"""Creates or updates a linked service.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param linked_service_name: The linked service name.
:type linked_service_name: str
:param linked_service: Linked service resource definition.
:type linked_service: ~azure.mgmt.datafactory.models.LinkedServiceResource
:param if_match: ETag of the linkedService entity. Should only be specified for update, for
which it should match existing entity or can be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LinkedServiceResource, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.LinkedServiceResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LinkedServiceResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'linkedServiceName': self._serialize.url("linked_service_name", linked_service_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(linked_service, 'LinkedServiceResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LinkedServiceResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/linkedservices/{linkedServiceName}'} # type: ignore
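    # Optimistic-concurrency sketch (hypothetical names; assumes the resource
    # exposes its ETag): pass if_match so the update only succeeds when the
    # entity has not changed since it was fetched:
    #
    #     existing = await client.linked_services.get("rg", "factory", "ls")
    #     updated = await client.linked_services.create_or_update(
    #         "rg", "factory", "ls", resource, if_match=existing.etag)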
async def get(
self,
resource_group_name: str,
factory_name: str,
linked_service_name: str,
if_none_match: Optional[str] = None,
**kwargs
) -> Optional["_models.LinkedServiceResource"]:
"""Gets a linked service.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param linked_service_name: The linked service name.
:type linked_service_name: str
:param if_none_match: ETag of the linked service entity. Should only be specified for get. If
the ETag matches the existing entity tag, or if * was provided, then no content will be
returned.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LinkedServiceResource, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.LinkedServiceResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LinkedServiceResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'linkedServiceName': self._serialize.url("linked_service_name", linked_service_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LinkedServiceResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/linkedservices/{linkedServiceName}'} # type: ignore
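    # Conditional-fetch sketch (hypothetical names): with if_none_match set to
    # the last seen ETag, the service replies 304 with no body when nothing
    # has changed, in which case this method returns None:
    #
    #     ls = await client.linked_services.get(
    #         "rg", "factory", "ls", if_none_match=last_etag)
    #     if ls is None:
    #         pass  # the cached copy is still current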
async def delete(
self,
resource_group_name: str,
factory_name: str,
linked_service_name: str,
**kwargs
) -> None:
"""Deletes a linked service.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param linked_service_name: The linked service name.
:type linked_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
'linkedServiceName': self._serialize.url("linked_service_name", linked_service_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/linkedservices/{linkedServiceName}'} # type: ignore
| mit | -2,004,889,245,169,860,900 | 50.295031 | 216 | 0.642126 | false |
basho-labs/riak-mesos-tools | setup.py | 1 | 4313 | #
# Copyright (C) 2016 Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from codecs import open
from os import path
from riak_mesos import constants
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='riak-mesos',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=constants.version,
description='Riak Mesos Command Line Interface',
long_description=long_description,
# The project's main homepage.
url='https://github.com/basho-labs/riak-mesos-tools',
# Author details
author='Basho Technologies, Inc.',
author_email='[email protected]',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: TODO: License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='dcos command riak database mesosphere',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'docopt',
'dcos>=0.4.6,<0.4.12',
'kazoo',
'click',
'futures'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax, for
# example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'riak-mesos=riak_mesos.cli:cli'
],
},
)
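# Usage sketch (assuming a standard pip workflow; the commands below are
# illustrative, not part of this file):
#
#     pip install -e .[dev,test]   # editable install with the extras above
#     riak-mesos --help            # console script declared in entry_points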
| apache-2.0 | 3,413,416,331,684,066,300 | 34.941667 | 79 | 0.663112 | false |
sbraverman/salt-integration-testing | tests/helpers/sit_helper_test.py | 1 | 1161 | import unittest
from helpers.sit_helper import SITHelper
class SitHelperTest(unittest.TestCase):
def setUp(self):
self.sit_helper = SITHelper('tests/helpers/configs')
self.sit_helper_empty = SITHelper('tests/helpers/empty_configs')
def test_get_custom_user_data(self):
self.assertEquals(self.sit_helper.get_custom_user_data(), "'test'\n")
def test_get_roles(self):
self.assertEquals(self.sit_helper.get_roles(), ['php', 'lb'])
self.assertEquals(self.sit_helper.get_roles(), ['php', 'lb'])
self.assertEquals(self.sit_helper_empty.get_roles(), False)
def test_get_states_for_role(self):
self.assertEquals(self.sit_helper.get_states_for_role('php'), ['apache', 'server'])
self.assertEquals(self.sit_helper.get_states_for_role('lb'), ['lb', 'server'])
self.assertEquals(self.sit_helper.get_states_for_role('saltmaster'),
"Failed to find state list for role: saltmaster. error: 'saltmaster'")
def test_get_configs(self):
self.assertEquals(self.sit_helper.get_configs('roles'), {'php': ['apache', 'server'], 'lb': ['lb', 'server']})
| gpl-3.0 | -1,396,964,101,881,587,000 | 42 | 118 | 0.651163 | false |
jcmgray/quijy | tests/test_gen/test_operators.py | 1 | 10683 | import pytest
import numpy as np
from numpy.testing import assert_allclose
import quimb as qu
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.parametrize("stype", ['csr', 'csc'])
@pytest.mark.parametrize("dtype", ["don't pass", None, np.float64, np.complex128])
def test_hamiltonian_builder(sparse, stype, dtype):
from quimb.gen.operators import hamiltonian_builder
@hamiltonian_builder
def simple_ham(sparse=None, stype=None, dtype=None):
H = qu.qu([[0.0, 1.0], [1.0, 0.0]], sparse=True, stype='csr', dtype=dtype)
return H
@hamiltonian_builder
def simple_ham_complex(sparse=None, stype=None, dtype=None):
H = qu.qu([[0.0, 1.0j], [-1.0j, 0.0]], sparse=True, stype='csr', dtype=dtype)
return H
if dtype == "don't pass":
H = simple_ham(sparse=sparse, stype=stype)
else:
H = simple_ham(sparse=sparse, stype=stype, dtype=dtype)
if dtype == "don't pass" or dtype is None:
        # check that passing no actual dtype keeps it as float
assert H.dtype == np.float64
else:
# check that explicit dtypes are respected
assert H.dtype == dtype
assert qu.issparse(H) == sparse
assert qu.isdense(H) != sparse
if sparse:
assert H.format == stype
with pytest.raises(ValueError): # check immutability
H[0,0] = 100
if dtype == "don't pass":
H = simple_ham_complex(sparse=sparse, stype=stype)
elif dtype is np.float64:
with pytest.warns(np.ComplexWarning):
H = simple_ham_complex(sparse=sparse, stype=stype, dtype=dtype)
else:
H = simple_ham_complex(sparse=sparse, stype=stype, dtype=dtype)
if dtype == "don't pass" or dtype is None:
        # check that passing no actual dtype keeps it as complex
assert H.dtype == np.complex128
else:
# check that explicit dtypes are respected
assert H.dtype == dtype
assert qu.issparse(H) == sparse
assert qu.isdense(H) != sparse
if sparse:
assert H.format == stype
with pytest.raises(ValueError): # check immutability
H[0,0] = 100
return
class TestSpinOperator:
def test_spin_half(self):
Sx = qu.spin_operator('x', 1 / 2)
assert_allclose(Sx, [[0.0, 0.5], [0.5, 0.0]])
Sy = qu.spin_operator('y', 1 / 2)
assert_allclose(Sy, [[0.0, -0.5j], [0.5j, 0.0]])
Sz = qu.spin_operator('z', 1 / 2)
assert_allclose(Sz, [[0.5, 0.0], [0.0, -0.5]])
Sp = qu.spin_operator('+', 1 / 2)
assert_allclose(Sp, [[0.0, 1.0], [0.0, 0.0]])
Sm = qu.spin_operator('-', 1 / 2)
assert_allclose(Sm, [[0.0, 0.0], [1.0, 0.0]])
SI = qu.spin_operator('I', 1 / 2)
assert_allclose(SI, [[1.0, 0.0], [0.0, 1.0]])
@pytest.mark.parametrize("label", ('x', 'y', 'z'))
@pytest.mark.parametrize("S", [1, 3 / 2, 2, 5 / 2])
def test_spin_high(self, label, S):
D = int(2 * S + 1)
op = qu.spin_operator(label, S)
assert_allclose(qu.eigvalsh(op), np.linspace(-S, S, D), atol=1e-13)
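    # Extra sanity sketch (assumption: the same quimb API as above): the spin
    # commutation relation [Sx, Sy] = 1j * Sz should hold for any S, e.g.
    #
    #     Sx, Sy, Sz = (qu.spin_operator(s, 3 / 2) for s in 'xyz')
    #     assert_allclose(Sx @ Sy - Sy @ Sx, 1j * Sz)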
class TestPauli:
def test_pauli_dim2(self):
for dir in (1, 'x', 'X',
2, 'y', 'Y',
3, 'z', 'Z'):
x = qu.pauli(dir)
assert_allclose(qu.eigvalsh(x), [-1, 1])
def test_pauli_dim3(self):
for dir in (1, 'x', 'X',
2, 'y', 'Y',
3, 'z', 'Z'):
x = qu.pauli(dir, dim=3)
assert_allclose(qu.eigvalsh(x), [-1, 0, 1],
atol=1e-15)
def test_pauli_bad_dim(self):
with pytest.raises(KeyError):
qu.pauli('x', 4)
def test_pauli_bad_dir(self):
with pytest.raises(KeyError):
qu.pauli('w', 2)
class TestControlledZ:
def test_controlled_z_dense(self):
cz = qu.controlled('z')
assert_allclose(cz, np.diag([1, 1, 1, -1]))
def test_controlled_z_sparse(self):
cz = qu.controlled('z', sparse=True)
assert(qu.issparse(cz))
assert_allclose(cz.A, np.diag([1, 1, 1, -1]))
class TestGates:
@pytest.mark.parametrize("gate", ['Rx', 'Ry', 'Rz', 'T_gate', 'S_gate',
'CNOT', 'cX', 'cY', 'cZ', 'hadamard',
'phase_gate', 'iswap', 'swap', 'U_gate',
'fsim'])
@pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
@pytest.mark.parametrize('sparse', [False, True])
def test_construct(self, gate, dtype, sparse):
if gate in {'Rx', 'Ry', 'Rz', 'phase_gate'}:
args = (0.43827,)
elif gate in {'U_gate'}:
args = (0.1, 0.2, 0.3)
elif gate in {'fsim'}:
args = (-1.3, 5.4)
else:
args = ()
G = getattr(qu, gate)(*args, dtype=dtype, sparse=sparse)
assert G.dtype == dtype
assert qu.issparse(G) is sparse
psi = qu.rand_ket(G.shape[0])
Gpsi = G @ psi
assert qu.expec(Gpsi, Gpsi) == pytest.approx(1.0)
def test_gates_import(self):
from quimb.gates import Z
assert_allclose(Z, [[1, 0], [0, -1]])
def test_fsim(self):
assert_allclose(qu.fsim(- qu.pi / 2, 0.0), qu.iswap(), atol=1e-12)
class TestHamHeis:
def test_ham_heis_2(self):
h = qu.ham_heis(2, cyclic=False)
evals = qu.eigvalsh(h)
assert_allclose(evals, [-0.75, 0.25, 0.25, 0.25])
gs = qu.groundstate(h)
assert_allclose(qu.expec(gs, qu.singlet()), 1.)
@pytest.mark.parametrize("parallel", [False, True])
def test_ham_heis_sparse_cyclic_4(self, parallel):
h = qu.ham_heis(4, sparse=True, cyclic=True, parallel=parallel)
lk = qu.eigvalsh(h, k=4)
assert_allclose(lk, [-2, -1, -1, -1])
def test_ham_heis_bz(self):
h = qu.ham_heis(2, cyclic=False, b=1)
evals = qu.eigvalsh(h)
assert_allclose(evals, [-3 / 4, -3 / 4, 1 / 4, 5 / 4])
@pytest.mark.parametrize("stype", ["coo", "csr", "csc", "bsr"])
def test_sformat_construct(self, stype):
h = qu.ham_heis(4, sparse=True, stype=stype)
assert h.format == stype
class TestHamJ1J2:
def test_ham_j1j2_3_dense(self):
h = qu.ham_j1j2(3, j2=1.0, cyclic=False)
h2 = qu.ham_heis(3, cyclic=True)
assert_allclose(h, h2)
def test_ham_j1j2_6_sparse_cyc(self):
h = qu.ham_j1j2(6, j2=0.5, sparse=True, cyclic=True)
lk = qu.eigvalsh(h, k=5)
assert_allclose(lk, [-9 / 4, -9 / 4, -7 / 4, -7 / 4, -7 / 4])
def test_ham_j1j2_4_bz(self):
h = qu.ham_j1j2(4, j2=0.5, cyclic=True, bz=0)
lk = qu.eigvalsh(h, k=11)
assert_allclose(lk, [-1.5, -1.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5])
h = qu.ham_j1j2(4, j2=0.5, cyclic=True, bz=0.05)
lk = qu.eigvalsh(h, k=11)
assert_allclose(lk, [-1.5, -1.5, -0.55, -0.55, -0.55,
-0.5, -0.5, -0.5, -0.45, -0.45, -0.45])
class TestHamMBL:
@pytest.mark.parametrize("cyclic", [False, True])
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.parametrize("dh_dim", [1, 2, 3, 'y', 'xz'])
@pytest.mark.parametrize("dh_dist", ['s', 'g'])
def test_construct(self, cyclic, sparse, dh_dim, dh_dist):
qu.ham_mbl(n=3, dh=3, cyclic=cyclic, sparse=sparse, dh_dim=dh_dim,
dh_dist=dh_dist)
@pytest.mark.parametrize("cyclic", [False, True])
@pytest.mark.parametrize("sparse", [False, True])
def test_construct_qp(self, cyclic, sparse):
qu.ham_mbl(n=3, dh=3, cyclic=cyclic, sparse=sparse, dh_dist='qp')
class TestHamHeis2D:
@pytest.mark.parametrize("cyclic", [False, True])
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.parametrize("parallel", [False, True])
@pytest.mark.parametrize("bz", [0.0, 0.7])
def test_construct(self, cyclic, sparse, parallel, bz):
qu.ham_heis_2D(2, 3, cyclic=cyclic, sparse=sparse,
parallel=parallel, bz=bz)
class TestSpinZProjector:
@pytest.mark.parametrize("sz", [-2, -1, 0, 1, 2])
def test_works(self, sz):
prj = qu.zspin_projector(4, sz)
h = qu.ham_heis(4)
h0 = prj.T @ h @ prj
v0s = qu.eigvecsh(h0)
for i in range(v0s.shape[1]):
v0 = v0s[:, [i]]
vf = prj @ v0
prjv = vf @ vf.H
# Check reconstructed full eigenvectors commute with full ham
assert_allclose(prjv @ h, h @ prjv, atol=1e-13)
if sz == 0:
# Groundstate must be in most symmetric subspace
gs = qu.groundstate(h)
gs0 = prj @ v0s[:, [0]]
assert_allclose(qu.expec(gs, gs0), 1.0)
assert_allclose(qu.expec(h, gs0), qu.expec(h, gs))
def test_raises(self):
with pytest.raises(ValueError):
qu.zspin_projector(5, 0)
with pytest.raises(ValueError):
qu.zspin_projector(4, 1 / 2)
@pytest.mark.parametrize("sz", [(-1 / 2, 1 / 2), (3 / 2, 5 / 2)])
def test_spin_half_double_space(self, sz):
prj = qu.zspin_projector(5, sz)
h = qu.ham_heis(5)
h0 = prj.T @ h @ prj
v0s = qu.eigvecsh(h0)
for i in range(v0s.shape[1]):
v0 = v0s[:, [i]]
vf = prj @ v0
prjv = vf @ vf.H
# Check reconstructed full eigenvectors commute with full ham
assert_allclose(prjv @ h, h @ prjv, atol=1e-13)
if sz == 0:
# Groundstate must be in most symmetric subspace
gs = qu.groundstate(h)
gs0 = prj @ v0s[:, [0]]
assert_allclose(qu.expec(gs, gs0), 1.0)
assert_allclose(qu.expec(h, gs0), qu.expec(h, gs))
class TestSwap:
@pytest.mark.parametrize("sparse", [False, True])
def test_swap_qubits(self, sparse):
a = qu.up() & qu.down()
s = qu.swap(2, sparse=sparse)
assert_allclose(s @ a, qu.down() & qu.up())
@pytest.mark.parametrize("sparse", [False, True])
def test_swap_higher_dim(self, sparse):
a = qu.rand_ket(9)
s = qu.swap(3, sparse=sparse)
assert_allclose(s @ a, a.reshape([3, 3]).T.reshape([9, 1]))
class TestHubbardSpinless:
def test_half_filling_groundstate(self):
H = qu.ham_hubbard_hardcore(8, t=0.5, V=1.0, mu=1.0)
gs = qu.groundstate(H)
dims = [2] * 8
cn = qu.num(2)
ens = [qu.expec(cn, qu.ptr(gs, dims, i)) for i in range(8)]
for en in ens:
assert en == pytest.approx(0.5, rel=1e-6)
| mit | -1,548,288,738,012,105,500 | 34.491694 | 85 | 0.539455 | false |
fperez/sympy | sympy/functions/special/tensor_functions.py | 1 | 1178 | from sympy.core.function import Function
from sympy.core import sympify, S
from sympy.utilities.decorator import deprecated
###############################################################################
###################### Kronecker Delta, Levi-Civita etc. ######################
###############################################################################
class Dij(Function):
"""
Represents the Kronecker Delta Function
if i == j, Dij(i, j) = 1
otherwise Dij(i, j) = 0
where i, j are usually integers
"""
nargs = (1, 2)
@classmethod
def eval(cls, i, j=0):
i, j = map(sympify, (i, j))
if i == j:
return S.One
elif i.is_number and j.is_number:
return S.Zero
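# Example behaviour of Dij (standard SymPy evaluation):
#     Dij(1, 1) -> 1,  Dij(1, 2) -> 0
#     Dij(i, j) stays unevaluated for symbolic (non-numeric) i, j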
class Eijk(Function):
"""
Represents the Levi-Civita symbol (antisymmetric symbol)
"""
nargs = 3
@classmethod
def eval(cls, i, j, k):
i, j, k = map(sympify, (i, j, k))
if (i,j,k) in [(1,2,3), (2,3,1), (3,1,2)]:
return S.One
elif (i,j,k) in [(1,3,2), (3,2,1), (2,1,3)]:
return S.NegativeOne
elif i==j or j==k or k==i:
return S.Zero
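# Example behaviour of Eijk on explicit integer triples:
#     Eijk(1, 2, 3) -> 1,  Eijk(2, 1, 3) -> -1,  Eijk(1, 1, 2) -> 0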
| bsd-3-clause | -3,756,714,093,554,926,600 | 27.731707 | 79 | 0.44652 | false |
nirvaris/nirvaris-contactform | setup.py | 1 | 1175 | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='nirvaris-contactform',
version='0.15',
packages=['contactform'],
include_package_data=True,
license='MIT License', # example license
description='A simple Django app to have a contact us form in a website.',
long_description=README,
url='https://github.com/nirvaris/nirvaris-contactform',
author='Nirvaris',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
| mit | -7,020,979,583,007,888,000 | 34.606061 | 78 | 0.646809 | false |
ryanvarley/ExoData | exodata/astroquantities.py | 1 | 1806 | """ Temp module until astro units are added to quantities
"""
from quantities import *
L_s = solar_luminosity = UnitQuantity(
'solar_luminosity',
3.839*(10**26)*W,
symbol='L_s',
)
L_s.latex_symbol = 'L_\odot'
R_s = solar_radius = UnitLength(
'solar_radius',
6.995 * (10**8) * m,
aliases=['solar_radii'],
symbol='R_s',
)
R_s.latex_symbol = 'R_\odot'
R_e = earth_radius = UnitLength(
'earth_radius',
6.371 * (10**6) * m,
aliases=['earth_radii'],
symbol='R_e',
)
R_e.latex_symbol = 'R_\oplus'
R_j = jupiter_radius = UnitLength(
'jupiter_radius',
6.9911 * (10**7) * m,
aliases=['jupiter_radii'],
symbol='R_j',
)
R_j.latex_symbol = 'R_J'
M_s = solar_mass = UnitMass(
'solar_mass', 1.99*(10**30)*kg,
aliases=['solar_masses'],
symbol='M_s',
)
M_s.latex_symbol = 'M_\odot'
M_e = earth_mass = UnitMass(
'earth_mass', 5.97219*(10**24)*kg,
aliases=['earth_masses'],
symbol='M_e',
)
M_e.latex_symbol = 'M_\oplus'
M_j = jupiter_mass = UnitMass(
'jupiter_mass', 1.8986*(10**27)*kg,
aliases=['jupiter_masses'],
symbol='M_j',
)
M_j.latex_symbol = 'M_J'
Gyear = giga_year = UnitTime(
'giga_year', 10**9*year,
symbol='Gyr',
)
JulianDay = julian_day = JD = UnitTime(
'julian_day', day,
symbol='JD',
)
""" Note while quantities doesnt directly support units with an offset in most cases Julian Days are treated like days.
It is useful then to know if your working in Julian Days, MJD, BJD etc"""
ModifiedJulianDate = modified_julian_day = MJD = UnitTime(
'modified_julian_day', day,
symbol='MJD',
)
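# Usage sketch (assuming the quantities API imported above):
#     t = 5 * JulianDay
#     t.rescale(day)         # JD is dimensionally just days
#     (2 * M_j).rescale(kg)  # ordinary unit conversion works as usual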
# Compound Units
gcm3 = CompoundUnit('g /cm**3')
gcm3.latex_symbol = 'g/cm^3'
kgm3 = CompoundUnit('kg /m**3')
kgm3.latex_symbol = 'kg/m^3'
ms2 = CompoundUnit('m/s**2')
ms2.latex_symbol = 'm/s^2' | mit | 8,007,810,740,782,752,000 | 21.036585 | 119 | 0.609081 | false |
richardliaw/ray | python/ray/runtime_context.py | 1 | 4842 | import ray.worker
import logging
logger = logging.getLogger(__name__)
class RuntimeContext(object):
"""A class used for getting runtime context."""
def __init__(self, worker):
assert worker is not None
self.worker = worker
def get(self):
"""Get a dictionary of the current_context.
        Fields that are not available (for example, the actor ID inside a
        task) won't be included in the returned dictionary.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
"task_id": self.task_id,
"actor_id": self.actor_id
}
# Remove fields that are None.
return {
key: value
for key, value in context.items() if value is not None
}
@property
def job_id(self):
"""Get current job ID for this worker or driver.
        Job ID is the ID of the Ray driver that creates tasks or actors.
Returns:
If called by a driver, this returns the job ID. If called in
a task, return the job ID of the associated driver.
"""
job_id = self.worker.current_job_id
assert not job_id.is_nil()
return job_id
@property
def node_id(self):
"""Get current node ID for this worker or driver.
        Node ID is the ID of the node that your driver, task, or actor runs on.
Returns:
a node id for this worker or driver.
"""
node_id = self.worker.current_node_id
assert not node_id.is_nil()
return node_id
@property
def task_id(self):
"""Get current task ID for this worker or driver.
Task ID is the id of a Ray task.
This shouldn't be used in a driver process.
Example:
>>> @ray.remote
>>> class Actor:
>>> def ready(self):
>>> return True
>>>
>>> @ray.remote
>>> def f():
>>> return True
>>>
>>> # All the below code will generate different task ids.
>>> # Task ids are available for actor creation.
>>> a = Actor.remote()
>>> # Task ids are available for actor tasks.
>>> a.ready.remote()
>>> # Task ids are available for normal tasks.
>>> f.remote()
Returns:
The current worker's task id. None if there's no task id.
"""
# only worker mode has actor_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
f"This method is only available when the process is a\
worker. Current mode: {self.worker.mode}")
task_id = self.worker.current_task_id
return task_id if not task_id.is_nil() else None
@property
def actor_id(self):
"""Get the current actor ID in this worker.
ID of the actor of the current process.
This shouldn't be used in a driver process.
Returns:
The current actor id in this worker. None if there's no actor id.
"""
# only worker mode has actor_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
f"This method is only available when the process is a\
worker. Current mode: {self.worker.mode}")
actor_id = self.worker.actor_id
return actor_id if not actor_id.is_nil() else None
@property
def was_current_actor_reconstructed(self):
"""Check whether this actor has been restarted
Returns:
Whether this actor has been ever restarted.
"""
assert not self.actor_id.is_nil(), (
"This method should't be called inside Ray tasks.")
actor_info = ray.state.actors(self.actor_id.hex())
return actor_info and actor_info["NumRestarts"] != 0
@property
def current_placement_group_id(self):
"""Get the current Placement group ID of this worker.
Returns:
The current placement group id of this worker.
"""
return self.worker.placement_group_id
@property
def should_capture_child_tasks_in_placement_group(self):
"""Get if the current task should capture parent's placement group.
This returns True if it is called inside a driver.
Returns:
Return True if the current task should implicitly
capture the parent placement group.
"""
return self.worker.should_capture_child_tasks_in_placement_group
_runtime_context = None
def get_runtime_context():
global _runtime_context
if _runtime_context is None:
_runtime_context = RuntimeContext(ray.worker.global_worker)
return _runtime_context
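# Usage sketch (hypothetical task; assumes ray.init() has been called and that
# this module is reached via the public ray API):
#
#     @ray.remote
#     def f():
#         ctx = get_runtime_context()
#         return ctx.task_id  # available in worker mode only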
| apache-2.0 | -9,188,469,718,043,545,000 | 29.840764 | 78 | 0.57311 | false |
andy-z/ged4py | docs/example_code/example3.py | 1 | 1988 | import sys
from ged4py.parser import GedcomReader
from ged4py.date import DateValueVisitor
class DateFormatter(DateValueVisitor):
"""Visitor class that produces string representation of dates.
"""
def visitSimple(self, date):
return f"{date.date}"
def visitPeriod(self, date):
return f"from {date.date1} to {date.date2}"
def visitFrom(self, date):
return f"from {date.date}"
def visitTo(self, date):
return f"to {date.date}"
def visitRange(self, date):
return f"between {date.date1} and {date.date2}"
def visitBefore(self, date):
return f"before {date.date}"
def visitAfter(self, date):
return f"after {date.date}"
def visitAbout(self, date):
return f"about {date.date}"
def visitCalculated(self, date):
return f"calculated {date.date}"
def visitEstimated(self, date):
return f"estimated {date.date}"
def visitInterpreted(self, date):
return f"interpreted {date.date} ({date.phrase})"
def visitPhrase(self, date):
return f"({date.phrase})"
format_visitor = DateFormatter()
with GedcomReader(sys.argv[1]) as parser:
# iterate over each INDI record in a file
for i, indi in enumerate(parser.records0("INDI")):
print(f"{i}: {indi.name.format()}")
# get all possible event types and print their dates,
# full list of events is longer, this is only an example
events = indi.sub_tags("BIRT", "CHR", "DEAT", "BURI", "ADOP", "EVEN")
for event in events:
date = event.sub_tag_value("DATE")
# Some event types like generic EVEN can define TYPE tag
event_type = event.sub_tag_value("TYPE")
# pass a visitor to format the date
if date:
date_str = date.accept(format_visitor)
else:
date_str = "N/A"
print(f" event: {event.tag} date: {date_str} type: {event_type}")
| mit | 2,323,925,988,355,210,000 | 29.584615 | 80 | 0.612173 | false |
mitchellrj/neo4j-rest-client | neo4jrestclient/constants.py | 1 | 1325 | # -*- coding: utf-8 -*-
__author__ = "Javier de la Rosa"
__license__ = "GPL 3"
__version__ = "2.0.2"
__email__ = "[email protected]"
__url__ = "https://github.com/versae/neo4j-rest-client"
__description__ = """Object-oriented Python library to interact with """ \
"""Neo4j standalone REST server"""
__status__ = "Development"
# Order
BREADTH_FIRST = "breadth_first"
DEPTH_FIRST = "depth_first"
# Relationships
RELATIONSHIPS_ALL = "all"
RELATIONSHIPS_IN = "in"
RELATIONSHIPS_OUT = "out"
# Return
RETURN_ALL_NODES = "all"
RETURN_ALL_BUT_START_NODE = "all_but_start_node"
# Stop
STOP_AT_DEPTH_ONE = 1
STOP_AT_END_OF_GRAPH = "none"
# Uniqueness
NONE = "none"
NODE_GLOBAL = "node_global"
NODE_PATH = "node_path"
NODE_RECENT = "node recent" # Deprecated
RELATIONSHIP_GLOBAL = "relationship_global"
RELATIONSHIP_PATH = "relationship_path"
RELATIONSHIP_RECENT = "relationship recent" # Deprecated
# Returns
NODE = "node"
RELATIONSHIP = "relationship"
PATH = "path"
FULLPATH = "fullpath"
POSITION = "position"
RAW = "raw"
ITERABLE = "iterable"
# Indexes
INDEX = "index"
INDEX_NODE = "index_node"
INDEX_RELATIONSHIP = "index_relationship"
INDEX_EXACT = "exact"
INDEX_FULLTEXT = "fulltext"
# Cypher ordering
ASC = "asc"
DESC = "desc"
# Transactions
TX_GET = "GET"
TX_PUT = "PUT"
TX_POST = "POST"
TX_DELETE = "DELETE"
| gpl-3.0 | 3,486,054,625,117,709,000 | 24.480769 | 74 | 0.68 | false |
luckydonald/pytgbot | pytgbot/webhook.py | 1 | 5203 | # -*- coding: utf-8 -*-
from luckydonaldUtils.logger import logging
from pytgbot.bot import Bot
from pytgbot.exceptions import TgApiServerException, TgApiParseException
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
class Webhook(Bot):
"""
    Subclass of Bot, will be returned on a successful webhook setting.
    Differs from the normal Bot class, as the sending function stores the result to send,
    so you can actually get that and return the data on your incoming message.
"""
stored_request = None
def _prepare_request(self, command, query):
"""
:param command: The Url command parameter
:type command: str
:param query: will get json encoded.
:type query: dict
:return:
"""
from luckydonaldUtils.encoding import to_native as n
from pytgbot.api_types.sendable import Sendable
from pytgbot.api_types import as_array
from DictObject import DictObject
import json
params = {}
for key in query.keys():
element = query[key]
if element is not None:
if isinstance(element, Sendable):
params[key] = json.dumps(as_array(element))
else:
params[key] = element
url = self._base_url.format(api_key=n(self.api_key), command=n(command))
return DictObject(url=url, params=params)
# end def
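    # Example of the returned shape (hypothetical values; the url is built
    # from self._base_url with the api key and command substituted):
    #
    #     req = self._prepare_request("sendMessage", {"chat_id": 123, "text": "hi"})
    #     req.url     # formatted API url for the "sendMessage" command
    #     req.params  # {"chat_id": 123, "text": "hi"}; Sendables are json-encoded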
def _do_request(self, url, params=None, files=None, use_long_polling=None, request_timeout=None):
"""
:param url: The complete url to send to
:type url: str
:keyword params: Parameter for that connection
:keyword files: Optional files parameters
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:return: json data received
:rtype: DictObject.DictObject
"""
import requests
r = requests.post(url, params=params, files=files, stream=use_long_polling,
verify=True, timeout=request_timeout)
# No self signed certificates. Telegram should be trustworthy anyway...
from DictObject import DictObject
try:
logger.debug("Response: {}".format(r.json()))
json_data = DictObject.objectify(r.json())
except Exception:
logger.exception("Parsing answer failed.\nRequest: {r!s}\nContent: {r.content}".format(r=r))
raise
# end if
json_data["response"] = r # TODO: does this failes on json lists? Does TG does that?
return json_data
# end def
def _process_response(self, json_data):
# TG should always return an dict, with at least a status or something.
if self.return_python_objects:
if json_data.ok != True:
raise TgApiServerException(
error_code=json_data.error_code if "error_code" in json_data else None,
response=json_data.response if "response" in json_data else None,
description=json_data.description if "description" in json_data else None,
                request=json_data.response.request if "response" in json_data else None  # 'r' is not in scope here; use the response stashed by _do_request
)
# end if not ok
if "result" not in json_data:
raise TgApiParseException('Key "result" is missing.')
# end if no result
return json_data.result
# end if return_python_objects
return json_data
# end def
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
"""
Send a request to the api.
If the bot is set to return the json objects, it will look like this:
```json
{
"ok": bool,
"result": {...},
# optionally present:
"description": "human-readable description of the result",
"error_code": int
}
```
:param command: The Url command parameter
:type command: str
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:keyword files: if it needs to send files.
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:param query: will get json encoded.
:return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
:rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
"""
params = self._prepare_request(command, query)
r = self._do_request(
params.url, params=params.params,
            files=files, use_long_polling=use_long_polling, request_timeout=request_timeout
)
return self._process_response(r)
# end def do
| gpl-3.0 | 6,613,766,085,546,908,000 | 34.155405 | 120 | 0.596771 | false |
schilli/MOPS | MOPS/CorrFunction.py | 1 | 2568 | # -*- coding: UTF-8 -*-
from __future__ import print_function, division
import numpy as np
class CorrFunction(object):
"""
correlation function data, additional information and manipulation methods
Parameters
----------
corr : (nvec, nframes) array
Correlation functions
std : (nvec, nframes) array
Correlation function standard deviations
error : (nvec, nframes) array
Correlation function standard error of the mean
info : dict
Dictionary with information on correlation functions
"""
def __init__(self, corr=None, std=None, error=None, info=None):
self.corr = corr
self.std = std
self.error = error
if info is not None:
self.resid = info['bondvecinfo']['resid' ]
self.resindex = info['bondvecinfo']['resindex' ]
self.resname = info['bondvecinfo']['resnames' ]
self.atomindex = info['bondvecinfo']['atomindex' ]
self.atomname = info['bondvecinfo']['atomnames' ]
self.element = info['bondvecinfo']['element' ]
self.chain = info['bondvecinfo']['chain' ]
self.bondlength = info['bondvecinfo']['bondlength']
self.bondvec = info['bondvecinfo']['bondvec' ]
self.fitgroup = info['bondvecinfo']['fitgroup' ]
try:
self.fit = info['bondvecinfo']['fit' ]
except KeyError:
self.fit = False
try:
self.S2direct = np.array(info['bondvecinfo']['S2'])
except KeyError:
self.S2direct = None
self.dt = info['bondvecinfo']['dt' ]
self.topfilename = info['topfilename']
self.npzfilename = info['npzfilename']
self.trjfilename = info['trjfilename']
self.frames = info['frames' ]
else:
self.resid = None
self.resindex = None
self.resname = None
self.atomindex = None
self.atomname = None
self.element = None
self.chain = None
self.bondlength = None
self.bondvec = None
self.fitgroup = None
self.fit = None
self.dt = None
self.topfilename = None
self.npzfilename = None
self.trjfilename = None
self.frames = None
| gpl-3.0 | 2,965,479,022,776,792,600 | 34.666667 | 78 | 0.511682 | false |
kayhayen/Nuitka | tests/standalone/TkInterUsing.py | 1 | 1436 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" TkInter standalone test, trying to make sure it loads.
"""
from __future__ import print_function
# Python3 changed module name.
if str is bytes:
import Tkinter as tkinter
else:
import tkinter
# nuitka-skip-unless-expression: __import__("Tkinter" if sys.version_info[0] < 3 else "tkinter")
try:
root = tkinter.Tk() # this will fail in absence of TCL
except tkinter.TclError as e:
print("TCLError exception happened.")
assert "connect to display" in str(e) or "no display" in str(e), str(e)
else:
print("OK")
| apache-2.0 | 5,512,069,357,224,753,000 | 34.9 | 96 | 0.704735 | false |
narfman0/D3OrganDropCalculator | calculator.py | 1 | 1510 | #!/bin/python
import sys
from math import pow
DEFAULT_TORMENT = 2
DEFAULT_RUNS = 5
# generate the final row of Pascal's triangle (binomial coefficients for n = rows - 1)
def pascalsTriangle(rows):
for rownum in range (rows):
newValue=1
rlist = [newValue]
for iteration in range (rownum):
newValue = newValue * ( rownum-iteration ) * 1 / ( iteration + 1 )
rlist.append(int(newValue))
return rlist
#p to drop organ given torment level
def pOrganDrop(torment):
return .25+(torment-1)/20.0
#p to drop organ given torment level, iteration, and total iterations
def pOrganDropI(torment, i, total):
psuccess=pow(pOrganDrop(torment), total-i)
pnotsuccess=1
if i > 0:
pnotsuccess=pow(1-pOrganDrop(torment), i)
return psuccess*pnotsuccess
#p to drop organ at given torment/run level
def calculate(torment,runs):
triangle=pascalsTriangle(runs+1)
p=0.0
i=0
for leaf in triangle:
if i < len(triangle)-1:
pi = pOrganDropI(torment, i, runs) * leaf
p += pi
print('pdrop(i):' + str(i) + ' is ' + str(pi) + ' total: ' + str(p))
i+=1
return p
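# Worked check: calculate(t, n) sums C(n, i) * p**(n-i) * (1-p)**i for
# i = 0..n-1, which equals 1 - (1-p)**n, i.e. the chance of at least one
# drop in n runs. With torment 2 (p = 0.3) and 2 runs: 1 - 0.7**2 = 0.51.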
if __name__ == "__main__":
if len(sys.argv) != 3:
print('Usage: ./calculator.py <torment level> <#runs>' +
' using default torment level ' + str(DEFAULT_TORMENT) + ' with ' + str(DEFAULT_RUNS) + ' runs')
torment=DEFAULT_TORMENT
runs=DEFAULT_RUNS
else:
torment=int(sys.argv[1])
runs=int(sys.argv[2])
pdrop=calculate(torment,runs)
print('pdrop for a given organ=' + str(pdrop) + ', pdrop for all three is=' + str(pow(pdrop,3.0)))
| gpl-2.0 | 294,702,526,321,510,850 | 27.490566 | 100 | 0.653642 | false |
stdweird/vsc-manage | lib/vsc/manage/monitoring.py | 1 | 7199 | ##
# Copyright 2011-2013 Ghent University
#
# This file is part of vsc-manage,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-manage
#
# vsc-manage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-manage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-manage. If not, see <http://www.gnu.org/licenses/>.
#
##
'''
Created on Feb 22, 2012
@author: Jens Timmerman
'''
from config import get_config
from managecommands import SshCommand, Worker
from vsc.utils import fancylogger
class Monitoring(Worker):
'''
This class (interface) represents a monitoring service
'''
def __init__(self, nodelist, imms=False):
'''
Constructor
nodelist is a list of hostnames for nodes to perform actions on
        if imms=True it will also perform the same action on the IMM nodes of the given nodes
'''
Worker.__init__(self)
self.nodelist = nodelist
self.log = fancylogger.getLogger(self.__class__.__name__)
self.imms = imms
def scheduleDowntime(self, hours):
"""
schedule a downtime for the nodes
"""
pass
def __repr__(self):
return self.__class__.__name__
def doIt(self):
"""
compatibility with the compositenode output
"""
out = Worker.doIt(self)
return [self, out]
class MonitoringException(Exception):
"""
Raised when something goes wrong with monitoring
"""
pass
class Icinga(Monitoring):
"""
Implementation of monitoring, interfacing with icinga.
"""
def scheduleDowntime(self, hours, comment=None):
"""
schedule a downtime for the nodes
        This schedules a downtime for the host, and all its services
"""
commands = []
import time
#comment could still be None or false
if not comment:
comment = get_config("ICINGA_DEFAULT_COMMENT")
starttime = int(time.time())
duration = 3600 * float(hours)
endtime = starttime + duration
for node in self.nodelist:
commands.append('echo "%s" > %s' %
(get_config("ICINGA_SCHEDULE_SERVICE_DOWNTIME") % {'host_name': node.hostname,
'start_time': starttime, 'timestamp': starttime - 1, 'comment': comment, 'end_time': endtime,
'duration': duration, 'clustername': node.clustername}, get_config("ICINGA_SOCKET"))
)
if self.imms and node.immmonitoring:
commands.append('echo "%s" > %s' %
(get_config("ICINGA_SCHEDULE_SERVICE_DOWNTIME") % {'host_name': node.immmonitoring,
'start_time': starttime, 'timestamp': starttime - 1, 'comment': comment,
'end_time': endtime, 'duration': duration, 'clustername': node.clustername},
get_config("ICINGA_SOCKET"))
)
command = ";".join(commands)
self.log.debug("creating command %s" % command)
command = SshCommand(command=command, host=get_config("ICINGA_HOST"), user="root", port=22, timeout=6)
self._adcommand(command)
return True
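    # Usage sketch (hypothetical node list): queue a 2-hour downtime and run
    # the generated ssh commands:
    #
    #     mon = Icinga(nodelist, imms=True)
    #     mon.scheduleDowntime(2, comment="maintenance")
    #     monitor, out = mon.doIt()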
def acknowledgeHost(self, comment=None):
"""
Acknowledges a problem on the current nodes
        This acknowledges the current problem on the host, but not its services
"""
commands = []
import time
starttime = int(time.time())
#comment could still be None
if not comment:
comment = get_config("ICINGA_DEFAULT_COMMENT")
for node in self.nodelist:
tpldict = {
'host_name': node.hostname,
'timestamp': starttime - 1,
'comment': comment,
'clustername': node.clustername
}
tpldict['host_name'] = get_config('ICINGA_HOSTNAME') % tpldict
ack_command = 'echo "%s" > %s' % (get_config("ICINGA_ACKNOWLEDGE_HOST_PROBLEM") % tpldict, get_config("ICINGA_SOCKET"))
commands.append(ack_command)
if self.imms and node.immmonitoring:
commands.append('echo "%s" > %s' % (get_config("ICINGA_ACKNOWLEDGE_HOST_PROBLEM") % {
'host_name': node.immmonitoring,
'timestamp': starttime - 1,
'comment': comment,
'clustername': node.clustername,
}, get_config("ICINGA_SOCKET")))
command = ";".join(commands)
self.log.debug("creating command %s" % command)
command = SshCommand(command=command, host=get_config("ICINGA_HOST"), user="root", port=22, timeout=6)
self._adcommand(command)
def acknowledgeService(self, servicename, comment=None):
"""
Acknowledges a given service on all nodes
"""
commands = []
import time
starttime = int(time.time())
#comment could still be None
if not comment:
comment = get_config("ICINGA_DEFAULT_COMMENT")
for node in self.nodelist:
tpldict = {
'host_name': node.hostname,
'timestamp': starttime - 1,
'comment': comment,
'clustername': node.clustername,
'service': servicename
}
# apply icinga templating to hostname
tpldict['host_name'] = get_config('ICINGA_HOSTNAME') % tpldict
ack_command = get_config("ICINGA_ACKNOWLEDGE_SERVICE_PROBLEM") % tpldict
commands.append('echo "%s" > %s' % (ack_command, get_config("ICINGA_SOCKET")))
if self.imms and node.immmonitoring:
ack_command = get_config("ICINGA_ACKNOWLEDGE_SERVICE_PROBLEM") % {
'host_name': node.immmonitoring,
'timestamp': starttime - 1,
'comment': comment,
'clustername': node.clustername,
'service': servicename
}
commands.append('echo "%s" > %s' % (ack_command, get_config("ICINGA_SOCKET")))
command = ";".join(commands)
self.log.debug("creating command %s" % command)
command = SshCommand(command=command, host=get_config("ICINGA_HOST"), user="root", port=22, timeout=6)
self._adcommand(command)
| gpl-2.0 | 4,906,653,235,562,744,000 | 36.691099 | 131 | 0.581609 | false |
dhomeier/astropy | astropy/utils/masked/tests/test_containers.py | 1 | 4941 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, representation as r
from astropy.time import Time
from .. import Masked
class TestRepresentations:
def setup_class(self):
self.x = np.array([3., 5., 0.]) << u.m
self.y = np.array([4., 12., 1.]) << u.m
self.z = np.array([0., 0., 1.]) << u.m
self.c = r.CartesianRepresentation(self.x, self.y, self.z)
self.mask = np.array([False, False, True])
self.mx = Masked(self.x, self.mask)
self.my = Masked(self.y, self.mask)
self.mz = Masked(self.z, self.mask)
self.mc = r.CartesianRepresentation(self.mx, self.my, self.mz)
def test_initialization(self):
check = self.mc.z == self.mz
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.mc.x, self.mx)
assert_array_equal(self.mc.y, self.my)
assert_array_equal(self.mc.z, self.mz)
def test_norm(self):
# Need stacking and erfa override.
norm = self.mc.norm()
assert_array_equal(norm.unmasked, self.c.norm())
assert_array_equal(norm.mask, self.mask)
def test_transformation(self):
msr = self.mc.represent_as(r.SphericalRepresentation)
sr = self.c.represent_as(r.SphericalRepresentation)
for comp in msr.components:
mc = getattr(msr, comp)
c = getattr(sr, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
# Transformation back. This also tests erfa.ufunc.s2p, which
# is special in having a core dimension only in the output.
cr2 = sr.represent_as(r.CartesianRepresentation)
mcr2 = msr.represent_as(r.CartesianRepresentation)
for comp in mcr2.components:
mc = getattr(mcr2, comp)
c = getattr(cr2, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
class TestSkyCoord:
def setup_class(self):
self.ra = np.array([3., 5., 0.]) << u.hourangle
self.dec = np.array([4., 12., 1.]) << u.deg
self.sc = SkyCoord(self.ra, self.dec)
self.mask = np.array([False, False, True])
self.mra = Masked(self.ra, self.mask)
self.mdec = Masked(self.dec, self.mask)
self.msc = SkyCoord(self.mra, self.mdec)
def test_initialization(self):
check = self.msc.dec == self.mdec
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.msc.data.lon, self.mra)
assert_array_equal(self.msc.data.lat, self.mdec)
def test_transformation(self):
gcrs = self.sc.gcrs
mgcrs = self.msc.gcrs
assert_array_equal(mgcrs.data.lon.mask, self.msc.data.lon.mask)
assert_array_equal(mgcrs.data.lon.unmasked, gcrs.data.lon)
assert_array_equal(mgcrs.data.lat.unmasked, gcrs.data.lat)
class TestTime:
def setup_class(self):
self.s = np.array(['2010-11-12T13:14:15.160',
'2010-11-12T13:14:15.161',
'2011-12-13T14:15:16.170'])
self.t = Time(self.s)
# Time formats will currently strip any ndarray subtypes, so we cannot
# initialize a Time with a Masked version of self.s yet. Instead, we
# work around it, for now only testing that masked are preserved by
# transformations.
self.mask = np.array([False, False, True])
self.mt = self.t._apply(Masked, self.mask)
def test_initialization(self):
assert_array_equal(self.mt.jd1.mask, self.mask)
assert_array_equal(self.mt.jd2.mask, self.mask)
assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)
assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)
@pytest.mark.parametrize('format_', ['jd', 'cxcsec', 'jyear'])
def test_different_formats(self, format_):
# Formats do not yet work with everything; e.g., isot is not supported
# since the Masked class does not yet support structured arrays.
tfmt = getattr(self.t, format_)
mtfmt = getattr(self.mt, format_)
check = mtfmt == tfmt
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
@pytest.mark.parametrize('scale', ['tai', 'tcb', 'ut1'])
def test_transformation(self, scale):
tscl = getattr(self.t, scale)
mtscl = getattr(self.mt, scale)
assert_array_equal(mtscl.jd1.mask, self.mask)
assert_array_equal(mtscl.jd2.mask, self.mask)
assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)
assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)
| bsd-3-clause | 859,605,714,367,307,100 | 39.5 | 78 | 0.624165 | false |
anpingli/openshift-ansible | playbooks/openstack/inventory.py | 1 | 8124 | #!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""
from __future__ import print_function
from collections import Mapping
import json
import os
import shade
def base_openshift_inventory(cluster_hosts):
'''Set the base openshift inventory.'''
inventory = {}
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
etcd = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'etcd']
if not etcd:
etcd = masters
infra_hosts = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'infra']
app = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'app']
cns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'cns']
nodes = list(set(masters + infra_hosts + app + cns))
dns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'dns']
load_balancers = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'lb']
osev3 = list(set(nodes + etcd + load_balancers))
inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
inventory['OSEv3'] = {'hosts': osev3}
inventory['masters'] = {'hosts': masters}
inventory['etcd'] = {'hosts': etcd}
inventory['nodes'] = {'hosts': nodes}
inventory['infra_hosts'] = {'hosts': infra_hosts}
inventory['app'] = {'hosts': app}
inventory['glusterfs'] = {'hosts': cns}
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
inventory['localhost'] = {'ansible_connection': 'local'}
return inventory
def get_docker_storage_mountpoints(volumes):
'''Check volumes to see if they're being used for docker storage'''
docker_storage_mountpoints = {}
for volume in volumes:
if volume.metadata.get('purpose') == "openshift_docker_storage":
for attachment in volume.attachments:
if attachment.server_id in docker_storage_mountpoints:
docker_storage_mountpoints[attachment.server_id].append(attachment.device)
else:
docker_storage_mountpoints[attachment.server_id] = [attachment.device]
return docker_storage_mountpoints
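# Example of the structure built above (hypothetical server ids and devices):
#     {"server-uuid-1": ["/dev/vdb", "/dev/vdc"],
#      "server-uuid-2": ["/dev/vdb"]}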
def _get_hostvars(server, docker_storage_mountpoints):
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
'ansible_host': ssh_ip_address
}
public_v4 = server.public_v4 or server.private_v4
if public_v4:
hostvars['public_v4'] = server.public_v4
hostvars['openshift_public_ip'] = server.public_v4
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
hostvars['openshift_ip'] = server.private_v4
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
hostvars['openshift_hostname'] = server.metadata.get(
'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
if server.metadata['host-type'] == 'cns':
hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
node_labels = server.metadata.get('node_labels')
# NOTE(shadower): the node_labels value must be a dict not string
if not isinstance(node_labels, Mapping):
node_labels = json.loads(node_labels)
if node_labels:
hostvars['openshift_node_labels'] = node_labels
# check for attached docker storage volumes
if 'os-extended-volumes:volumes_attached' in server:
if server.id in docker_storage_mountpoints:
hostvars['docker_storage_mountpoints'] = ' '.join(
docker_storage_mountpoints[server.id])
return hostvars
def build_inventory():
'''Build the dynamic inventory.'''
cloud = shade.openstack_cloud()
# TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
# environment variable.
cluster_hosts = [
server for server in cloud.list_servers()
if 'metadata' in server and 'clusterid' in server.metadata]
inventory = base_openshift_inventory(cluster_hosts)
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.get('group')
if group not in inventory:
inventory[group] = {'hosts': []}
inventory[group]['hosts'].append(server.name)
inventory['_meta'] = {'hostvars': {}}
# cinder volumes used for docker storage
docker_storage_mountpoints = get_docker_storage_mountpoints(
cloud.list_volumes())
for server in cluster_hosts:
inventory['_meta']['hostvars'][server.name] = _get_hostvars(
server,
docker_storage_mountpoints)
stout = _get_stack_outputs(cloud)
if stout is not None:
try:
inventory['localhost'].update({
'openshift_openstack_api_lb_provider':
stout['api_lb_provider'],
'openshift_openstack_api_lb_port_id':
stout['api_lb_vip_port_id'],
'openshift_openstack_api_lb_sg_id':
stout['api_lb_sg_id']})
except KeyError:
pass # Not an API load balanced deployment
try:
inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
except KeyError:
pass # Not a kuryr deployment
return inventory
def _get_stack_outputs(cloud_client):
"""Returns a dictionary with the stack outputs"""
cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
stack = cloud_client.get_stack(cluster_name)
if stack is None or stack['stack_status'] not in (
'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
return None
data = {}
for output in stack['outputs']:
data[output['output_key']] = output['output_value']
return data
def _get_kuryr_vars(cloud_client, data):
"""Returns a dictionary of Kuryr variables resulting of heat stacking"""
settings = {}
settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
settings['kuryr_openstack_pod_project_id'] = (
cloud_client.current_project_id)
settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
settings['kuryr_openstack_username'] = cloud_client.auth['username']
settings['kuryr_openstack_password'] = cloud_client.auth['password']
if 'user_domain_id' in cloud_client.auth:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_id'])
else:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_name'])
# FIXME(apuimedo): consolidate kuryr controller credentials into the same
# vars the openstack playbook uses.
settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
if 'project_domain_id' in cloud_client.auth:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_id'])
else:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_name'])
return settings
if __name__ == '__main__':
print(json.dumps(build_inventory(), indent=4, sort_keys=True))
| apache-2.0 | -2,398,377,176,609,801,000 | 36.09589 | 94 | 0.634663 | false |
shanezhiu/pyspider | tests/test_fetcher.py | 1 | 11012 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-15 22:10:35
import os
import json
import copy
import time
import httpbin
import umsgpack
import subprocess
import unittest2 as unittest
from multiprocessing import Queue
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response
from pyspider.fetcher.tornado_fetcher import Fetcher
class TestFetcher(unittest.TestCase):
sample_task_http = {
'taskid': 'taskid',
'project': 'project',
'url': '',
'fetch': {
'method': 'GET',
'headers': {
'Cookie': 'a=b',
'a': 'b'
},
'cookies': {
'c': 'd',
},
'timeout': 60,
'save': 'abc',
},
'process': {
'callback': 'callback',
'save': [1, 2, 3],
},
}
@classmethod
def setUpClass(self):
self.inqueue = Queue(10)
self.outqueue = Queue(10)
self.fetcher = Fetcher(self.inqueue, self.outqueue)
self.fetcher.phantomjs_proxy = '127.0.0.1:25555'
self.rpc = xmlrpc_client.ServerProxy('http://localhost:%d' % 24444)
self.xmlrpc_thread = utils.run_in_thread(self.fetcher.xmlrpc_run, port=24444)
self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
self.httpbin = 'http://127.0.0.1:14887'
self.thread = utils.run_in_thread(self.fetcher.run)
try:
self.phantomjs = subprocess.Popen(['phantomjs',
os.path.join(os.path.dirname(__file__),
'../pyspider/fetcher/phantomjs_fetcher.js'),
'25555'])
except OSError:
self.phantomjs = None
time.sleep(0.5)
@classmethod
def tearDownClass(self):
if self.phantomjs:
self.phantomjs.kill()
self.phantomjs.wait()
self.httpbin_thread.terminate()
self.httpbin_thread.join()
self.rpc._quit()
self.thread.join()
time.sleep(1)
def test_10_http_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertEqual(response.json['headers'].get('Cookie'), 'c=d', response.json)
def test_15_http_post(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
request['fetch']['data'] = 'binux'
request['fetch']['cookies'] = {'c': 'd'}
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['form'].get('binux'), '')
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertEqual(response.json['headers'].get('Cookie'), 'c=d', response.json)
def test_e010_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect-to?url=/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.url, self.httpbin+'/get')
def test_e020_too_much_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect/10'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 599, result)
self.assertIn('redirects followed', response.error)
def test_e030_cookie(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/cookies/set?k1=v1&k2=v2'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.cookies, {'k1': 'v1', 'k2': 'v2', 'c': 'd'}, result)
def test_20_dataurl_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_30_with_queue(self):
request= copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_40_with_rpc(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = umsgpack.unpackb(self.rpc.fetch(request).data)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_50_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# utf8 encoding 中文
request['fetch']['data'] = "[BASE64-DATA]5Lit5paH[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
self.assertIn(u'中文', response.json['form'], response.json)
def test_55_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# gbk encoding 中文
request['fetch']['data'] = "[BASE64-DATA]1tDOxA==[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
def test_60_timeout(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['timeout'] = 3
start_time = time.time()
self.inqueue.put(request)
task, result = self.outqueue.get()
end_time = time.time()
self.assertGreater(end_time - start_time, 1.5)
self.assertLess(end_time - start_time, 4.5)
def test_65_418(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/status/418'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 418)
self.assertIn('teapot', response.text)
def test_70_phantomjs_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/get'
request['fetch']['fetch_type'] = 'js'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
data = json.loads(response.doc('pre').text())
self.assertIsNotNone(data, response.content)
self.assertEqual(data['headers'].get('A'), 'b', response.json)
self.assertEqual(data['headers'].get('Cookie'), 'c=d', response.json)
def test_80_phantomjs_timeout(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['fetch_type'] = 'js'
request['fetch']['timeout'] = 3
start_time = time.time()
result = self.fetcher.sync_fetch(request)
end_time = time.time()
self.assertGreater(end_time - start_time, 2)
self.assertLess(end_time - start_time, 5)
def test_90_phantomjs_js_script(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['js_script'] = 'function() { document.write("binux") }'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('binux', result['content'])
@unittest.skipIf(os.environ.get('IGNORE_GOOGLE'), "can't connect to google.")
def test_a100_phantomjs_sharp_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'https://groups.google.com/forum/#!forum/pyspider-users'
request['fetch']['fetch_type'] = 'js'
request['fetch']['headers']['User-Agent'] = 'Mozilla/5.0'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('pyspider-users', result['content'])
def test_a110_dns_error(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'http://www.not-exists-site.com/'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
self.inqueue.put(request)
task, result = self.outqueue.get()
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
| apache-2.0 | 8,131,086,496,657,840,000 | 37.596491 | 86 | 0.616455 | false |
ryfx/modrana | modules/mod_sketch.py | 1 | 2168 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Sketching on touchscreen
#----------------------------------------------------------------------------
# Copyright 2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
from time import time
# only import GKT libs if GTK GUI is used
from core import gs
if gs.GUIString == "GTK":
import cairo
def getModule(*args, **kwargs):
return Sketch(*args, **kwargs)
class Sketch(RanaModule):
"""Sketching functionality"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.points = []
if 0: # to test
m = self.m.get("menu", None)
if m:
m.clearMenu('sketch', "set:menu:None")
self.set("menu", "sketch")
def drawMenu(self, cr, menuName, args=None):
if self.get("menu", "") == "sketch":
(x, y, w, h) = self.get('viewport')
count = 0
for p in self.points:
if count == 0:
cr.move_to(p[0], p[1])
else:
cr.line_to(p[0], p[1])
count += 1
cr.stroke()
mod = self.m.get("clickHandler", None)
if mod:
mod.registerDraggableEntireScreen("sketch")
def dragEvent(self, startX, startY, dx, dy, x, y):
self.points.append((x, y))
| gpl-3.0 | 3,229,469,049,537,184,000 | 32.369231 | 77 | 0.532749 | false |
Paymiumm/virtual_wallet_api | app/api_version/utils/__init__.py | 1 | 8126 | # ~ util package that would hold common functionalities and tools that all versions of the api would use
# (@: Name): "mailMorth"
# (@:Description): "email Management, and automation api code"
# (@:Author): "inteliJence development team"
# under the license of Apache License 2.0 and intelijence Protective Rights please edit and use it with all the care you can give
# this
# import the user handlers
# --------------------------------------
# Import all modules and extentions
# --------------------------------------
from user import Users
from flask_mail import Message
from ext_declaration import mail
from flask import current_app, render_template
from security import generate_confirmation_token, resend_confirmation_token, generate_transact_url, confirm_transact_url
from models import User
import socket
import re
import datetime
import threading
from werkzeug.security import generate_password_hash, check_password_hash
from twilio.rest import Client
from passgen import passgen
from ext_declaration import db
# --------------------------------------
# END IMPORTATIONS
# --------------------------------------
# --------------------------------------
# Start Work
# --------------------------------------
# def generate_one_time_password():
# """passgen modules used to generate one time password"""
# value = passgen(length=6, case='both', digits=True, letters=True, punctuation=False)
# return value
# from app.email import send_email
# end all import
user = Users() # start user manager
def send_email(to, subject, template):
msg = Message(subject, recipients=[to], html=template, sender=current_app.config['MAIL_DEFAULT_SENDER'])
mail.send(msg)
states = ['ABIA',
'ADAMAWA',
'AKWA IBOM',
'ANAMBRA',
'BAUCHI',
'BAYELSA',
'BENUE',
'BORNO',
'CROSS RIVER',
'DELTA',
'EBONYI',
'EDO',
'EKITI',
'ENUGU',
'GOMBE',
'IMO',
'JIGAWA',
'KADUNA',
'KANO',
'KATSINA',
'KEBBI',
'KOGI',
'KWARA',
'LAGOS',
'NASSARAWA',
'NIGER',
'OGUN',
'ONDO',
'OSUN',
'OYO',
'PLATEAU',
'RIVERS',
'SOKOTO',
'TARABA',
'YOBE',
'ZAMFARA',
'State']
def validate_(type_, value):
if type_ == "username":
if re.match("(\S+)([A-z]+)([0-9]*)([-_]*)", value):
print()
re.match("(\S+)([A-z]+)([0-9]*)([-_]*)", value)
return True
else:
print("username regex error")
return False
elif type_ == "password":
if re.match("(\S+)", value):
return True
else:
print("password regex error")
return False
elif type_ == "fullname":
if re.match("([A-z]+) ([A-z]+)", value):
return True
else:
print("name regex error")
return False
elif type_ == "number":
if re.match("([+]+)([0-9]+)", value):
return True
else:
print("number regex error")
return False
elif type_ == "address":
if re.match("^([0-9]+)(\s*)(\S*)([a-zA-Z ]+)(\s*)(\S*)", value):
return True
else:
print("address regex error")
return False
elif type_ == "city":
if re.match("[A-z]{2,}", value):
return True
else:
print("city regex error")
return False
elif type_ == "date":
if re.match("(\d+) (\d+) \d{4}", value):
return True
else:
print("date regex error")
return False
elif type_ == "postal":
if re.match("\d{6}", value):
return True
else:
print("postal regex error")
return False
elif type_ == "state":
for x in states:
if x == value and value != "State":
return True
print("opps states is not valid")
return False
elif type_ == "email":
if re.match("([a-zA-Z0-9_\.\-])+\@(([a-zA-Z0-9\-])+\.)+([a-zA-Z0-9]{2,4})+", value):
return True
else:
print("email regex error")
return False
def send_sms(to_number, body):
"""This function is to send_sms using twillio"""
# generate OTP
account_sid = current_app.config['TWILIO_ACCOUNT_SID']
auth_token = current_app.config['TWILIO_AUTH_TOKEN']
twilio_number = current_app.config['TWILIO_NUMBER']
client = Client(account_sid, auth_token)
client.api.messages.create(to_number, body, from_=twilio_number)
def generate_onetime_password():
# return generate_password_hash(str(random.random()))[20:26]
value = passgen(length=6, case='both', digits=True, letters=True, punctuation=False)
return value
def remove_otp(user):
user_ = User.query.filter_by(email=user).first()
user_.password_hash = ""
db.session.add(user_.password_hash)
db.session.commit()
print(user)
def activate_mail(email):
try:
token = generate_confirmation_token(email)
html = render_template('activateMail.html', confirm_url='http://127.0.0.1:8000/account/confirMail/' + token,
email='http://127.0.0.1:8000/account/resendConfirmation?email=' + email)
subject = 'Paymiumm: Confirm Your Account'
send_email(email, subject, html)
return True
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def resend_activate_mail(email=""):
try:
token = resend_confirmation_token(email)
html = render_template('activateMail.html', confirm_url='http://127.0.0.1:8000/account/confirMail/' + token,
email='http://127.0.0.1:8000/account/resendConfirmation?email=' + email)
subject = 'Paymiumm: Confirm Your Account'
send_email(email, subject, html)
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def exec_(email):
t = threading.Timer(3, remove_otp, args=[email])
t.start()
return True
def send_one_time_mail(user):
gP = generate_onetime_password()
print(user)
html = render_template('one_password_mail.html', one_time_password=gP)
subject = 'Paymiumm: Your one-time password'
try:
send_email(user, subject, html)
return str(gP)
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def send_link_with_email(email, amount, message=None):
try:
details = {'email': email, 'amount': amount}
token = generate_transact_url(details)
html = render_template('send_money_link.html', confirm_url='' + token, email='')
subject = message
if message is None:
send_email(email, subject, html)
else:
send_email(email, subject, html)
return True
except Exception as e:
print(e)
return False
except socket.gaierror as e:
print(e)
return False
def send_link_with_text(number, amount, message=None):
try:
details = {'number': number, 'amount': amount}
token = generate_transact_url(details)
subject = message
if message is None:
send_sms(to_number=number, body=token)
else:
send_sms(to_number=number, body=token)
return True
except Exception as e:
print(e)
return False
| apache-2.0 | -1,626,510,524,226,959,000 | 24.469055 | 129 | 0.52412 | false |
aurigadl/EnvReactAsk | server/apiFuec/models.py | 1 | 3422 | from server import db
class Fuec(db.Model):
__table_args__ = {'extend_existing': True}
__tablename__ = 'fuec'
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime, default=db.func.current_timestamp())
created_by = db.Column(db.Integer, db.ForeignKey('user.id'))
no_fuec = db.Column(db.String(20))
social_object = db.Column(db.String(255), nullable=False)
nit = db.Column(db.String(255))
no_agreement = db.Column(db.Integer)
contractor = db.Column(db.String(255))
id_contractor = db.Column(db.Integer)
object_agreement = db.Column(db.String(255))
origin_destination = db.Column(db.String(1000))
kind_hiring = db.Column(db.String(255))
kind_link = db.Column(db.String(255))
init_date = db.Column(db.String(255))
last_date = db.Column(db.String(255))
car_no = db.Column(db.Integer)
car_license_plate = db.Column(db.String(255))
car_model = db.Column(db.String(255))
car_brand = db.Column(db.String(255))
car_class_car = db.Column(db.Integer)
car_operation = db.Column(db.String(255))
data_driver_json = db.Column(db.String(1000))
contractor_owner = db.Column(db.String(1000))
file_pdf = db.Column(db.LargeBinary)
def __init__(self,
no_fuec,
created_by,
social_object,
nit,
no_agreement,
contractor,
id_contractor,
object_agreement,
origin_destination,
kind_hiring,
kind_link,
init_date,
last_date,
car_no,
car_license_plate,
car_model,
car_brand,
car_class_car,
car_operation,
data_driver,
contractor_owner,
file_pdf):
if no_fuec:
self.no_fuec = no_fuec
if created_by:
self.created_by = created_by
if social_object:
self.social_object = social_object.lower()
if nit:
self.nit = nit
if no_agreement:
self.no_agreement = no_agreement
if contractor:
self.contractor = contractor
if id_contractor:
self.id_contractor = id_contractor
if object_agreement:
self.object_agreement = object_agreement
if origin_destination:
self.origin_destination = origin_destination
if kind_hiring:
self.kind_hiring = kind_hiring
if kind_link:
self.kind_link = kind_link
if init_date:
self.init_date = init_date
if last_date:
self.last_date = last_date
if car_no:
self.car_no = car_no
if car_license_plate:
self.car_license_plate = car_license_plate
if car_model:
self.car_model = car_model
if car_brand:
self.car_brand = car_brand
if car_class_car:
self.car_class_car = car_class_car
if car_operation:
self.car_operation = car_operation
if data_driver:
self.data_driver_json = data_driver
if contractor_owner:
self.contractor_owner = contractor_owner
if file_pdf:
self.file_pdf = file_pdf | gpl-3.0 | 7,552,208,850,233,082,000 | 31.292453 | 76 | 0.544126 | false |
moocowmoo/pycoin | pycoin/contrib/msg_signing.py | 1 | 11785 | import hashlib
import hmac
import io
import os
import re
from binascii import b2a_base64, a2b_base64
from .. import ecdsa
from ..serialize.bitcoin_streamer import stream_bc_string
from ..ecdsa import ellipticcurve, numbertheory
from ..networks import address_prefix_for_netcode, network_name_for_netcode
from ..encoding import public_pair_to_bitcoin_address, to_bytes_32, from_bytes_32, double_sha256
from ..key import Key
# According to brainwallet, this is "inputs.io" format, but it seems practical
# and is deployed in the wild. Core bitcoin doesn't offer a message wrapper like this.
signature_template = '''\
-----BEGIN {net_name} SIGNED MESSAGE-----
{msg}
-----BEGIN SIGNATURE-----
{addr}
{sig}
-----END {net_name} SIGNED MESSAGE-----'''
def parse_signed_message(msg_in):
"""
Take an "armoured" message and split into the message body, signing address
and the base64 signature. Should work on all altcoin networks, and should
accept both Inputs.IO and Multibit formats but not Armory.
Looks like RFC2550 <https://www.ietf.org/rfc/rfc2440.txt> was an "inspiration"
for this, so in case of confusion it's a reference, but I've never found
a real spec for this. Should be a BIP really.
"""
# Convert to Unix line feeds from DOS style, iff we find them, but
# restore to same at the end. The RFC implies we should be using
# DOS \r\n in the message, but that does not always happen in today's
# world of MacOS and Linux devs. A mix of types will not work here.
dos_nl = ('\r\n' in msg_in)
if dos_nl:
msg_in = msg_in.replace('\r\n', '\n')
try:
# trim any junk in front
_, body = msg_in.split('SIGNED MESSAGE-----\n', 1)
except:
raise ValueError("expecting text SIGNED MESSSAGE somewhere")
try:
# - sometimes middle sep is BEGIN BITCOIN SIGNATURE, other times just BEGIN SIGNATURE
# - choose the last instance, in case someone signs a signed message
parts = re.split('\n-----BEGIN [A-Z ]*SIGNATURE-----\n', body)
msg, hdr = ''.join(parts[:-1]), parts[-1]
except:
raise ValueError("expected BEGIN SIGNATURE line", body)
# after message, expect something like an email/http headers, so split into lines
hdr = list(filter(None, [i.strip() for i in hdr.split('\n')]))
if '-----END' not in hdr[-1]:
raise ValueError("expecting END on last line")
sig = hdr[-2]
addr = None
for l in hdr:
l = l.strip()
if not l:
continue
if l.startswith('-----END'):
break
if ':' in l:
label, value = [i.strip() for i in l.split(':', 1)]
if label.lower() == 'address':
addr = l.split(':')[1].strip()
break
continue
addr = l
break
if not addr or addr == sig:
raise ValueError("Could not find address")
if dos_nl:
msg = msg.replace('\n', '\r\n')
return msg, addr, sig
def sign_message(key, message=None, verbose=False, use_uncompressed=None, msg_hash=None):
"""
Return a signature, encoded in Base64, which can be verified by anyone using the
public key.
"""
secret_exponent = key.secret_exponent()
if not secret_exponent:
raise TypeError("Private key is required to sign a message")
addr = key.address()
netcode = key.netcode()
mhash = hash_for_signing(message, netcode) if message else msg_hash
# Use a deterministic K so our signatures are deterministic.
try:
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash)
except RuntimeError:
# .. except if extremely unlucky
k = from_bytes_32(os.urandom(32))
r, s, y_odd = _my_sign(ecdsa.generator_secp256k1, secret_exponent, mhash, _k=k)
is_compressed = not key._use_uncompressed(use_uncompressed)
assert y_odd in (0, 1)
# See http://bitcoin.stackexchange.com/questions/14263
# for discussion of the proprietary format used for the signature
#
# Also from key.cpp:
#
# The header byte: 0x1B = first key with even y, 0x1C = first key with odd y,
# 0x1D = second key with even y, 0x1E = second key with odd y,
# add 0x04 for compressed keys.
first = 27 + y_odd + (4 if is_compressed else 0)
sig = b2a_base64(bytearray([first]) + to_bytes_32(r) + to_bytes_32(s)).strip()
if not isinstance(sig, str):
# python3 b2a wrongness
sig = str(sig, 'ascii')
if not verbose or message is None:
return sig
return signature_template.format(
msg=message, sig=sig, addr=addr,
net_name=network_name_for_netcode(netcode).upper())
def verify_message(key_or_address, signature, message=None, msg_hash=None, netcode=None):
"""
Take a signature, encoded in Base64, and verify it against a
key object (which implies the public key),
or a specific base58-encoded pubkey hash.
"""
if isinstance(key_or_address, Key):
# they gave us a private key or a public key already loaded.
key = key_or_address
else:
key = Key.from_text(key_or_address)
netcode = netcode or key.netcode()
try:
# Decode base64 and a bitmask in first byte.
is_compressed, recid, r, s = _decode_signature(signature)
except ValueError:
return False
# Calculate hash of message used in signature
mhash = hash_for_signing(message, netcode) if message is not None else msg_hash
# Calculate the specific public key used to sign this message.
pair = _extract_public_pair(ecdsa.generator_secp256k1, recid, r, s, mhash)
# Check signing public pair is the one expected for the signature. It must be an
# exact match for this key's public pair... or else we are looking at a validly
# signed message, but signed by some other key.
#
pp = key.public_pair()
if pp:
# expect an exact match for public pair.
return pp == pair
else:
# Key() constructed from a hash of pubkey doesn't know the exact public pair, so
# must compare hashed addresses instead.
addr = key.address()
prefix = address_prefix_for_netcode(netcode)
ta = public_pair_to_bitcoin_address(pair, compressed=is_compressed, address_prefix=prefix)
return ta == addr
def msg_magic_for_netcode(netcode):
"""
We need the constant "strMessageMagic" in C++ source code, from file "main.cpp"
It is not shown as part of the signed message, but it is prefixed to the message
as part of calculating the hash of the message (for signature). It's also what
prevents a message signature from ever being a valid signature for a transaction.
Each altcoin finds and changes this string... But just simple substitution.
"""
name = network_name_for_netcode(netcode)
if netcode in ('BLK', 'BC'):
name = "BlackCoin" # NOTE: we need this particular HumpCase
# testnet, the first altcoin, didn't change header
if netcode == 'XTN':
name = "Bitcoin"
return '%s Signed Message:\n' % name
def _decode_signature(signature):
"""
Decode the internal fields of the base64-encoded signature.
"""
if signature[0] not in ('G', 'H', 'I'):
# Because we know the first char is in range(27, 35), we know
# valid first character is in this set.
raise TypeError("Expected base64 value as signature", signature)
# base 64 decode
sig = a2b_base64(signature)
if len(sig) != 65:
raise ValueError("Wrong length, expected 65")
# split into the parts.
first = ord(sig[0:1]) # py3 accomidation
r = from_bytes_32(sig[1:33])
s = from_bytes_32(sig[33:33+32])
# first byte encodes a bits we need to know about the point used in signature
if not (27 <= first < 35):
raise ValueError("First byte out of range")
# NOTE: The first byte encodes the "recovery id", or "recid" which is a 3-bit values
# which selects compressed/not-compressed and one of 4 possible public pairs.
#
first -= 27
is_compressed = bool(first & 0x4)
return is_compressed, (first & 0x3), r, s
def _extract_public_pair(generator, recid, r, s, value):
"""
Using the already-decoded parameters of the bitcoin signature,
return the specific public key pair used to sign this message.
Caller must verify this pubkey is what was expected.
"""
assert 0 <= recid < 4, recid
G = generator
n = G.order()
curve = G.curve()
order = G.order()
p = curve.p()
x = r + (n * (recid // 2))
alpha = (pow(x, 3, p) + curve.a() * x + curve.b()) % p
beta = numbertheory.modular_sqrt(alpha, p)
inv_r = numbertheory.inverse_mod(r, order)
y = beta if ((beta - recid) % 2 == 0) else (p - beta)
minus_e = -value % order
R = ellipticcurve.Point(curve, x, y, order)
Q = inv_r * (s * R + minus_e * G)
public_pair = (Q.x(), Q.y())
# check that this is the RIGHT public key? No. Leave that for the caller.
return public_pair
def hash_for_signing(msg, netcode='BTC'):
"""
Return a hash of msg, according to odd bitcoin method: double SHA256 over a bitcoin
encoded stream of two strings: a fixed magic prefix and the actual message.
"""
magic = msg_magic_for_netcode(netcode)
fd = io.BytesIO()
stream_bc_string(fd, bytearray(magic, 'ascii'))
stream_bc_string(fd, bytearray(msg, 'utf-8'))
# return as a number, since it's an input to signing algos like that anyway
return from_bytes_32(double_sha256(fd.getvalue()))
def deterministic_make_k(generator_order, secret_exponent, val,
hash_f=hashlib.sha256, trust_no_one=True):
"""
Generate K value BUT NOT according to https://tools.ietf.org/html/rfc6979
ecsda.deterministic_generate_k() was more general than it needs to be,
and I felt the hand of NSA in the wholly constants, so I simplified and
changed the salt.
"""
n = generator_order
assert hash_f().digest_size == 32
# code below has been specialized for SHA256 / bitcoin usage
assert n.bit_length() == 256
hash_size = 32
if trust_no_one:
v = b"Edward Snowden rocks the world!!"
k = b"Qwest CEO Joseph Nacchio is free"
else:
v = b'\x01' * hash_size
k = b'\x00' * hash_size
priv = to_bytes_32(secret_exponent)
if val > n:
val -= n
h1 = to_bytes_32(val)
k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
k = hmac.new(k, v + b'\x01' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
while 1:
t = hmac.new(k, v, hash_f).digest()
k1 = from_bytes_32(t)
if k1 >= 1 and k1 < n:
return k1
k = hmac.new(k, v + b'\x00', hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
def _my_sign(generator, secret_exponent, val, _k=None):
"""
Return a signature for the provided hash (val), using the provided
random nonce, _k or generate a deterministic K as needed.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = generator
n = G.order()
k = _k or deterministic_make_k(n, secret_exponent, val)
p1 = k * G
r = p1.x()
if r == 0:
raise RuntimeError("amazingly unlucky random number r")
s = (numbertheory.inverse_mod(k, n) *
(val + (secret_exponent * r) % n)) % n
if s == 0:
raise RuntimeError("amazingly unlucky random number s")
return (r, s, p1.y() % 2)
# EOF
| mit | 2,836,944,414,860,314,600 | 31.376374 | 98 | 0.623929 | false |
google/telluride_decoding | telluride_decoding/csv_util.py | 1 | 5394 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to save results to a CSV file.
Each row in the CSV file represents results for one regularization value.
The first column in each row is the regularization value, the rest of the
columns are correlation numbers for the experiments.
"""
import collections
import csv
import os
import numpy as np
from telluride_decoding import plot_util
import tensorflow.compat.v2 as tf
# User should call tf.compat.v1.enable_v2_behavior()
def write_results(file_name, regularization_list, all_results):
""""Writes results to a CSV file.
Args:
file_name: The name of the CSV file to write the results.
regularization_list: A list of the regularization values.
all_results: The correlation results as a 2D array. This results is
generated by regression.py. The first dimension is for each
regularization value, the second dimension is for each tf record file used
for testing.
"""
if len(regularization_list) != len(all_results):
raise ValueError('Length of regularization list and results do no match.')
base_dir = os.path.split(file_name)[0]
if base_dir and not tf.io.gfile.exists(base_dir):
tf.io.gfile.makedirs(base_dir)
with tf.io.gfile.GFile(file_name, 'w') as csv_file:
csv_writer = csv.writer(csv_file)
for i, regularization in enumerate(regularization_list):
row = [str(regularization),]
row.extend([str(value) for value in all_results[i]])
csv_writer.writerow(row)
def _read_results(file_name, skip_header=False):
""""Reads results from a CSV file.
Args:
file_name: The name of the CSV file to read the results.
skip_header: Skip the first line when it is a header.
Returns:
An ordered dictionary with regularization values as the keys and the
correlation results as the values.
"""
results = collections.OrderedDict()
with tf.io.gfile.GFile(file_name, 'r') as csv_file:
content = list(csv.reader(csv_file))
if skip_header:
del content[0]
for row in content:
if len(row) < 2:
raise ValueError('Row %s does not have enough columns.' % row)
regularization_value = row[0]
correlations = row[1:]
results[float(regularization_value)] = [float(c) for c in correlations]
return results
def read_all_results_from_directory(dir_name, skip_header=False, pattern=''):
"""Reads results from all the CSV files in a directory.
Args:
dir_name: A name of the directory with all the CSV files.
skip_header: Skip the first line when it is a header.
pattern: Substring that must be in the files to read.
Returns:
An ordered dictionary with regularization values as the keys and the
correlation results as the values.
"""
all_results = collections.OrderedDict()
file_names = tf.io.gfile.listdir(dir_name)
for name in file_names:
if not name.endswith('csv') or pattern not in name:
continue
curr_name = os.path.join(dir_name, name)
curr_results = _read_results(curr_name, skip_header)
if not all_results:
all_results = curr_results
continue
if all_results.keys() != curr_results.keys():
raise ValueError(
'Files do not have the same regularization values %s vs %s' %
(all_results.keys(), curr_results.keys()))
for regularization_value, correlations in curr_results.items():
all_results[regularization_value].extend(correlations)
return all_results
def plot_csv_results(test_name,
results,
golden_mean_std_dict=None,
png_file_name=None,
show_plot=False):
"""Calculates the mean and standard deviation from the results and plot them.
Args:
test_name: The name of the test that will show in the title of the plot.
results: An ordered dictionary with regularization values as the keys and
the correlation results as the values.
golden_mean_std_dict: The golden results as an ordered dictionary with the
regularization values as the keys and tuples with mean value and standard
deviations as as the values.
png_file_name: If file name is not empty, save the plot to the PNG file.
show_plot: If true, show the plot in a window.
"""
regularization_list = []
mean_list = []
std_list = []
for regularization_value in results.keys():
regularization_list.append(regularization_value)
correlations = results[regularization_value]
mean_list.append(np.mean(correlations))
std_list.append(np.std(correlations))
plot_util.plot_mean_std(
test_name,
regularization_list,
mean_list,
std_list,
golden_mean_std_dict=golden_mean_std_dict,
png_file_name=png_file_name,
show_plot=show_plot)
| apache-2.0 | -7,502,328,681,272,178,000 | 35.945205 | 80 | 0.688172 | false |
qurit/rt-utils | rt_utils/ds_helper.py | 1 | 8750 | import datetime
from rt_utils.image_helper import get_contours_coords
from rt_utils.utils import ROIData, SOPClassUID
import numpy as np
from pydicom.uid import generate_uid
from pydicom.dataset import Dataset, FileDataset, FileMetaDataset
from pydicom.sequence import Sequence
from pydicom.uid import ImplicitVRLittleEndian
"""
File contains helper methods that handles DICOM header creation/formatting
"""
def create_rtstruct_dataset(series_data) -> FileDataset:
ds = generate_base_dataset()
add_study_and_series_information(ds, series_data)
add_patient_information(ds, series_data)
add_refd_frame_of_ref_sequence(ds, series_data)
return ds
def generate_base_dataset() -> FileDataset:
file_name = 'rt-utils-struct'
file_meta = get_file_meta()
ds = FileDataset(file_name, {}, file_meta=file_meta, preamble=b"\0" * 128)
add_required_elements_to_ds(ds)
add_sequence_lists_to_ds(ds)
return ds
def get_file_meta() -> FileMetaDataset:
file_meta = FileMetaDataset()
file_meta.FileMetaInformationGroupLength = 202
file_meta.FileMetaInformationVersion = b'\x00\x01'
file_meta.TransferSyntaxUID = ImplicitVRLittleEndian
file_meta.MediaStorageSOPClassUID = SOPClassUID.RTSTRUCT
file_meta.MediaStorageSOPInstanceUID = generate_uid() # TODO find out random generation is fine
file_meta.ImplementationClassUID = SOPClassUID.RTSTRUCT_IMPLEMENTATION_CLASS
return file_meta
def add_required_elements_to_ds(ds: FileDataset):
dt = datetime.datetime.now()
# Append data elements required by the DICOM standarad
ds.SpecificCharacterSet = 'ISO_IR 100'
ds.InstanceCreationDate = dt.strftime('%Y%m%d')
ds.InstanceCreationTime = dt.strftime('%H%M%S.%f')
ds.StructureSetLabel = 'RTstruct'
ds.StructureSetDate = dt.strftime('%Y%m%d')
ds.StructureSetTime = dt.strftime('%H%M%S.%f')
ds.Modality = 'RTSTRUCT'
ds.Manufacturer = 'Qurit'
ds.ManufacturerModelName = 'rt-utils'
ds.InstitutionName = 'Qurit'
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True
# Set values already defined in the file meta
ds.SOPClassUID = ds.file_meta.MediaStorageSOPClassUID
ds.SOPInstanceUID = ds.file_meta.MediaStorageSOPInstanceUID
ds.ApprovalStatus = 'UNAPPROVED'
def add_sequence_lists_to_ds(ds: FileDataset):
ds.StructureSetROISequence = Sequence()
ds.ROIContourSequence = Sequence()
ds.RTROIObservationsSequence = Sequence()
def add_study_and_series_information(ds: FileDataset, series_data):
reference_ds = series_data[0] # All elements in series should have the same data
ds.StudyDate = reference_ds.StudyDate
ds.SeriesDate = getattr(reference_ds, 'SeriesDate', '')
ds.StudyTime = reference_ds.StudyTime
ds.SeriesTime = getattr(reference_ds, 'SeriesTime', '')
ds.StudyDescription = getattr(reference_ds, 'StudyDescription', '')
ds.SeriesDescription = getattr(reference_ds, 'SeriesDescription', '')
ds.StudyInstanceUID = reference_ds.StudyInstanceUID
ds.SeriesInstanceUID = generate_uid() # TODO: find out if random generation is ok
ds.StudyID = reference_ds.StudyID
ds.SeriesNumber = "1" # TODO: find out if we can just use 1 (Should be fine since its a new series)
def add_patient_information(ds: FileDataset, series_data):
reference_ds = series_data[0] # All elements in series should have the same data
ds.PatientName = getattr(reference_ds, 'PatientName', '')
ds.PatientID = getattr(reference_ds, 'PatientID', '')
ds.PatientBirthDate = getattr(reference_ds, 'PatientBirthDate', '')
ds.PatientSex = getattr(reference_ds, 'PatientSex', '')
ds.PatientAge = getattr(reference_ds, 'PatientAge', '')
ds.PatientSize = getattr(reference_ds, 'PatientSize', '')
ds.PatientWeight = getattr(reference_ds, 'PatientWeight', '')
def add_refd_frame_of_ref_sequence(ds: FileDataset, series_data):
refd_frame_of_ref = Dataset()
refd_frame_of_ref.FrameOfReferenceUID = generate_uid() # TODO Find out if random generation is ok
refd_frame_of_ref.RTReferencedStudySequence = create_frame_of_ref_study_sequence(series_data)
# Add to sequence
ds.ReferencedFrameOfReferenceSequence = Sequence()
ds.ReferencedFrameOfReferenceSequence.append(refd_frame_of_ref)
def create_frame_of_ref_study_sequence(series_data) -> Sequence:
reference_ds = series_data[0] # All elements in series should have the same data
rt_refd_series = Dataset()
rt_refd_series.SeriesInstanceUID = reference_ds.SeriesInstanceUID
rt_refd_series.ContourImageSequence = create_contour_image_sequence(series_data)
rt_refd_series_sequence = Sequence()
rt_refd_series_sequence.append(rt_refd_series)
rt_refd_study = Dataset()
rt_refd_study.ReferencedSOPClassUID = SOPClassUID.DETACHED_STUDY_MANAGEMENT
rt_refd_study.ReferencedSOPInstanceUID = reference_ds.StudyInstanceUID
rt_refd_study.RTReferencedSeriesSequence = rt_refd_series_sequence
rt_refd_study_sequence = Sequence()
rt_refd_study_sequence.append(rt_refd_study)
return rt_refd_study_sequence
def create_contour_image_sequence(series_data) -> Sequence:
contour_image_sequence = Sequence()
# Add each referenced image
for series in series_data:
contour_image = Dataset()
contour_image.ReferencedSOPClassUID = series.file_meta.MediaStorageSOPClassUID
contour_image.ReferencedSOPInstanceUID = series.file_meta.MediaStorageSOPInstanceUID
contour_image_sequence.append(contour_image)
return contour_image_sequence
def create_structure_set_roi(roi_data: ROIData) -> Dataset:
# Structure Set ROI Sequence: Structure Set ROI 1
structure_set_roi = Dataset()
structure_set_roi.ROINumber = roi_data.number
structure_set_roi.ReferencedFrameOfReferenceUID = roi_data.frame_of_reference_uid
structure_set_roi.ROIName = roi_data.name
structure_set_roi.ROIDescription = roi_data.description
structure_set_roi.ROIGenerationAlgorithm = 'MANUAL'
return structure_set_roi
def create_roi_contour(roi_data: ROIData, series_data) -> Dataset:
roi_contour = Dataset()
roi_contour.ROIDisplayColor = roi_data.color
roi_contour.ContourSequence = create_contour_sequence(roi_data, series_data)
roi_contour.ReferencedROINumber = str(roi_data.number)
return roi_contour
def create_contour_sequence(roi_data: ROIData, series_data) -> Sequence:
"""
Iterate through each slice of the mask
For each connected segment within a slice, create a contour
"""
contour_sequence = Sequence()
for i, series_slice in enumerate(series_data):
mask_slice = roi_data.mask[:,:,i]
# Do not add ROI's for blank slices
if np.sum(mask_slice) == 0:
print("Skipping empty mask layer")
continue
contour_coords = get_contours_coords(mask_slice, series_slice, roi_data)
for contour_data in contour_coords:
contour = create_contour(series_slice, contour_data)
contour_sequence.append(contour)
return contour_sequence
def create_contour(series_slice: Dataset, contour_data: np.ndarray) -> Dataset:
contour_image = Dataset()
contour_image.ReferencedSOPClassUID = series_slice.file_meta.MediaStorageSOPClassUID
contour_image.ReferencedSOPInstanceUID = series_slice.file_meta.MediaStorageSOPInstanceUID
# Contour Image Sequence
contour_image_sequence = Sequence()
contour_image_sequence.append(contour_image)
contour = Dataset()
contour.ContourImageSequence = contour_image_sequence
contour.ContourGeometricType = 'CLOSED_PLANAR' # TODO figure out how to get this value
contour.NumberOfContourPoints = len(contour_data) / 3 # Each point has an x, y, and z value
contour.ContourData = contour_data
return contour
def create_rtroi_observation(roi_data: ROIData) -> Dataset:
rtroi_observation = Dataset()
rtroi_observation.ObservationNumber = roi_data.number
rtroi_observation.ReferencedROINumber = roi_data.number
# TODO figure out how to get observation description
rtroi_observation.ROIObservationDescription = 'Type:Soft,Range:*/*,Fill:0,Opacity:0.0,Thickness:1,LineThickness:2,read-only:false'
rtroi_observation.private_creators = 'Qurit Lab'
rtroi_observation.RTROIInterpretedType = ''
rtroi_observation.ROIInterpreter = ''
return rtroi_observation
def get_contour_sequence_by_roi_number(ds, roi_number):
for roi_contour in ds.ROIContourSequence:
# Ensure same type
if str(roi_contour.ReferencedROINumber) == str(roi_number):
return roi_contour.ContourSequence
raise Exception(f"Referenced ROI number '{roi_number}' not found")
| mit | -1,413,595,311,255,209,200 | 41.067308 | 134 | 0.729829 | false |
roatienza/dl-keras | chapter2-neural-networks/mlp-mnist-data_augment-2.1.7.py | 1 | 3959 | '''
A MLP network for MNIST digits classification
Project: https://github.com/roatienza/dl-keras
Usage: python3 <this file>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# numpy package
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import mnist
from keras.utils import to_categorical
# load mnist dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# compute the number of labels
num_labels = np.amax(y_train) + 1
# convert to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# image dimensions (assumed square)
image_size = x_train.shape[1]
input_size = image_size * image_size
# we train our network using float data
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# network parameters
batch_size = 128
hidden_units = 256
dropout = 0.45
data_augmentation = False
epochs = 20
max_batches = 2 * len(x_train) / batch_size
# this is 3-layer MLP with ReLU after each layer
model = Sequential()
model.add(Dense(hidden_units, input_dim=input_size))
model.add(Activation('relu'))
model.add(Dense(hidden_units))
model.add(Activation('relu'))
model.add(Dense(num_labels))
# this is the output for one-hot vector
model.add(Activation('softmax'))
model.summary()
# loss function for one-hot vector
# use of adam optimizer
# accuracy is good metric for classification tasks
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# validate the model on test dataset to determine generalization
# score = model.evaluate(x_test, y_test, batch_size=batch_size)
# print("\nTest accuracy: %.1f%%" % (100.0 * score[1]))
# Run training, with or without data augmentation.
if not data_augmentation:
print('Not using data augmentation.')
# train the network no data augmentation
x_train = np.reshape(x_train, [-1, input_size])
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
# we need [width, height, channel] dim for data aug
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=5.0, # randomly rotate images in the range (deg 0 to 180)
width_shift_range=0.0, # randomly shift images horizontally
height_shift_range=0.0, # randomly shift images vertically
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
for e in range(epochs):
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=batch_size):
x_batch = np.reshape(x_batch, [-1, input_size])
model.fit(x_batch, y_batch, verbose=0)
batches += 1
print("Epoch %d/%d, Batch %d/%d" % (e+1, epochs, batches, max_batches))
if batches >= max_batches:
# we need to break the loop by hand because
# the generator loops indefinitely
break
# Score trained model.
x_test = np.reshape(x_test, [-1, input_size])
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| mit | 4,741,922,929,612,429,000 | 35.657407 | 86 | 0.691589 | false |
nmc-probe/emulab-nome | protogeni/test/listactiveslivers.py | 1 | 1713 | #! /usr/bin/env python
#
# Copyright (c) 2008-2014 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
#
#
import sys
import pwd
import getopt
import os
import re
import xmlrpclib
from M2Crypto import X509
execfile( "test-common.py" )
if admincredentialfile:
f = open( admincredentialfile )
mycredential = f.read()
f.close()
else:
Fatal("You need to supply an admin credential");
pass
#
# Ask manager for its list.
#
params = {}
params["credentials"] = (mycredential,)
rval,response = do_method("cm", "ListActiveSlivers", params)
if rval:
Fatal("Could not get a list of resources")
pass
print response[ "value" ]
| agpl-3.0 | -7,036,149,344,814,738,000 | 27.55 | 72 | 0.737303 | false |
polysquare/polysquare-ci-scripts | ciscripts/deploy/conan/deploy.py | 1 | 4874 | # /ciscripts/deploy/conan/deploy.py
#
# Copy directories into place to prepare for publishing conan project
#
# See /LICENCE.md for Copyright information
"""Copy directories into place to prepare for publishing conan project."""
import argparse
import json
import os
from contextlib import contextmanager
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO # suppress(import-error)
def _get_python_container(cont, util, shell):
"""Get a python 3 installation."""
py_ver = util.language_version("python3")
config_python = "setup/project/configure_python.py"
return cont.fetch_and_import(config_python).get(cont,
util,
shell,
py_ver)
def updated_dict(input_dict, update):
"""Apply update to input_dict and return the result."""
copy = input_dict.copy()
copy.update(update)
return copy
@contextmanager
def temporary_environment(environment):
"""Run child code inside temporarily set environment variables."""
try:
backup = os.environ.copy()
os.environ = environment
yield os.environ
finally:
os.environ = backup
@contextmanager
def captured_messages(util):
"""Capture printed messages."""
old_buffer = util.PRINT_MESSAGES_TO
try:
util.PRINT_MESSAGES_TO = StringIO()
yield util.PRINT_MESSAGES_TO
finally:
util.PRINT_MESSAGES_TO = old_buffer
# suppress(too-many-arguments)
def run_deploy(cont, util, pkg_name, version, block):
"""Run the deploy step and set CONAN_VERSION_OVERRIDE to version."""
update = {"CONAN_VERSION_OVERRIDE": version} if version else {}
upload_desc = "{pkg}/{version}@{block}".format(pkg=pkg_name,
version=version,
block=block)
with util.Task("""Deploying {} to conan""".format(upload_desc)):
with temporary_environment(updated_dict(os.environ, update)):
util.execute(cont,
util.running_output,
"conan",
"export",
block)
util.execute(cont,
util.running_output,
"conan",
"upload",
upload_desc)
def run(cont, util, shell, argv=None):
"""Copy directories into place to prepare for publishing conan project."""
parser = argparse.ArgumentParser("""Conan deployment""")
parser.add_argument("--package-name",
help="""Package name""",
type=str,
required=True)
result = parser.parse_args(argv)
assert os.path.exists("conan_keys")
with open("conan_keys", "r") as conan_keys_file:
conan_keys = json.loads(conan_keys_file.read())
username = conan_keys["username"]
password = conan_keys["password"]
os.environ["REPO_API_KEY"] = str(conan_keys["repo_api_key"])
cont.fetch_and_import("deploy/project/deploy.py").run(cont,
util,
shell)
block = "{user}/{pkg}".format(user=username,
pkg=result.package_name)
cont.fetch_and_import("deploy/project/deploy.py").run(cont,
util,
shell,
["--bump-version-on",
"conanfile.py"])
with _get_python_container(cont, util, shell).activated(util):
with captured_messages(util) as version_stream:
util.execute(cont,
util.running_output,
"python",
"-c",
"import conanfile; "
"print(conanfile.VERSION)")
version_stream.seek(0)
version = str(version_stream.read()).strip()
with util.Task("""Logging in as {}""".format(username)):
util.execute(cont,
util.running_output,
"conan",
"user",
username,
"-p",
password)
run_deploy(cont,
util,
result.package_name,
"master",
block)
run_deploy(cont,
util,
result.package_name,
version,
block)
| mit | -7,925,659,697,915,147,000 | 33.083916 | 79 | 0.489536 | false |
fmfn/FTRLp | FTRLp.py | 1 | 31553 | from __future__ import division
from __future__ import print_function
"""
------------ Follow The Regularized Leader - Proximal ------------
FTRL-P is an online classification algorithm that combines both L1 and L2
norms, and is particularly suited for large data sets with extremely high
dimensionality.

This implementation follows the algorithm by H. B. McMahan et al. It minimizes
the LogLoss function iteratively with a combination of L2 and L1 (centered
at the current point) norms and adaptive, per-coordinate learning rates.

This algorithm is efficient at obtaining sparsity and has proven to perform
very well in massive Click-Through-Rate prediction tasks.

This module contains two objects: DataGen, a generator that reads, hashes and
indexes the data one row at a time, and the FTRL-Proximal classifier that
consumes it.

References:
    * Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems
      and L1 Regularization, H. Brendan McMahan
    * Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et al.
"""
from math import log, exp, fabs, sqrt
from csv import DictReader
from datetime import datetime
from random import random
from hashlib import sha256
def log_loss(y, p):
"""
--- Log_loss computing function
A function to compute the log loss of a predicted probability p given
a true target y.
:param y: True target value
:param p: Predicted probability
:return: Log loss.
"""
p = max(min(p, 1. - 10e-15), 10e-15)
return -log(p) if y == 1 else -log(1. - p)
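# For intuition (numbers are illustrative): log_loss(1, 0.9) ~= 0.105 while
# log_loss(1, 0.1) ~= 2.30; clipping p keeps the log() finite at the extremes.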
def to_hash(value):
"""
Hashes values for hashing trick.
Treats numbers as strings.
:param value: Any value that should be trated as category.
:return: hashed value.
"""
if not isinstance(value, bytes):
value = str(value).encode('utf-8')
hex_value = sha256(value).hexdigest()
int_hash = int(hex_value, 16)
return int_hash
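# A minimal sketch of the hashing trick this function enables (the bucket
# count below is an assumption for the example, not a module constant):
#
#     index = to_hash('city_London') % (2 ** 20)
#
# Every occurrence of the same category maps to the same stable index, so
# no lookup table of seen values is ever needed.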
class DataGen(object):
"""
DataGen is an object to generate the data that is fed to the
classifier.
It reads the data file one row at a time, hashes it
and returns it.
The names and types of columns must be passed to it, so that categorical,
target, numerical and identification columns can be treated differently.
It also keeps track of the name and position of all features to allow
the classifier to keep track of the coefficients by feature.
"""
def __init__(self, max_features, target, descriptive=(), categorical=(), numerical=None, transformation=None):
"""
The object initialized with the maximum number of features to be generated and the
names of the appropriate columns.
Categorical columns are hashed while numerical columns are kept as is, therefore
care must be taken with normalization and pre processing.
:param max_features: The maximum number of features to generate. It includes all
numerical and categorical features. Must be greater than the
number of numerical features.
:param target: The name of the target variable. It must be a binary variable taking
values in {0, 1}.
:param descriptive: Descriptive features that are used to identify the samples but
are not to be used for modelling, such as IDs, public
identifiers, etc.
:param categorical: Categorical variable to be hashed.
:param numerical: Numerical variable. These will not be hashed but will be used in
the modelling phase.
"""
# --- Instance variables.
# Instance variables are created for columns names and the number of numerical
# columns in addition to all of the object's parameters.
# Stores the maximum number of features to generate while hashing
self.mf = max_features
# Stores the name of the target variable.
self.y = target
# Stores a list with the names of all descriptive variables.
self.ids = descriptive
# Stores a list with the names of all categorical variables.
self.cat = categorical
# Stores a list with the names of all numerical variables.
self.num = numerical
# Stores a dictionary with the names of numerical variable to apply a given function to.
self.tra = transformation if transformation is not None else {}
# Dictionary to store names
self.names = {}
# --- Numerical features
# Numerical features are indexed in sorted order. The number
# of features is determined by the variable size. The value
# of each feature is just the value read from the file. Start
# by defining what is numeric. If the user does not pass the
# names of all numerical features, the code will assume
# every columns that is not id, target or categorical is
# numeric and find their name when the training process begin.
if self.num is not None:
self.num_cols = sorted(self.num)
# Store the names in our names dictionary
self.names.update(dict(zip(self.num_cols, range(len(self.num_cols)))))
else:
self.num_cols = []
# --- Something to build model on
# Make sure the user passed some information on the columns to
# be used to build the model upon
assert len(self.cat) + len(self.num_cols) > 0, 'At least one categorical or numerical feature must ' \
'be provided.'
def _fetch(self, path):
"""
This method is the core reason this object exists. It is a python generator
that hashes categorical variables, combines them to numerical variables and
yields all the relevant information, row by row.
:param path: Path of the data file to be read.
:return: YIELDS the current row, ID information, feature values and the target value.
even if the file does not contain a target field it returns a target value
of zero anyway.
"""
for t, row in enumerate(DictReader(open(path))):
# --- Variables
# t: The current line being read
# row: All the values in this line
# --- Ids and other descriptive fields
# Process any descriptive fields and put it all in a list.
ids = []
for ID in self.ids:
ids.append(row[ID])
del row[ID]
# --- Target
# Process target and delete its entry from row if it exists
# otherwise just ignore and move along
y = 0.
if self.y in row:
if row[self.y] == '1':
y = 1.
del row[self.y]
# --- Features
# Initialize an empty dictionary to hold feature
# indexes and their corresponding values.
#
x = {}
# --- Enough features?
# For the very first row make sure we have enough features (max features
# is large enough) by computing the number of numerical columns and
# asserting that the maximum number of features is larger than it.
if t == 0:
# --- Hash size
# Computes a constant to add to hash index, it dictates the
# number of features that will not be hashed
num_size = len(self.num_cols)
size = num_size + len(self.tra)
# Make sure there is enough space for hashing
assert self.mf > size, 'Not enough dimensions to fit all features.'
# --- Numerical Variables
# Now we loop over numerical variables
for i, key in enumerate(self.num_cols):
# --- No transformation
# If no transformation is necessary, just store the actual value
# of the variable.
x[i] = float(row[key])
# --- Transformations
# Create on the fly transformed variables. The user passes a map of the
# name of the new variable to a tuple containing the name of the original
# variable to be transformed and the function to be applied to it.
# Once completed the new name is appended to the names dictionary with its
            # corresponding index.
for i, key in enumerate(self.tra):
# Start by addition to the data array x the new transformed values
# by looping over new_features and applying the transformation to the
# desired old feature.
x[num_size + i] = self.tra[key][1](row[self.tra[key][0]])
# Create a key in names dictionary with the new name and its
# corresponding index.
self.names[key] = num_size + i
# --- Categorical features
# Categorical features are hashed. For each different kind a
# hashed index is created and a value of 1 is 'stored' in that
# position.
for key in self.cat:
# --- Category
# Get the categorial variable from row
value = row[key]
# --- Hash
# One-hot encode everything with hash trick
index = to_hash(key + '_' + value) % (self.mf - size) + size
x[index] = 1.
# --- Save Name
# Save the name and index to the names dictionary if its a new feature
# AND if there's still enough space.
if key + '_' + value not in self.names and len(self.names) < self.mf:
self.names[key + '_' + value] = index
# Yield everything.
yield t, ids, x, y
def train(self, path):
"""
The train method is just a wrapper around the _fetch generator to comply
with sklearn's API.
:param path: The path for the training file.
:return: YIELDS row, features, target value
"""
# --- Generates train data
# This is just a generator on top of the basic _fetch. If this was python 3 I
# could use 'yield from', but I don't think this syntax exists in python 2.7,
# so I opted to use the explicit, less pythonic way.
for t, ids, x, y in self._fetch(path):
# --- Variables
# t: Current row
# ids: List of ID information
# x: Feature values
# y: Target values
yield t, x, y
def test(self, path):
"""
The test method is just a wrapper around the _fetch generator to comply
with sklearn's API.
:param path: The path for the test file.
:return: YIELDS row, features
"""
# --- Generates test data
# This is just a generator on top of the basic _fetch. If this was python 3 I
# could use 'yield from', but I don't think this syntax exists in python 2.7,
# so I opted to use the explicit, less pythonic way.
for t, ids, x, y in self._fetch(path):
# --- Variables
# t: Current row
# ids: List of ID information
# x: Feature values
# y: Target values
yield t, x
class FTRLP(object):
"""
--- Follow The Regularized Leader - Proximal ---
FTRL-P is an online classification algorithm that combines both L1 and L2
norms, particularly suited for large data sets with extremely high dimensionality.
This implementation follow the algorithm by H. B. McMahan et. al. It minimizes
the LogLoss function iteratively with a combination of L2 and L1 (centralized
at the current point) norms and adaptive, per coordinate learning rates.
This algorithm is efficient at obtaining sparsity and has proven to perform
very well in massive Click-Through-Rate prediction tasks.
References:
* Follow-the-Regularized-Leader and Mirror Descent: Equivalent Theorems
and L1 Regularization, H. Brendan McMahan
* Ad Click Prediction: a View from the Trenches, H. Brendan McMahan et. al.
"""
def __init__(self, alpha=1, beta=1, l1=1, l2=1, subsample=1, epochs=1, rate=0):
"""
Initializes the classifier's learning rate constants alpha and beta,
the regularization constants L1 and L2, and the maximum number of
features (limiting factor of the hash function).
The per feature learning rate is given by:
        eta = alpha / ( beta + sqrt( sum g**2 ) )
:param alpha: Learning rate's proportionality constant.
:param beta: Learning rate's parameter.
:param l1: l1 regularization constant.
:param l2: l2 regularization constant.
:return:
"""
# --- Classifier Parameters
# The FTRLP algorithm has four free parameters that can be tuned as pleased.
# Learning rate's proportionality constant.
self.alpha = alpha
# Learning rate's parameter.
self.beta = beta
# L1 regularization constant.
self.l1 = l1
# L2 regularization constant.
self.l2 = l2
# --- Log likelihood
# Stores the log likelihood during the whole
# fitting process.
self.log_likelihood_ = 0
self.loss = []
# --- Weight parameters.
# Lists and dictionaries to hold the weights. Initiate
# the weight vector z and learning rate n as None so that
# when self.train is called multiple times it will not
# overwrite the stored values. This essentially allows epoch
# training to take place, albeit a little bit ugly.
self.z = None
self.n = None
# The weight vector used for prediction is constructed on the fly
# and, in order to keep the memory cost low, it is a dictionary
# that receives values and keys as needed.
# --- Coefficients
# Lists to store the coefficients and their corresponding names.
# Initialized to None and constructed once the training method is
# completed. In case of multiple epochs, these quantities will be
# computed multiple times.
self.coef_ = {}
self.cname = None
# --- Target Ratio
        # Store the ratio of each class of a binary target variable to use
# it to make weighted discrete label predictions.
self.target_ratio = 0.
# --- Printing Rate
# Number of samples to train and predict on before printing
# current status
self.rate = rate
# --- Subsample
        # While online methods can't be shuffled, combining subsampling of
        # the training set with multiple epoch training gives similar results.
self.subsample = subsample
# --- Epochs
        # Number of complete passes over the training file.
self.epochs = epochs
# --- Flag for partial fit
# Keeps a flag to allow the user to train multiple times
# without overwriting the object.
self.fit_flag = False
def _build_p(self, data_gen, path):
        # Maybe it is worth migrating the weight construction algorithm
        # here; that could clean up the code a little bit
        # in both the train and predict methods.
pass
def _clear_params(self):
"""
If the fit method is called multiple times, all trained parameters
must be cleared allowing for a fresh start. This function simply
resets everything back to square one.
:return: Nothing
"""
# All models parameters are set to their original value (see
# __init__ description
self.log_likelihood_ = 0
self.loss = []
self.z = None
self.n = None
self.coef_ = {}
self.cname = None
def get_params(self, deep=True):
"""
        A function to return a map of parameter names and values.
        :param deep: Accepted for scikit-learn API compatibility; unused here.
:return: Dictionary mapping parameters names to their values
"""
ps = {'alpha': self.alpha,
'beta': self.beta,
'l1': self.l1,
'l2': self.l2,
'subsample': self.subsample,
'epochs': self.epochs,
'rate': self.rate}
return ps
def set_params(self, **params):
"""
        Set object attributes from keyword parameters.
        :param params: Mapping of attribute names to new values.
        :return: Nothing
"""
for key, value in params.iteritems():
setattr(self, key, value)
def _update(self, y, p, x, w):
"""
# --- Update weight vector and learning rate.
# With the prediction round completed we can proceed to
# updating the weight vector z and the learning rate eta
# based on the last observed label.
# To do so we will use the computed probability and target
# value to find the gradient loss and continue from there.
# The gradient for the log likelihood for round t can easily
# be shown to be:
# g_i = (p - y) * x_i, (round t)
# The remaining quantities are updated according to the
# minimization procedure outlined in [2].
:param y: True target variable
:param p: Predicted probability for the current sample
:param x: Non zero feature values
:param w: Weights
:return: Nothing
"""
# --- Update loop
# Loop over all relevant indexes and update all values
# accordingly.
for i in x.keys():
# --- Compute Gradient of LogLoss
g = (p - y) * x[i]
# --- Update constant sigma
# Note that this upgrade is equivalent to
# (eta_(t, i))^-1 - (eta_(t - 1, i))^-1
# as discussed in [2].
s = (sqrt(self.n[i] + g * g) - sqrt(self.n[i])) / self.alpha
# --- Increment changes
# Finally, increment the appropriate changes to weights and
# learning rate vectors.
self.z[i] += g - s * w[i]
self.n[i] += g * g
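    # Worked micro-example for the update above (numbers are illustrative):
    # with y = 1, p = 0.8 and x_i = 1.0 the gradient is g = -0.2, so n_i
    # grows by g * g = 0.04 and z_i shifts by g - s * w_i.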
def _train(self, data_gen, path):
"""
--- Fitting method ---
Online fitting method. It takes one sample at a time, builds
the weight vector on the fly and computes the dot product of
weight vector and values and a prediction is made.
Then the true label of the target variable is observed and the
loss is added.
Once this is completed the weights are updated based on the
previously observed values.
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
        # Lazily initialize the weight vector z and learning rate n so that
        # repeated calls (multiple epochs, partial_fit) accumulate state
        # instead of overwriting it.
if self.z is None and self.n is None:
self.z = [0.] * data_gen.mf
self.n = [0.] * data_gen.mf
# --- Start the clock!
start_time = datetime.now()
for t, x, y in data_gen.train(path):
# --- Variables
# t: Current row
# x: Feature values
# y: Target values
# --- Target Ratio Update
# Rolling calculation of the target average
self.target_ratio = (1.0 * (t * self.target_ratio + y)) / (t + 1)
# --- Stochastic sample selection
            # Choose whether or not to use a sample at training time.
            # Since online methods can't really be shuffled, this can be
            # combined with multiple epochs to create heterogeneity.
            if random() > self.subsample and (self.rate == 0 or
                                              (t + 1) % self.rate != 0):
                continue
# --- Dot product init.
# The dot product is computed as the weights are calculated,
# here it is initiated at zero.
wtx = 0
# --- Real time weights
# Initialize an empty dictionary to hold the weights
w = {}
# --- Weights and prediction
            # Compute the weights using the indexes and values present
            # in the x dictionary, and make a prediction.
            # This first loop builds the weight vector on the fly. Since
            # we expect most weights to be zero, the weight vector can
            # be constructed in real time. Furthermore, there is no
            # reason to store it, nor to clear it, since at each
            # iteration only the relevant indexes are populated and used.
for indx in x.keys():
# --- Loop over indicator I
# x.keys() carries all the indexes of the feature
# vector with non-zero entries. Therefore, we can
# simply loop over it since anything else will not
# contribute to the dot product w.x, and, consequently
# to the prediction.
if fabs(self.z[indx]) <= self.l1:
# --- L1 regularization
# If the condition on the absolute value of the
# vector Z is not met, the weight coefficient is
# set exactly to zero.
w[indx] = 0
else:
# --- Non zero weight
# Provided abs(z_i) is large enough, the weight w_i
# is computed. First, the sign of z_i is determined.
sign = 1. if self.z[indx] >= 0 else -1.
                    # Then the value of w_i is computed and stored. Note
# that any previous value w_i may have had will be
# overwritten here. Which is fine since it will not
# be used anywhere outside this (t) loop.
w[indx] = - (self.z[indx] - sign * self.l1) / \
(self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha)
# --- Update dot product
                # Once the value of w_i is computed we can use it to
                # compute the i-th contribution to the dot product w.x.
                # This is done inside the index loop, computing only
                # coordinates that could possibly be non-zero.
wtx += w[indx] * x[indx]
# --- Make a prediction
# With the w.x dot product in hand we can compute the output
# probability by putting wtx through the sigmoid function.
# We limit wtx value to lie in the [-35, 35] interval to
# avoid round off errors.
p = 1. / (1. + exp(-max(min(wtx, 35.), -35.)))
# --- Update the loss function
# Now we look at the target value and use it, together with the
# output probability that was just computed to find the loss we
# suffer this round.
self.log_likelihood_ += log_loss(y, p)
# --- Verbose section
if (self.rate > 0) and (t + 1) % self.rate == 0:
# Append to the loss list.
self.loss.append(self.log_likelihood_)
# Print all the current information
print('Training Samples: {0:9} | '
'Loss: {1:11.2f} | '
'Time taken: {2:4} seconds'.format(t + 1,
self.log_likelihood_,
(datetime.now() - start_time).seconds))
# --- Update weights
            # Finally, we know how well we did this round and move on to
# updating the weights based on the current status of our
# knowledge.
self._update(y, p, x, w)
# --- Coefficient names and indexes
# Bind the feature names to their corresponding coefficient obtained from
# the regression.
self.coef_.update(dict([[key, self.z[data_gen.names[key]]] for key in data_gen.names.keys()]))
def fit(self, data_gen, path):
"""
Epoch wrapper around the main fitting method _train
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
# --- Check fit flag
        # Make sure the fit method is starting from a clean slate by
# checking the fit_flag variable and calling the _clear_params
# function if necessary.
# While always calling _clear_params would do the job, by setting
# this flag we are also able to call fit multiple times WITHOUT
# clearing all parameters --- See partial_fit.
if self.fit_flag:
self._clear_params()
# --- Start the clock!
total_time = datetime.now()
# Train epochs
for epoch in range(self.epochs):
# --- Start the clock!
epoch_time = datetime.now()
# --- Verbose
# Print epoch if verbose is turned on
if self.rate > 0:
print('TRAINING EPOCH: {0:2}'.format(epoch + 1))
print('-' * 18)
self._train(data_gen, path)
# --- Verbose
# Print time taken if verbose is turned on
if self.rate > 0:
print('EPOCH {0:2} FINISHED IN {1} seconds'.format(epoch + 1,
(datetime.now() - epoch_time).seconds))
print()
# --- Verbose
# Print fit information if verbose is on
if self.rate > 0:
print(' --- TRAINING FINISHED IN '
'{0} SECONDS WITH LOSS {1:.2f} ---'.format((datetime.now() - total_time).seconds,
self.log_likelihood_))
print()
# --- Fit Flag
# Set fit_flag to true. If fit is called again this is will trigger
# the call of _clean_params. See partial_fit for different usage.
self.fit_flag = True
def partial_fit(self, data_gen, path):
"""
Simple solution to allow multiple fit calls without overwriting
previously calculated weights, losses and etc.
:param data_gen: An instance of the DataGen class
:param path: The path to the training set
:return:
"""
# --- Fit Flag
        # Start by resetting fit_flag to False to "trick"
# the fit method into keep training without overwriting
# previously calculated quantities.
self.fit_flag = False
# --- Fit
# Call the fit method and proceed as normal
self.fit(data_gen, path)
def predict_proba(self, data_gen, path):
"""
--- Predicting Probabilities method ---
        Computes the predicted probability for each sample in the test set.
:param data_gen: An instance of the DataGen class
:param path: The path to the test set
:return: A list with predicted probabilities
"""
# --- Results
# Initialize an empty list to hold predicted values.
result = []
# --- Start the clock!
start_time = datetime.now()
for t, x in data_gen.test(path):
# --- Variables
# t: Current row
# x: Feature values
# --- Dot product init.
# The dot product is computed as the weights are calculated,
# here it is initiated at zero.
wtx = 0
# --- Real time weights
# Initialize an empty dictionary to hold the weights
w = {}
# --- Weights and prediction
            # Compute the weights using the indexes and values present
            # in the x dictionary, and make a prediction.
            # This first loop builds the weight vector on the fly. Since
            # we expect most weights to be zero, the weight vector can
            # be constructed in real time. Furthermore, there is no
            # reason to store it, nor to clear it, since at each
            # iteration only the relevant indexes are populated and used.
for indx in x.keys():
# --- Loop over indicator I
# x.keys() carries all the indexes of the feature
# vector with non-zero entries. Therefore, we can
# simply loop over it since anything else will not
# contribute to the dot product w.x, and, consequently
# to the prediction.
if fabs(self.z[indx]) <= self.l1:
# --- L1 regularization
# If the condition on the absolute value of the
# vector Z is not met, the weight coefficient is
# set exactly to zero.
w[indx] = 0
else:
# --- Non zero weight
# Provided abs(z_i) is large enough, the weight w_i
# is computed. First, the sign of z_i is determined.
sign = 1. if self.z[indx] >= 0 else -1.
                    # Then the value of w_i is computed and stored. Note
# that any previous value w_i may have had will be
# overwritten here. Which is fine since it will not
# be used anywhere outside this (t) loop.
w[indx] = - (self.z[indx] - sign * self.l1) / \
(self.l2 + (self.beta + sqrt(self.n[indx])) / self.alpha)
# --- Update dot product
                # Once the value of w_i is computed we can use it to
                # compute the i-th contribution to the dot product w.x.
                # This is done inside the index loop, computing only
                # coordinates that could possibly be non-zero.
wtx += w[indx] * x[indx]
# --- Make a prediction
# With the w.x dot product in hand we can compute the output
            # probability by putting wtx through the sigmoid function.
            # We limit the wtx value to lie in the [-35, 35] interval to
# avoid round off errors.
result.append(1. / (1. + exp(-max(min(wtx, 35.), -35.))))
            # --- Verbose section
            if self.rate > 0 and (t + 1) % self.rate == 0:
# print some stuff
print('Test Samples: {0:8} | '
'Time taken: {1:3} seconds'.format(t + 1,
(datetime.now() - start_time).seconds))
# All done, return the predictions!
return result
def predict(self, data_gen, path):
"""
--- Predicting method ---
        Predicts binary class labels by thresholding the predicted
        probabilities at the observed target ratio.
        :param data_gen: An instance of the DataGen class
        :param path: The path to the test set
        :return: A list with predicted binary labels in {0, 1}
"""
# --- Probabilities
# Compute probabilities by invoking the predict_proba method
probs = self.predict_proba(data_gen, path)
# --- Return
# Return binary labels. The threshold is set using the mean value of the
# target variable.
return map(lambda x: 0 if x <= self.target_ratio else 1, probs) | mit | -5,593,943,148,002,153,000 | 37.527473 | 114 | 0.565461 | false |
abitofalchemy/hrg_nets | bters.py | 1 | 13299 | import shelve
import networkx as nx
import pandas as pd
import numpy as np
import math
import os
import sys
import re
import argparse
import traceback
import net_metrics as metrics
from glob import glob
__version__ = "0.1.0"
__author__ = ['Salvador Aguinaga']
# alchemee: analyzes the BTER-generated graphs
def get_basic_stats(grphs,gen_mod, name):
df = pd.DataFrame()
for g in grphs:
tdf = [pd.Series(g.degree().values()).mean(), pd.Series(nx.clustering(g).values()).mean()]
df = df.append([tdf])
df.columns=['avg_k','avg_cc']
    return df.to_csv()
def get_degree_dist(grphs,gen_mod, name):
mf = pd.DataFrame()
for g in grphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby([1]).count()
mf = pd.concat([mf, gb], axis=1)
mf['pk'] = mf.mean(axis=1)/float(g.number_of_nodes())
mf['k'] = mf.index.values
#print mf
out_tsv = '../Results/{}_{}_degree.tsv'.format(name,gen_mod)
mf[['k','pk']].to_csv(out_tsv, sep='\t', index=False, header=True, mode="w")
def get_clust_coeff(grphs,gen_mod, name):
mf = pd.DataFrame()
for g in grphs:
df = pd.DataFrame.from_dict(g.degree().items())
df.columns=['v','k']
cf = pd.DataFrame.from_dict(nx.clustering(g).items())
cf.columns=['v','cc']
df = pd.merge(df,cf,on='v')
mf = pd.concat([mf, df])
gb = mf.groupby(['k']).mean()
out_tsv = "../Results/{}_{}_clustering.tsv".format(name,gen_mod)
gb[['cc']].to_csv(out_tsv, sep="\t", header=True, index=True)
def degree_prob_distributuion(orig_g_M, otherModel_M, name):
print 'draw degree probability distribution'
if orig_g_M is not None:
dorig = pd.DataFrame()
for g in orig_g_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "---<>--- orig", name
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
            sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
if otherModel_M is not None:
dorig = pd.DataFrame()
for g in otherModel_M:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "---<>--- otherModel_M", name
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
sa = int(math.ceil(zz/float(75)))
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
def network_value_distribution(orig_g_M, otherModel_M, name):
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in orig_g_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "orig"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
eig_cents = [nx.eigenvector_centrality_numpy(g) for g in otherModel_M] # nodes with eigencentrality
net_vals = []
for cntr in eig_cents:
net_vals.append(sorted(cntr.values(), reverse=True))
df = pd.DataFrame(net_vals)
print "other model"
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/75))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
def hop_plots(orig_g_M, otherModel_M, name):
m_hops_ar = []
for g in orig_g_M:
c = metrics.get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
df = pd.DataFrame(m_hops_ar)
print '-- orig graph --\n'
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/float(75)))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
print '-- the other model --\n'
m_hops_ar = []
for g in otherModel_M:
c = metrics.get_graph_hops(g, 20)
d = dict(c)
m_hops_ar.append(d.values())
break
df = pd.DataFrame(m_hops_ar)
l = list(df.mean())
zz = float(len(l))
if not zz == 0:
sa = int(math.ceil(zz/float(75)))
for i in range(0, len(l), sa):
print "(" + str(i) + "," + str(l[i]) + ")"
def clustering_coefficients(orig_g_M, otherModel_M, name):
    if len(orig_g_M) != 0:
dorig = pd.DataFrame()
for g in orig_g_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
        sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
    if len(otherModel_M) != 0:
dorig = pd.DataFrame()
for g in otherModel_M:
degdf = pd.DataFrame.from_dict(g.degree().items())
ccldf = pd.DataFrame.from_dict(nx.clustering(g).items())
dat = np.array([degdf[0], degdf[1], ccldf[1]])
df = pd.DataFrame(np.transpose(dat))
df = df.astype(float)
df.columns = ['v', 'k', 'cc']
dorig = pd.concat([dorig, df]) # Appends to bottom new DFs
print "otherModel_M"
gb = dorig.groupby(['k'])
zz = len(gb['cc'].mean().values)
sa = int(math.ceil(zz/75))
if sa == 0: sa=1
for x in range(0, len(gb['cc'].mean().values), sa):
print "(" + str(gb['cc'].mean().index[x]) + ", " + str(gb['cc'].mean().values[x]) + ")"
return
def assortativity(orig_g_M, otherModel_M, name):
    if len(orig_g_M) != 0:
dorig = pd.DataFrame()
for g in orig_g_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "orig"
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
        sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
    if len(otherModel_M) != 0:
dorig = pd.DataFrame()
for g in otherModel_M:
kcdf = pd.DataFrame.from_dict(nx.average_neighbor_degree(g).items())
kcdf['k'] = g.degree().values()
dorig = pd.concat([dorig, kcdf])
print "the other model ", name
gb = dorig.groupby(['k'])
zz = len(gb[1].mean().values)
        sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(gb[1].mean().values), sa):
print "(" + str(gb.mean().index[x]) + ", " + str(gb[1].mean().values[x]) + ")"
return
def kcore_decomposition(orig_g_M, otherModel_M, name):
dorig = pd.DataFrame()
for g in orig_g_M:
g.remove_edges_from(g.selfloop_edges())
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "orig"
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
        sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
dorig = pd.DataFrame()
for g in otherModel_M:
d = nx.core_number(g)
df = pd.DataFrame.from_dict(d.items())
df[[0]] = df[[0]].astype(int)
gb = df.groupby(by=[1])
dorig = pd.concat([dorig, gb.count()], axis=1) # Appends to bottom new DFs
print "== the other model =="
if not dorig.empty :
zz = len(dorig.mean(axis=1).values)
        sa = int(math.ceil(zz/75.0))
if sa == 0: sa=1
for x in range(0, len(dorig.mean(axis=1).values), sa):
print "(" + str(dorig.mean(axis=1).index[x]) + ", " + str(dorig.mean(axis=1).values[x]) + ")"
return
def alchemee(graph,graphName):
g = graph
gn = graphName
lst_files = glob("../BTERgraphs/*{}*th.tsv".format(gn))
for j,f in enumerate(lst_files):
print '--<{}>-- {} --'.format(j,f)
a = nx.read_edgelist(f)
# degree_prob_distributuion( [g], [a], gn)
# print '-- network value --'
# network_value_distribution([g], [a], gn)
# print '-- Hop Plot --'
# hop_plots([g], [a], gn)
# print '\tclustering coeffs -- \n'
# clustering_coefficients([g], [a], gn)
print '\tdraw_assortativity_coefficients -- \n'
assortativity([g], [a], gn)
# print '\tdraw_kcore_decomposition -- \n'
# kcore_decomposition([g], [a], gn)
return
def get_parser():
parser = argparse.ArgumentParser(description='shelves: Process Infinity Mirror Graphs')
parser.add_argument('--g', metavar='GRAPH', help='graph edge-list')
parser.add_argument('--version', action='version', version=__version__)
return parser
def main():
global name
parser = get_parser()
args = vars(parser.parse_args())
if not args['g']:
parser.print_help()
os._exit(1)
print args['g']
try:
cg = nx.read_edgelist(args['g'])
# shlv = shelve.open(args['shl'])
except Exception, e:
print str(e)
cg = nx.read_edgelist(args['g'], comments="%")
    name = os.path.splitext(os.path.basename(args['g']))[0]
if 1:
alchemee(cg, name)
print 'alchemee: Done'
exit(0)
if 1:
lst_files = glob("../Results/synthg_*"+ str(name)+ "*.shl")
for j,shlf in enumerate(lst_files):
shlv = shelve.open(shlf)
print "====>", j, len(shlv['clgm'][0]), len(shlv['kpgm'][0]), len(shlv['kpgm'][0][0]), type(shlv['kpgm'][0][0])
# print '\tdraw_degree_probability_distribution', '-'*40
# metrics.draw_degree_probability_distribution(orig_g_M=[cg], HRG_M=[], pHRG_M=[], chunglu_M=shlv['clgm'][0], kron_M=shlv['kpgm'][0]) #( chunglu_M, HRG_M, pHRG_M, kron_M)
# print '\tdraw_network_value','-'*40
# metrics.draw_network_value([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_hop_plot','-'*40
# metrics.draw_hop_plot([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_kcore_decomposition','-'*40
# metrics.draw_kcore_decomposition([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_clustering_coefficients','-'*40
# metrics.draw_clustering_coefficients([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# print '\tdraw_assortativity_coefficients','-'*40
# metrics.draw_assortativity_coefficients([cg], shlv['clgm'][0], [], [], shlv['kpgm'][0])
# metrics.draw_diam_plot([], [chunglu_M, HRG_M, pHRG_M, kron_M] )
# metrics.draw_degree_rank_plot(G, chunglu_M)
# metrics.draw_network_value(G, chunglu_M)
# print '-- degree dist --'
# degree_prob_distributuion( [cg], shlv['kpgm'][0], name)
# print '-- network value --'
# network_value_distribution([cg], shlv['kpgm'][0], name)
# print '-- Hop Plot --'
# hop_plots([cg], [shlv['kpgm'][0][0]], name)
# print '-- clustering coeffs --'
# clustering_coefficients([cg], shlv['kpgm'][0], name)
# print '\tdraw_assortativity_coefficients','-'*40
# assortativity([cg], shlv['kpgm'][0], name)
print '\tdraw_kcore_decomposition','-'*40
kcore_decomposition([cg], shlv['kpgm'][0], name)
else:
lst_files = glob("../Results/*"+ str(name)+ "*.shl")
with open('../Results/{}_gcd_infinity.txt'.format(str(name)), 'w') as tmp:
tmp.write('-- {} ----\n'.format(name))
for j,shlf in enumerate(lst_files):
print "--"+ shlf + "-"*40
shlv = shelve.open(shlf)
df_g = metrics.external_rage(cg)
gcm_g = metrics.tijana_eval_compute_gcm(df_g)
clgm_gcd = []
kpgm_gcd = []
tmp.write('---- clgm ----\n')
for i,sg in enumerate(shlv['clgm'][0]):
df = metrics.external_rage(sg)
gcm_h = metrics.tijana_eval_compute_gcm(df)
s = metrics.tijana_eval_compute_gcd(gcm_g, gcm_h)
#
# tmp.write("(" + str(i) + "," + str(s) + ')\n')
clgm_gcd.append(s)
tmp.write("(" +str(j) +"," + str(np.mean(clgm_gcd)) + ')\n')
tmp.write('---- kpgm ----\n')
for i,sg in enumerate(shlv['kpgm'][0]):
df = metrics.external_rage(sg)
gcm_h = metrics.tijana_eval_compute_gcm(df)
s = metrics.tijana_eval_compute_gcd(gcm_g, gcm_h)
#
# tmp.write("(" + str(i) + "," + str(s) + ')\n')
kpgm_gcd.append(s)
tmp.write("(" +str(j) +"," + str(np.mean(kpgm_gcd)) + ')\n')
if __name__ == "__main__":
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
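# Example invocation (the path is an assumption for illustration):
#   python bters.py --g ../datasets/some_graph.txt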
| gpl-3.0 | -7,554,848,798,561,009,000 | 32.330827 | 176 | 0.558764 | false |
infinisql/infinisql | manager/infinisqlmgr/management_server.py | 1 | 4169 | __author__ = 'Christopher Nelson'
import logging
import os
import signal
import time
from infinisqlmgr import common, management
def start_management_server(config):
from infinisqlmgr.management import util
common.configure_logging(config)
cluster_name = config.get("management", "cluster_name")
existing_pid = util.get_pid(config.dist_dir, cluster_name)
if existing_pid is not None:
logging.error("A management process appears to exist already. You should run the 'manager stop' command first "
"to make sure the existing process has stopped.")
return 1
logging.debug("forking management server")
pid = os.fork()
if pid!=0:
util.write_pid(config.dist_dir, cluster_name, pid)
logging.info("Parent start_management_server() finished")
return 0
logging.debug("creating management process")
management_server = management.Controller(config)
logging.debug("starting management process")
return management_server.run()
def stop_management_server(config):
from infinisqlmgr.management import util
common.configure_logging(config)
cluster_name = config.get("management", "cluster_name")
existing_pid = util.get_pid(config.dist_dir, cluster_name)
if existing_pid is not None:
logging.info("Trying to stop the existing process at pid %d", existing_pid)
try:
os.kill(existing_pid, signal.SIGTERM)
except ProcessLookupError:
logging.debug("the management process is not running")
else:
logging.info("Waiting for process %d exit", existing_pid)
try:
pid, exit_status = os.waitpid(existing_pid, 0)
except ChildProcessError:
# We ignore this because the child process might have already gone away, and we
# won't be able to get status information about it.
pass
else:
return_code = exit_status >> 8
logging.debug("management process exited with code %d", return_code)
if return_code!=0:
logging.warning("There was an error while stopping the management process, check the logs for more detail.")
# Make sure that the pid file is gone, even if it's empty.
if util.exists(config.dist_dir, cluster_name):
run_path = util.get_run_path(config.dist_dir, cluster_name)
logging.debug("deleting run file at: %s", run_path)
os.unlink(run_path)
logging.info("Stopped management process for cluster: %s" % cluster_name)
def restart_management_server(config):
stop_management_server(config)
time.sleep(1)
start_management_server(config)
def add_args(sub_parsers):
mgr_parser = sub_parsers.add_parser('manager', help='Options for controlling a management process')
mgr_parser.add_argument('--no-background', dest='daemonize', action='store_false',
default=True,
help='Do not run the manager in the background. Useful for debugging. (default is off)')
mgr_parser.add_argument('--cluster-name', dest='cluster_name',
default="default_cluster",
help='Set the cluster name to join. If the cluster does not exist it will be created. '
'(default is %(default)s)')
ss_parsers = mgr_parser.add_subparsers()
start_parser = ss_parsers.add_parser('start', help='Start a management process')
mgr_parser.add_argument('--listen-interface', dest='management_interface',
default="*",
help='Set the interface to listen on.'
'(default is %(default)s)')
start_parser.set_defaults(func=start_management_server)
stop_parser = ss_parsers.add_parser('stop', help='Stop a management process')
stop_parser.set_defaults(func=stop_management_server)
restart_parser = ss_parsers.add_parser('restart', help='Restart a management process')
restart_parser.set_defaults(func=restart_management_server)
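    # Illustrative command lines wired up above (the executable name is an
    # assumption; only the manager sub-commands are defined here):
    #   <prog> manager --cluster-name default_cluster start
    #   <prog> manager --cluster-name default_cluster stop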
| gpl-3.0 | -880,550,437,333,261,600 | 41.111111 | 128 | 0.638283 | false |
ndparker/tdi3 | tdi/_abstract.py | 1 | 1329 | # -*- coding: ascii -*-
u"""
:Copyright:
Copyright 2017
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================
ABC base setup
================
ABCs base setup
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import abc as _abc
# pylint: disable = invalid-name
base = type.__new__(_abc.ABCMeta, 'base', (), {})
method = _abc.abstractmethod
def make_impl(space):
""" Make impl function """
def impl(*which):
""" Register implementation for abstract ... """
def inner(cls):
""" Decorator """
for target in which:
if isinstance(target, str):
target = space[target]
target.register(cls)
return cls
return inner
return impl
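# A sketch of the intended wiring (the names 'space' and the classes below
# are assumptions for illustration):
#
#     space = globals()
#     impl = make_impl(space)
#
#     class iface(base):              # an abstract interface
#         @method
#         def run(self): pass
#
#     @impl('iface')                  # registers Impl as an implementation
#     class Impl(object):
#         def run(self): return 42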
| apache-2.0 | -4,359,358,447,965,685,000 | 25.058824 | 73 | 0.638074 | false |
uzh/vm-mad | vmmad/provider/__init__.py | 1 | 2399 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Interfaces to VM/node providers.
"""
# Copyright (C) 2011, 2012 ETH Zurich and University of Zurich. All rights reserved.
#
# Authors:
# Riccardo Murri <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
__docformat__ = 'reStructuredText'
__version__ = '$Revision$'
# stdlib imports
from abc import abstractmethod
from copy import copy
# local imports
from vmmad import log
from vmmad.orchestrator import VmInfo
class NodeProvider(object):
"""
Abstract base class describing the interface that a node provider
should implement.
"""
def __init__(self, image, kind):
"""
Initialize a node provider instance.
The `image` and `kind` arguments specify the features of the
VM instances that are later created by `start_vm`.
"""
pass
@abstractmethod
def start_vm(self, vm):
"""
Start a new VM.
Return a `VmInfo` object describing the started virtual
machine, which can be passed to the `stop_vm` method to stop
it later.
"""
pass
@abstractmethod
def update_vm_status(self, vms):
"""
Query cloud providers and update each `VmInfo` object in list
`vms` *in place* with the current VM node status.
"""
pass
@abstractmethod
def stop_vm(self, vm):
"""
Stop a running VM.
After this method has successfully completed, the VM must no
longer be running, all of its resources have been freed, and
-most importantly- nothing is being charged to the account
used for initialization.
The `vm` argument is a `VmInfo` object, on which previous
`start_vm` call should have recorded instance information.
"""
pass
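    # A minimal concrete provider might look like this (illustrative only;
    # the string state values used below are assumptions, not VmInfo API):
    #
    #     class DummyProvider(NodeProvider):
    #         def start_vm(self, vm):
    #             vm.state = 'STARTING'
    #             return vm
    #         def update_vm_status(self, vms):
    #             pass
    #         def stop_vm(self, vm):
    #             vm.state = 'DOWN'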
| apache-2.0 | 2,496,700,895,021,067,300 | 26.261364 | 84 | 0.657357 | false |
NileshPS/OS-and-Networking-programs | 5_ftp/client.py | 1 | 5165 | #!/usr/bin/python3
import socket
import os
import sys
import logging as log
import getpass
from helper import *
log.basicConfig(format="[%(levelname)s] %(message)s", level=log.DEBUG)
class FTPError(Exception):
pass
class FTPClient:
def __init__(self):
self.sock = None
self.is_connected = False
self.is_authenticated = False
self.server_name = ''
# Establish a connection with remote FTP host
def open(self, hostname='', port=3302):
if self.is_connected:
raise FTPError(
'Already connected to %s, use close first.' % self.server_name)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(5)
try:
port = int(port)
except ValueError:
raise FTPError("Bad port address.")
self.sock.connect((hostname, port))
# Ping the server
sock_send(self.sock, FTPRequest('ping'))
# Print response
print(sock_recv(self.sock))
self.server_name = hostname # Save hostname for later
self.is_connected = True
# Initialise authentication procedure
self.init_auth()
def is_open(self):
return self.is_connected
def init_auth(self):
username = input("Name (%s) : " % self.server_name).strip()
passwd = getpass.getpass("Password : ")
# Authenticate with server
sock_send(self.sock, FTPRequest('auth', [username, passwd]))
response = sock_recv(self.sock)
if response.code // 100 != 2:
            raise FTPError('%d %s' % (response.code, 'Login incorrect.'))
print(response.message)
self.is_authenticated = True
def send(self, query):
if not self.is_connected:
raise FTPError('Not Connected.')
if not self.is_authenticated:
raise FTPError('530 Please login with USER and PASS.')
if len(query) == 0:
return None # Silently ignore
elif query[0] == 'get' or query[0] == 'put':
if len(query) != 2:
raise FTPError('Please provide a filename.')
if query[0] == 'put':
try:
pack = FTPRequest('put', [
FileWrapper(query[1],
open(query[1], 'rb').read())])
sock_send(self.sock, pack)
return sock_recv(self.sock)
except OSError as oe:
raise FTPError(str(oe))
# else
pack = FTPRequest(query[0], query[1:])
sock_send(self.sock, pack)
return sock_recv(self.sock)
def close(self):
if (self.sock is not None):
sock_send(self.sock, FTPRequest('close'))
self.sock.close()
self.is_connected = False
self.is_authenticated = False
self.server_name = ''
self.sock = None
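# Typical interactive session this client supports (values are examples):
#   ftp> open localhost 3302    -> connect, then USER/PASS style auth
#   ftp> get remote.txt         -> download into the current directory
#   ftp> put local.txt          -> upload, wrapped in a FileWrapper
#   ftp> close                  -> tear down the session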
client = FTPClient()
def main():
global client
while True:
try:
query = input("ftp> ").strip().split(" ")
if len(query) == 0:
continue
if query[0] == '?':
# Show a list of available features
print(' '.join(COMMANDS))
elif query[0] == 'open':
# Establish a remote connection
if len(query) == 1:
query.append(input("(to) "))
client.open(query[1], query[2] if len(query) > 2 else 3302)
elif query[0] == 'close':
client.close()
log.info("Disconnected.")
elif query[0] == 'exit':
client.close()
break
elif query[0] == 'lcd':
try:
if len(query) == 2:
os.chdir(query[1])
except Exception as e:
raise FTPError(str(e))
elif query[0] not in COMMANDS:
log.error("Invalid command. Type ? for help")
else:
response = client.send(query)
if response.action == FTPResponse.ACTION_DISPLAY:
log.info(response)
log.info(response.data.decode('utf8'))
elif response.action == FTPResponse.ACTION_SAVE:
if type(response.data) != FileWrapper:
raise TypeError(
"Expected type of FileWrapper in Response.data." +
" Got %s." % str(type(response.data)))
try:
response.data.write(os.getcwd())
log.info(str(response))
except OSError as e:
log.error(str(e))
elif response.action == FTPResponse.ACTION_IGNORE:
log.info(response)
except FTPError as fe:
log.error(str(fe))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt as e:
if client is not None:
client.close()
client = None
sys.exit(0)
| gpl-3.0 | 5,413,776,981,236,701,000 | 32.322581 | 79 | 0.501839 | false |
juanc27/myfavteam | mysite/urls.py | 1 | 1833 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(
regex=r'^team/(?P<team_id>\d+)/$',
view='myfavteam.views.index'),
url(
regex=r"^$",
view='myfavteam.views.index'),
url(
regex=r'^news/team/(?P<team_id>\d+)/$',
view='myfavteam.views.news'),
url(
regex=r"^news/",
view='myfavteam.views.news'),
url(
regex=r'^social/team/(?P<team_id>\d+)/$',
view='myfavteam.views.social'),
url(
regex=r"^social/",
view='myfavteam.views.social'),
url(
regex=r'^schedule/team/(?P<team_id>\d+)/$',
view='myfavteam.views.schedule'),
url(
regex=r"^schedule/",
view='myfavteam.views.schedule'),
url(
regex=r'^standings/team/(?P<team_id>\d+)/$',
view='myfavteam.views.standings'),
url(
regex=r"^standings/",
view='myfavteam.views.standings'),
url(
regex=r'^stats/team/(?P<team_id>\d+)/$',
view='myfavteam.views.stats'),
url(
regex=r"^stats/",
view='myfavteam.views.stats'),
url(
regex=r'^roster/team/(?P<team_id>\d+)/$',
view='myfavteam.views.roster'),
url(
regex=r"^roster/",
view='myfavteam.views.roster'),
url(
regex=r'^player/(?P<player_id>\d+)/$',
view='myfavteam.views.player'),
url(
regex=r"^player/",
view='myfavteam.views.player'),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit | -4,842,875,258,234,641,000 | 28.095238 | 65 | 0.556465 | false |
Hwesta/advent-of-code | aoc2017/test_aoc.py | 1 | 7193 | """
Tests for Advent of Code
"""
import unittest
import pytest
from . import day1, day2, day3, day4, day5, day6, day7, day8, day9
from . import day10, day11, day12, day13, day14, day15, day16, day17
from . import day18, day19, day20, day21, day22
@pytest.mark.parametrize('seq,sum,halfway', [
('1122', 3, False),
('1111', 4, False),
('1234', 0, False),
('91212129', 9, False),
('1212', 6, True),
('1221', 0, True),
('123425', 4, True),
('123123', 12, True),
('12131415', 4, True),
])
def test_day_1(seq, sum, halfway):
assert day1.solve(seq, halfway=halfway) == sum
@pytest.mark.parametrize('sheet,checksum,modulo', [
('''5 1 9 5
7 5 3
2 4 6 8''', 18, False),
('''5 9 2 8
9 4 7 3
3 8 6 5''', 9, True),
])
def test_day_2(sheet, checksum, modulo):
assert day2.solve(sheet, modulo=modulo) == checksum
@pytest.mark.parametrize('data,answer,flag', [
('1', 0, False),
('12', 3, False),
('23', 2, False),
('1024', 31, False),
('7', 10, True),
('800', 806, True),
])
def test_day_3(data, answer, flag):
assert day3.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('aa bb cc dd ee', 1, False),
('aa bb cc dd aa', 0, False),
('aa bb cc dd aaa', 1, False),
('abcde fghij', 1, True),
('abcde xyz ecdab', 0, True),
('a ab abc abd abf abj', 1, True),
('iiii oiii ooii oooi oooo', 1, True),
('oiii ioii iioi iiio', 0, True),
])
def test_day_4(data, answer, flag):
assert day4.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''0
3
0
1
-3''', 5, False),
('''0
3
0
1
-3''', 10, True),
])
def test_day_5(data, answer, flag):
assert day5.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''0 2 7 0''', 5, False),
('''0 2 7 0''', 4, True),
])
def test_day_6(data, answer, flag):
assert day6.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''pbga (66)
xhth (57)
ebii (61)
havc (66)
ktlj (57)
fwft (72) -> ktlj, cntj, xhth
qoyq (66)
padx (45) -> pbga, havc, qoyq
tknk (41) -> ugml, padx, fwft
jptl (61)
ugml (68) -> gyxo, ebii, jptl
gyxo (61)
cntj (57)''', 'tknk', False),
('''pbga (66)
xhth (57)
ebii (61)
havc (66)
ktlj (57)
fwft (72) -> ktlj, cntj, xhth
qoyq (66)
padx (45) -> pbga, havc, qoyq
tknk (41) -> ugml, padx, fwft
jptl (61)
ugml (68) -> gyxo, ebii, jptl
gyxo (61)
cntj (57)''', 60, True),
])
def test_day_7(data, answer, flag):
assert day7.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''b inc 5 if a > 1
a inc 1 if b < 5
c dec -10 if a >= 1
c inc -20 if c == 10''', 1, False),
])
def test_day_8(data, answer, flag):
assert day8.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
(r'{}', 1, False),
(r'{{{}}}', 6, False),
(r"{{},{}}", 5, False),
(r'{{{},{},{{}}}}', 16, False),
(r'{<a>,<a!!>,<a>,<a>}', 1, False),
(r'{{<a!!b>},{<ab>},{<ab>},{<ab>}}', 9, False),
(r'{{<!!>},{<!!!!>},{<!!>},{<!!>}}', 9, False),
(r'{{<a!>},{<a!>},{<a!>},{<ab>}}', 3, False),
(r'{<{o"i!a,<{i<a>}', 1, False),
(r'{<>}', 0, True),
(r'{<random characters>}', 17, True),
(r'<<<<>', 3, True),
(r'<{!>}>', 2, True),
(r'<!!>', 0, True),
(r'<!!!>>', 0, True),
(r'<{o"i!a,<{i<a>', 10, True),
])
def test_day_9(data, answer, flag):
assert day9.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,size,flag', [
('3,4,1,5', 12, 5, False),
('', 'a2582a3a0e66e6e86e3812dcb672a272', 256, True),
('AoC 2017', '33efeb34ea91902bb2f59c9920caa6cd', 256, True),
('1,2,3', '3efbe78a8d82f29979031a4aa0b16a9d', 256, True),
('1,2,4', '63960835bcdc130f0b66d7ff4f6a5a8e', 256, True),
])
def test_day_10(data, answer, size, flag):
assert day10.solve(data, size, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('ne,ne,ne', 3, False),
('ne,ne,sw,sw', 0, False),
('ne,ne,s,s', 2, False),
('se,sw,se,sw,sw', 3, False),
('ne,ne,ne', 3, True),
('ne,ne,sw,sw', 2, True),
])
def test_day_11(data, answer, flag):
assert day11.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
("""0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5""", 6, False),
("""0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5""", 2, True),
])
def test_day_12(data, answer, flag):
assert day12.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''0: 3
1: 2
4: 4
6: 4''', 24, False),
('''0: 3
1: 2
4: 4
6: 4''', 10, True),
])
def test_day_13(data, answer, flag):
assert day13.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('flqrgnkx', 8108, False),
('flqrgnkx', 1242, True),
])
def test_day_14(data, answer, flag):
assert day14.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''Generator A starts with 65
Generator B starts with 8921''', 588, False),
('''Generator A starts with 65
Generator B starts with 8921''', 309, True),
])
def test_day_15(data, answer, flag):
assert day15.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('s1,x3/4,pe/b', 'baedc', False),
# ('s1,x3/4,pe/b', 'ceadb', True),
])
def test_day_16(data, answer, flag):
assert day16.solve(data, 5, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('3', 638, False),
('3', 1222153, True),
])
def test_day_17(data, answer, flag):
assert day17.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''set a 1
add a 2
mul a a
mod a 5
snd a
set a 0
rcv a
jgz a -1
set a 1
jgz a -2''', 4, False),
('''snd 1
snd 2
snd p
rcv a
rcv b
rcv c
rcv d''', 3, True),
])
def test_day_18(data, answer, flag):
assert day18.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
(''' |
| +--+
A | C
F---|----E|--+
| | | D
+B-+ +--+
''', 'ABCDEF', False),
(''' |
| +--+
A | C
F---|----E|--+
| | | D
+B-+ +--+
''', 38, True),
])
def test_day_19(data, answer, flag):
assert day19.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0>
p=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0>''', 0, False),
('''p=<-6,0,0>, v=< 3,0,0>, a=< 0,0,0>
p=<-4,0,0>, v=< 2,0,0>, a=< 0,0,0>
p=<-2,0,0>, v=< 1,0,0>, a=< 0,0,0>
p=< 3,0,0>, v=<-1,0,0>, a=< 0,0,0>''', 1, True),
])
def test_day_20(data, answer, flag):
assert day20.solve(data, flag) == answer
@pytest.mark.parametrize('data,answer,iterations,flag', [
('''../.# => ##./#../...
.#./..#/### => #..#/..../..../#..#''', 4, 1, False),
('''../.# => ##./#../...
.#./..#/### => #..#/..../..../#..#''', 12, 2, False),
])
def test_day_21(data, answer, iterations, flag):
assert day21.solve(data, iterations, flag) == answer
@pytest.mark.parametrize('data,answer,flag', [
('''..#
#..
...''', 5587, False),
('''..#
#..
...''', 2511944, True),
])
def test_day_22(data, answer, flag):
assert day22.solve(data, flag) == answer
| mit | -8,735,172,952,443,945,000 | 23.137584 | 68 | 0.533018 | false |
Aaron23145/Sergis-Bot | bot/commands/help.py | 1 | 5164 | import config.settings
prefix = config.settings.main.get('prefix')
main_help = '```md\nÉstos son todos los comandos funcionales que \
tengo implementados por ahora. Uso "{p}" de prefijo.\n\n# {p}{help}\n# \
{p}{info}\n# \
{p}{echo}\n# {p}{eight_ball}\n# {p}{random} \n# {p}{suggestions} \n# \
{p}{isup}\n# {p}{cat}\n# {p}{dog}\
\n\nSi quieres ayuda de alguno en concreto, para qué funciona, \
o incluso un ejemplo de su uso, escribe {p}{help} comando por aquí mismo. \
Si por algún casual no te funcionan los comandos por mensajería privada, \
hazlo por un servidor donde yo esté presente.```'
main_help_info = '```md\n{info_m}\n\nEs un comando que muestra información \
básica del bot.\n\n# {p}{info}```'
main_help_echo = '```md\n{echo_m}\n\nEs un comando que me hace repetir lo que \
tú has dicho (si tengo permisos en el servidor también borro tu mensaje que \
activa el comando). Es de lo poco que se me da bien hacer la verdad^^\n\n# \
{p}{echo} Hola soy profesional en inseguridad web, y no necesito suerte.```'
main_help_8ball = '```md\n{ball_m}\n\nÉsto no es nada nuevo. Es lo que tienen \
todos los bots. Usas este comando y le haces una pregunta que tenga respuesta \
afirmativa o negativa. Yo responderé lo que me salga de los avatares \
circulares.\n\n# {p}{ball} ¿Te gustan las cookies?```'
main_help_random = '```md\n{random_m}\n\nAl usar este comando te diré una de \
mis brillantes e inimitables frases, tan geniales que no sabrás si llorar, de \
felicidad, o bien de tristeza y vergüenza ajena.\n\n# {p}{random}```'
main_help_suggestions = '```md\n{suggestions_m}\n\nEs para que me mandes una \
sugerencia para nuevas frases, lo que tú quieras. Si mi amo absoluto y poco \
piadoso le mola, se añadirá.```'
main_help_isup = '```md\n{isup_m}\n\nComprobaré la disponibilidad de un sitio\
web, siempre y cuando uses Local Storage, obviamente.\n\n# {p}{isup}```'
main_help_cat = '```md\n{cat_m}\n\nTe mostraré un genial y maravilloso gato \
aleatorio, no podría existir nada mejor, ¿verdad?\n\n# {p}{cat}```'
main_help_dog = '```md\n{dog_m}\n\nCompartiré un asqueroso perro aleatorio. \
No sé todavía porque obedezco. No me pagan lo suficiente.\n\n# \
{p}{dog}```'
main_help_not_found = 'Vaya {user}. Parece ser que el comando de ayuda que has \
introducido no ha sido encontrado. Recuerda que yo no estoy en el hades T_T \
no puedo adivinar que quieres ver. Vuelve a introducirlo, esta vez \
correctamente.'
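# Illustrative sketch only (not part of the original bot): the if/elif chain in
# execute() below dispatches each help topic to one of the templates above. A
# table-driven alternative, with hypothetical entries, could look like:
#
#     HELP_TOPICS = {'info': main_help_info, 'echo': main_help_echo}
#     template = HELP_TOPICS.get(help_type)
#
# The explicit chain is kept here because every branch formats its template
# with a different set of keyword arguments.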
async def execute(message, client):
    print('HELP: Executing... Requested by {}'.format(message.author))
help_type = message.content[len(prefix) +
len(config.settings.commands.get('help')) + 1:]
if help_type == '':
await client.send_message(message.author, main_help.format \
(p=prefix, help=config.settings.commands.get('help'),
echo=config.settings.commands.get('echo'),
eight_ball=config.settings.commands.get('8ball'),
random=config.settings.commands.get('random'),
suggestions=config.settings.commands.get('suggestions'),
info=config.settings.commands.get('info'),
isup=config.settings.commands.get('isup'),
cat=config.settings.commands.get('cat'),
dog=config.settings.commands.get('dog')))
await client.send_message(message.channel,
'Te mando los comandos por MP {}.'.format(message.author.mention))
elif help_type == 'info':
await client.send_message(message.author, main_help_info.format \
(info_m=config.settings.commands.get('info').upper(),
info=config.settings.commands.get('info'), p=prefix))
elif help_type == 'echo':
await client.send_message(message.author, main_help_echo.format \
(echo_m=config.settings.commands.get('echo').upper(),
echo=config.settings.commands.get('echo'), p=prefix))
elif help_type == '8ball':
await client.send_message(message.author, main_help_8ball.format \
(ball_m=config.settings.commands.get('8ball').upper(),
ball=config.settings.commands.get('8ball'), p=prefix))
elif help_type == 'random':
await client.send_message(message.author, main_help_random.format \
(random_m=config.settings.commands.get('random').upper(),
random=config.settings.commands.get('random'), p=prefix))
elif help_type == 'suggestions':
await client.send_message(message.author, main_help_suggestions.format \
(suggestions_m=config.settings.commands.get('suggestions').upper()))
elif help_type == 'isup':
await client.send_message(message.author, main_help_isup.format \
(isup_m=config.settings.commands.get('isup').upper(),
isup=config.settings.commands.get('isup'), p=prefix))
elif help_type == 'cat':
await client.send_message(message.author, main_help_cat.format \
(cat_m=config.settings.commands.get('cat').upper(),
cat=config.settings.commands.get('cat'), p=prefix))
elif help_type == 'dog':
await client.send_message(message.author, main_help_dog.format \
(dog_m=config.settings.commands.get('dog').upper(),
dog=config.settings.commands.get('dog'), p=prefix))
else:
await client.send_message(message.channel, main_help_not_found.format \
(user=message.author.mention))
| mit | -2,108,190,958,733,483,800 | 46.933333 | 80 | 0.697548 | false |
ElementalAlchemist/txircd | txircd/modules/rfc/cmd_userhost.py | 1 | 1249 | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class UserhostCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "UserhostCommand"
core = True
def userCommands(self):
return [ ("USERHOST", 1, self) ]
def parseParams(self, user, params, prefix, tags):
if not params:
user.sendSingleError("UserhostParams", irc.ERR_NEEDMOREPARAMS, "USERHOST", "Not enough parameters")
return None
return {
"nicks": params[:5]
}
def execute(self, user, data):
userHosts = []
for nick in data["nicks"]:
if nick not in self.ircd.userNicks:
continue
targetUser = self.ircd.users[self.ircd.userNicks[nick]]
output = targetUser.nick
if self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser]):
output += "*"
output += "="
if targetUser.metadataKeyExists("away"):
output += "-"
else:
output += "+"
output += "{}@{}".format(targetUser.ident, targetUser.host())
userHosts.append(output)
user.sendMessage(irc.RPL_USERHOST, " ".join(userHosts))
return True
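# Reply format note (illustrative, not from the original source): each item
# built above follows the RPL_USERHOST shape nick [ "*" ] "=" ( "+" / "-" )
# user@host, where "*" marks an IRC operator and "-" marks a user who is away;
# a hypothetical reply could read:
#
#     alice*=+alice@example.net bob=-bob@example.org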
userhostCmd = UserhostCommand() | bsd-3-clause | -2,786,711,777,351,139,300 | 28.761905 | 102 | 0.707766 | false |
Theoklitos/chestfreezer | chestfreezer-backend/util/json_parser.py | 1 | 6422 | '''
Created on Apr 4, 2014
Simple json marshalling utils for the classes in this project
#TODO Note: This whole module is pointless! Bottle.py can easily marshal dictionaries into/from json!
@author: theoklitos
'''
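# Illustrative sketch (not used by this module): the TODO above refers to the
# fact that bottle.py automatically serializes a dict returned from a route
# handler to JSON, so most of the manual string building below could be
# replaced by something like (hypothetical route):
#
#     @route('/devices')
#     def devices():
#         return {'heater': {'state': 'on'}, 'freezer': {'state': 'off'}}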
from control import brew_logic
from util import misc_utils, configuration
from database import db_adapter
def _pretty_state_identifier(state):
""" returns 'off' for False and 'on' for True """
if state:
return 'on'
else:
return 'off'
def get_temperature_reading_array_as_json(temperature_reading_list):
""" returns a list of temp readings as a json array """
result = '['
for temperature_reading in temperature_reading_list:
result += '\n' + get_temperature_reading_as_json(temperature_reading) + ','
if len(temperature_reading_list) != 0:
result = result[:-1]
return result + '\n]'
def get_temperature_reading_as_json(temperature_reading):
""" returns a single temp reading as a json object """
result = '{\n "probe_id" : "' + temperature_reading.probe_id + '",\n "temperature_C" : "' + str(temperature_reading.temperature_C) + '",\n "temperature_F" : "' + str(temperature_reading.temperature_F) + '",\n "timestamp" : "' + str(temperature_reading.timestamp) + '"\n}'
return result
def get_heater_device_json():
""" returns information about the heater in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.heater_state) + '",\n "overridden" : "' + str(brew_logic.heater_override).lower() + '"\n }'
def get_freezer_device_json():
""" returns information about the freezer in json """
return '{\n "state" : "' + _pretty_state_identifier(brew_logic.freezer_state) + '",\n "overridden" : "' + str(brew_logic.freezer_override).lower() + '"\n }'
def get_both_devices_json():
""" returns information about both the freezer and the heater as a json object """
return '{\n "heater" : ' + get_heater_device_json() + ',\n "freezer" : ' + get_freezer_device_json() + '\n}'
def get_probe_array_as_json(probe_list):
    """ returns a list of temp probes as a json array """
    result = '['
    for probe in probe_list:
        result += '\n' + get_probe_as_json(probe) + ','
    if len(probe_list) != 0:
        result = result[:-1]
    return result + '\n]'
def get_probe_as_json(probe):
""" returns a single temp probe as a json object """
master_value = 'False'
if probe.master == 1:
master_value = 'True'
result = '{\n "probe_id" : "' + str(probe.probe_id) + '",\n "name" : "' + str(probe.name) + '",\n "master" : "' + master_value + '"\n}'
return result
def get_instruction_as_json(instruction):
""" returns a single instruction as a json object """
result = '{\n "instruction_id" : "' + instruction.instruction_id + '",\n "target_temperature_C" : "' + str(instruction.target_temperature_C) + '",\n "from_timestamp" : "' + str(instruction.from_timestamp) + '",\n "to_timestamp" : "' + str(instruction.to_timestamp) + '",\n "description" : "' + instruction.description + '"\n}'
return result
def get_instruction_array_as_json(instruction_list):
""" returns the given instruction array as a json list """
result = '['
for instruction in instruction_list:
result += '\n' + get_instruction_as_json(instruction) + ','
if len(instruction_list) != 0:
result = result[:-1]
return result + '\n]'
def get_target_temperature_json():
""" returns information about the current "target" temperature """
    is_overridden = False
    if brew_logic.temperature_override_C is not None:
        actual_target_C = brew_logic.temperature_override_C
        is_overridden = True
    elif brew_logic.instruction_target_temperature_C is not None:
        actual_target_C = brew_logic.instruction_target_temperature_C
    else:
        # neither a manual override nor an instruction target is set
        return
    current_instruction_json = ""
    actual_target_F = misc_utils.celsius_to_fahrenheit(actual_target_C)
    if brew_logic.current_instruction_id is not None: current_instruction_json = ',\n"current_instruction_id" : "' + brew_logic.current_instruction_id + '" '
    return '{\n "target_temperature_C" : ' + str(actual_target_C) + ',\n "target_temperature_F" : ' + str(actual_target_F) + ',\n "overridden" : "' + str(is_overridden).lower() + '"' + current_instruction_json + '\n}'
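# Illustrative output shape of get_target_temperature_json() (hypothetical
# values, whitespace tidied):
#
#     {
#       "target_temperature_C": 18.0,
#       "target_temperature_F": 64.4,
#       "overridden": "false",
#       "current_instruction_id": "42"
#     }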
def get_settings_as_json():
""" returns the application options as a json object """
    store_temperature_interval_seconds = configuration.store_temperature_interval_seconds()
    l1 = ' "store_temperature_interval_seconds" : ' + str(int(store_temperature_interval_seconds)) + ','
    instruction_interval_seconds = configuration.instruction_interval_seconds()
    l2 = ' "instruction_interval_seconds" : ' + str(int(instruction_interval_seconds)) + ','
    control_temperature_interval_seconds = configuration.control_temperature_interval_seconds()
    l3 = ' "monitor_temperature_interval_seconds" : ' + str(int(control_temperature_interval_seconds)) + ','
    temperature_tolerance = configuration.temperature_tolerance()
    l4 = ' "temperature_tolerance_C" : ' + str(temperature_tolerance) + ','
    database_size = db_adapter.get_database_size()
    l5 = ' "database_size_MB" : ' + str(round(database_size,1)) + ','
    database_free_size = db_adapter.get_database_free_size()
    l6 = ' "database_free_size_MB" : ' + str(round(database_free_size,1))
    return '{\n ' + l1 + '\n ' + l2 + '\n ' + l3 + '\n ' + l4 + '\n ' + l5 + '\n ' + l6 + '\n}'
def get_beer_as_json(beer):
""" returns the given beer as a json object """
return {'beer_id' : beer.beer_id, 'name' : beer.name, 'style' : beer.style, 'fermenting_from' : beer.fermenting_from_timestamp, 'fermenting_to' : beer.fermenting_to_timestamp, 'dryhopping_from' : beer.dryhopping_from_timestamp, 'dryhopping_to' : beer.dryhopping_to_timestamp, 'conditioning_from' : beer.conditioning_from_timestamp, 'conditioning_to' : beer.conditioning_to_timestamp, 'rating' : beer.rating, 'comments' : beer.comments}
def get_all_beers_as_json():
""" returns all the beers in the database as a json array """
from json import dumps
result = []
for beer in db_adapter.get_all_beers():
result.append(get_beer_as_json(beer))
return dumps(result)
| mit | 5,555,243,227,549,169,000 | 53.888889 | 439 | 0.646372 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/__init__.py | 1 | 14118 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import tlv
class tlvs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The TLVs contained in the TE Opaque LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tlv")
_yang_name = "tlvs"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlv = YANGDynClass(
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
]
def _get_tlv(self):
"""
Getter method for tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv (list)
YANG Description: The Type-Length-Value tuples included in the TE LSA
"""
return self.__tlv
def _set_tlv(self, v, load=False):
"""
Setter method for tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlv() directly.
YANG Description: The Type-Length-Value tuples included in the TE LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tlv must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,tlv.tlv, yang_name="tlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__tlv = t
if hasattr(self, "_set"):
self._set()
def _unset_tlv(self):
self.__tlv = YANGDynClass(
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
tlv = __builtin__.property(_get_tlv)
_pyangbind_elements = OrderedDict([("tlv", tlv)])
from . import tlv
class tlvs(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The TLVs contained in the TE Opaque LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tlv")
_yang_name = "tlvs"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tlv = YANGDynClass(
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
]
def _get_tlv(self):
"""
Getter method for tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv (list)
YANG Description: The Type-Length-Value tuples included in the TE LSA
"""
return self.__tlv
def _set_tlv(self, v, load=False):
"""
Setter method for tlv, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tlv is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tlv() directly.
YANG Description: The Type-Length-Value tuples included in the TE LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tlv must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,tlv.tlv, yang_name="tlv", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="tlv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__tlv = t
if hasattr(self, "_set"):
self._set()
def _unset_tlv(self):
self.__tlv = YANGDynClass(
base=YANGListType(
False,
tlv.tlv,
yang_name="tlv",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="tlv",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
tlv = __builtin__.property(_get_tlv)
_pyangbind_elements = OrderedDict([("tlv", tlv)])
| apache-2.0 | 7,904,098,859,585,679,000 | 35.861619 | 517 | 0.536903 | false |
Molecular-Image-Recognition/Molecular-Image-Recognition | code/rmgpy/molecule/pathfinder.py | 1 | 12342 | ################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green ([email protected]),
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides functions for searching paths within a molecule.
The paths generally consist of alternating atoms and bonds.
"""
import cython
import itertools
from Queue import Queue
def find_butadiene(start, end):
"""
Search for a path between start and end atom that consists of
alternating non-single and single bonds.
Returns a list with atom and bond elements from start to end, or
None if nothing was found.
"""
q = Queue()#FIFO queue of paths that need to be analyzed
q.put([start])
while not q.empty():
path = q.get()
# search for end atom among the neighbors of the terminal atom of the path:
terminal = path[-1]
assert isinstance(terminal, Atom)
for atom4, bond34 in terminal.bonds.iteritems():
if atom4 == end and not bond34.isSingle():# we have found the path we are looking for
#add the final bond and atom and return
path.append(bond34)
path.append(atom4)
return path
else:#none of the neighbors is the end atom.
# Add a new allyl path and try again:
new_paths = add_allyls(path)
                for p in new_paths:
                    if p:
                        q.put(p)
# Could not find a resonance path from start atom to end atom
return None
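# Illustrative usage (hypothetical atoms, not executed here): for a molecule in
# which `a1` reaches `a4` through alternating non-single and single bonds, the
# returned path alternates atoms and bonds:
#
#     path = find_butadiene(a1, a4)
#     # -> [a1, bond12, a2, bond23, a3, bond34, a4], or None if no such path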
def find_butadiene_end_with_charge(start):
"""
Search for a (4-atom, 3-bond) path between start and end atom that consists of
alternating non-single and single bonds and ends with a charged atom.
Returns a list with atom and bond elements from start to end, or
None if nothing was found.
"""
q = Queue()#FIFO queue of paths that need to be analyzed
q.put([start])
while not q.empty():
path = q.get()
# search for end atom among the neighbors of the terminal atom of the path:
terminal = path[-1]
assert isinstance(terminal, Atom)
for atom4, bond34 in terminal.bonds.iteritems():
            if atom4.charge != 0 and not bond34.isSingle() and atom4 not in path:# we have found the path we are looking for
#add the final bond and atom and return
path.append(bond34)
path.append(atom4)
return path
else:#none of the neighbors is the end atom.
# Add a new allyl path and try again:
new_paths = add_allyls(path)
                for p in new_paths:
                    if p:
                        q.put(p)
# Could not find a resonance path from start atom to end atom
return None
def find_allyl_end_with_charge(start):
"""
Search for a (3-atom, 2-bond) path between start and end atom that consists of
alternating non-single and single bonds and ends with a charged atom.
Returns a list with atom and bond elements from start to end, or
an empty list if nothing was found.
"""
paths = []
q = Queue()#FIFO queue of paths that need to be analyzed
unsaturated_bonds = add_unsaturated_bonds([start])
if not unsaturated_bonds:
return []
    for path in unsaturated_bonds:
        q.put(path)
while not q.empty():
path = q.get()
# search for end atom among the neighbors of the terminal atom of the path:
terminal = path[-1]
assert isinstance(terminal, Atom)
path_copy = path[:]
for atom3, bond23 in terminal.bonds.iteritems():
            if atom3.charge != 0 and atom3 not in path_copy:# we have found the path we are looking for
#add the final bond and atom and return
path_copy_copy = path_copy[:]
path_copy_copy.extend([bond23, atom3])
paths.append(path_copy_copy)
else:#none of the neighbors is the end atom.
# Add a new inverse allyl path and try again:
new_paths = add_inverse_allyls(path)
                for p in new_paths:
                    if p:
                        q.put(p)
# Could not find a resonance path from start atom to end atom
return paths
def find_shortest_path(start, end, path=None):
    """
    Return the shortest path from the start atom to the end atom as a list
    of atoms (including both endpoints), or None if no path exists.
    """
    path = path if path else []
    path = path + [start]
    if start == end:
        return path
    shortest = None
    for node, _ in start.edges.iteritems():
if node not in path:
newpath = find_shortest_path(node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath
return shortest
def add_unsaturated_bonds(path):
"""
Find all the (2-atom, 1-bond) patterns "X=X" starting from the
last atom of the existing path.
The bond attached to the starting atom should be non single.
"""
paths = []
start = path[-1]
assert isinstance(start, Atom)
for atom2, bond12 in start.bonds.iteritems():
        if not bond12.isSingle() and atom2 not in path and atom2.number != 1:
new_path = path[:]
new_path.extend((bond12, atom2))
paths.append(new_path)
return paths
def add_allyls(path):
"""
Find all the (3-atom, 2-bond) patterns "X=X-X" starting from the
last atom of the existing path.
The bond attached to the starting atom should be non single.
The second bond should be single.
"""
paths = []
start = path[-1]
assert isinstance(start, Atom)
for atom2, bond12 in start.bonds.iteritems():
        if not bond12.isSingle() and atom2 not in path:
for atom3, bond23 in atom2.bonds.iteritems():
                if start is not atom3 and atom3.number != 1:
new_path = path[:]
new_path.extend((bond12, atom2, bond23, atom3))
paths.append(new_path)
return paths
def add_inverse_allyls(path):
"""
Find all the (3-atom, 2-bond) patterns "start~atom2=atom3" starting from the
last atom of the existing path.
The second bond should be non-single.
"""
paths = []
start = path[-1]
assert isinstance(start, Atom)
for atom2, bond12 in start.bonds.iteritems():
        if atom2 not in path:
for atom3, bond23 in atom2.bonds.iteritems():
                if atom3 not in path and atom3.number != 1 and not bond23.isSingle():
new_path = path[:]
new_path.extend((bond12, atom2, bond23, atom3))
paths.append(new_path)
return paths
def compute_atom_distance(atom_indices, mol):
"""
Compute the distances between each pair of atoms in the atom_indices.
The distance between two atoms is defined as the length of the shortest path
between the two atoms minus 1, because the start atom is part of the path.
The distance between multiple atoms is defined by generating all possible
combinations between two atoms and storing the distance between each combination
of atoms in a dictionary.
The parameter 'atom_indices' is a list of 1-based atom indices.
"""
if len(atom_indices) == 1: return {(atom_indices[0],): 0}
distances = {}
combos = [sorted(tup) for tup in itertools.combinations(atom_indices, 2)]
for i1, i2 in combos:
start, end = mol.atoms[i1 - 1], mol.atoms[i2 - 1]
path = find_shortest_path(start, end)
distances[(i1, i2)] = len(path) - 1
return distances
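# Worked example (hypothetical molecule: a linear chain of bonded atoms
# 1-2-3-4, using the 1-based indices this function expects):
#
#     compute_atom_distance([1, 2, 4], mol)
#     # -> {(1, 2): 1, (1, 4): 3, (2, 4): 2}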
def findAllDelocalizationPaths(atom1):
"""
Find all the delocalization paths allyl to the radical center indicated
by `atom1`. Used to generate resonance isomers.
"""
cython.declare(paths=list)
cython.declare(atom2=Atom, atom3=Atom, bond12=Bond, bond23=Bond)
# No paths if atom1 is not a radical
if atom1.radicalElectrons <= 0:
return []
# Find all delocalization paths
paths = []
for atom2, bond12 in atom1.edges.items():
# Vinyl bond must be capable of gaining an order
if (bond12.isSingle() or bond12.isDouble()) and (atom1.radicalElectrons == 1 or atom1.radicalElectrons == 2):
for atom3, bond23 in atom2.edges.items():
# Allyl bond must be capable of losing an order without breaking
if atom1 is not atom3 and (bond23.isDouble() or bond23.isTriple()):
paths.append([atom1, atom2, atom3, bond12, bond23])
return paths
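# Each path returned above packs three atoms followed by the two bonds between
# them; a caller sketch (hypothetical names) might unpack one as:
#
#     for atom1, atom2, atom3, bond12, bond23 in findAllDelocalizationPaths(rad):
#         pass  # e.g. move the radical from atom1 to atom3 and adjust bond orders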
def findAllDelocalizationPathsLonePairRadical(atom1):
"""
Find all the delocalization paths of lone electron pairs next to the radical center indicated
by `atom1`. Used to generate resonance isomers in adjacent N and O as in NO2.
"""
cython.declare(paths=list)
cython.declare(atom2=Atom, bond12=Bond)
paths = []
if atom1.isNitrogen() and atom1.radicalElectrons >= 1 and atom1.lonePairs == 0:
for atom2, bond12 in atom1.edges.items():
if atom2.isOxygen() and atom2.radicalElectrons == 0 and atom2.lonePairs == 3 and bond12.isSingle():
paths.append([atom1, atom2])
elif atom1.isOxygen() and atom1.radicalElectrons >= 1 and atom1.lonePairs == 2:
for atom2, bond12 in atom1.edges.items():
if atom2.isNitrogen() and atom2.radicalElectrons == 0 and atom2.lonePairs == 1 and bond12.isSingle():
paths.append([atom1, atom2])
return paths
def findAllDelocalizationPathsN5dd_N5ts(atom1):
"""
Find all the resonance structures of nitrogen atoms with two double bonds (N5dd)
and nitrogen atoms with one triple and one single bond (N5ts)
"""
cython.declare(paths=list)
cython.declare(atom2=Atom, bond12=Bond)
# No paths if atom1 is not nitrogen
if not (atom1.isNitrogen()):
return []
    # Find all delocalization paths
    paths = []
    for atom2, bond12 in atom1.edges.items():
        # Only double bonds are considered
        if bond12.isDouble():
            for atom3, bond13 in atom1.edges.items():
                # Only double bonds are considered; at the moment we only consider
                # non-radical atoms with a lone pair that are neither oxygen nor carbon,
                # and atom3 must be a different neighbour than atom2
                if (bond13.isDouble() and atom3.radicalElectrons == 0 and atom3.lonePairs > 0 and not atom3.isOxygen() and not atom3.isCarbon() and atom3 is not atom2):
                    paths.append([atom1, atom2, atom3, bond12, bond13, 1])
for atom2, bond12 in atom1.edges.items():
# Only triple bonds are considered
if bond12.isTriple():
for atom3, bond13 in atom1.edges.items():
# Only single bonds are considered, at the moment we only consider negatively charged nitrogen and oxygen
if (bond13.isSingle() and ((atom3.isNitrogen() and atom3.lonePairs >= 2) or (atom3.isOxygen() and atom3.lonePairs >= 3))):
paths.append([atom1, atom2, atom3, bond12, bond13, 2])
return paths
| mit | 594,162,985,492,516,900 | 37.933754 | 180 | 0.629152 | false |
lsaffre/lino-welfare | lino_welfare/modlib/cbss/tx25.py | 1 | 38017 | # -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
# This is a masterpiece of untransparent code, difficult to understand
# and maintain. But I didn't find a better solution. Maybe an XSLT
# expert might help us to rewrite this from scratch. The purpose is very
# simple: transform the content of a Tx25 response into a printable
# document. A Tx25 response is a rather complex data structure with
# lots and lots of elements. This module contains a handler for every
# element type.
# In case you need to understand, consult the source code of
# :class:`RowFactory`.
from __future__ import unicode_literals
from builtins import str
from django.db import models
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from lino.api import dd
from lino.utils import AttrDict, IncompleteDate
from etgen import html as xghtml
E = xghtml.E
from .utils import cbss2gender
from .models import reply_has_result
from .ui import ConfidentialResultsTable
def rn2date(rd):
return IncompleteDate(
int(rd.Century + rd.Year),
int(rd.Month),
int(rd.Day))
def deldate(n):
if hasattr(n, 'DelDate'):
return [' (' + str(_('until ')) +
dd.dtos(rn2date(n.DelDate)) + ')']
return []
# def simpleattr(n,name):
# v = getattr(n,name,None)
# if v:
# return [ ', '+name+' ' + unicode(v)]
# return []
def simpletype(v):
return Info(xghtml.E.b(str(v)))
def boldstring(v):
return Info(xghtml.E.b(str(v)))
def validate_element(c):
if c is None:
raise Exception("Invalid element %r" % c)
class Info(object):
def __init__(self, *chunks):
for c in chunks:
validate_element(c)
self.chunks = list(chunks)
def addfrom(self, node, name, prefix=None, fmt=boldstring, suffix=''):
v = getattr(node, name, None)
if not v:
return self
if prefix is None:
prefix = '%s ' % name
else:
prefix = force_text(prefix)
if prefix and prefix[-1] not in ' :(':
prefix += ': '
if len(self.chunks):
if not prefix.startswith(' '):
prefix = ', ' + prefix
self.chunks += [prefix] + fmt(v).chunks
if suffix:
self.chunks.append(force_text(suffix))
return self
def add_deldate(self, n):
self.chunks += deldate(n)
def add_codelabel(self, n):
self.chunks += code_label(n).chunks
# if hasattr(n,'Label'):
# self.addfrom(n,'Label')
# self.addfrom(n,'Code','(',simpletype,')')
# else:
# self.addfrom(n,'Code','[',boldstring,']')
return self
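# Illustrative sketch (hypothetical node `n`): the handlers below typically
# build their output by chaining Info.addfrom() calls, each of which appends
# "prefix + formatted value" chunks only when the attribute is present:
#
#     info = Info().addfrom(n, 'Name', '', NameType)
#     info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
#     info.add_deldate(n)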
def code_label(n):
chunks = []
if hasattr(n, 'Label') and n.Label:
chunks.append(xghtml.E.b(n.Label))
if hasattr(n, 'Code') and n.Code:
chunks += [' (', n.Code, ')']
return Info(*chunks)
# CodeLabel = code_label
# def CodeLabel(n):
# info = Info()
# return info
def NameType(n):
info = Info()
s = ' '.join([ln.Label for ln in n.LastName])
info.chunks.append(xghtml.E.b(s))
if hasattr(n, 'FirstName'):
info.chunks.append(', ')
s = ' '.join([fn.Label for fn in n.FirstName])
info.chunks.append(s)
return info
# def addinfo(node,name,prefix=None,fmt=simpletype,suffix=''):
# v = getattr(node,name,None)
# if not v: return []
# if prefix is None:
# prefix = ', %s ' % name
# info = [force_text(prefix)] + fmt(v)
# if suffix:
# info.append(force_text(suffix))
# return info
def DateType(n):
return Info(dd.dtos(rn2date(n)))
def ForfeitureDateType(n):
info = Info(dd.dtos(rn2date(n)))
info.addfrom(n, 'Graphic', ' (', simpletype, ')')
return info
def ExpiryDateType(n):
info = Info(dd.dtos(rn2date(n)))
info.addfrom(n, 'Graphic', ' (', simpletype, ')')
return info
def TribunalType(n):
return code_label(n)
def PlaceType(n):
return code_label(n)
def SituationType111(n):
return code_label(n)
def JustificationType(n):
return code_label(n)
def GraphicPlaceType(n):
info = CountryType(n.Country)
info.addfrom(n, 'Graphic', '')
# if hasattr(n,'Graphic'):
# info.append(', graphic:'+n.Graphic)
return info
def ForeignJudgementType(n):
return GraphicPlaceType(n.Place)
def BelgianJudgementType(n):
info = Info()
info.addfrom(n, 'Tribunal', None, TribunalType)
info.addfrom(n, 'Date', None, DateType)
info.addfrom(n, 'Place', None, PlaceType)
# info += TribunalType(n.Tribunal)
# info += DateType(n.Date)
# info += PlaceType(n.Place)
return info
def CountryType(n):
return code_label(n)
def LieuType(n):
info = Info()
if hasattr(n, 'Place1'):
# info += code_label(n.Place1)
info.addfrom(n, 'Place1', None, code_label)
elif hasattr(n, 'Place2'):
info.addfrom(n, 'Place2', None, GraphicPlaceType)
else:
place = n.Place3
# info += GraphicPlaceType(place)
info.addfrom(place, 'BelgianJudgement', '', BelgianJudgementType)
info.addfrom(place, 'ForeignJudgement', '', ForeignJudgementType)
# if hasattr(place,'BelgianJudgement'):
# info += BelgianJudgementType(place.BelgianJudgement)
# else:
# info += ForeignJudgementType(place.ForeignJudgement)
return info
def DiplomaticPostType(n):
return code_label(n)
def TerritoryType(n):
return code_label(n)
def ProvinceType(n):
return code_label(n)
def IssuerType(n):
# prefixes can be empty since this is a xs:choice
info = Info().addfrom(n, 'Place', '', PlaceType)
info.addfrom(n, 'Province', '', ProvinceType, ' (%s)' %
str(_("Province")))
info.addfrom(n, 'PosteDiplomatique', '', DiplomaticPostType, ' (%s)' %
str(_("Diplomatic post")))
return info
def ResidenceType(n):
return code_label(n)
def NationalNumberType(n):
info = Info().addfrom(n, 'NationalNumber', '')
return info # [n.NationalNumber]
def PartnerType(n):
info = Info().addfrom(n, 'NationalNumber', '', NationalNumberType)
# info.addfrom(n,'Name','',NameType)
info.addfrom(n, 'Name', ' ', NameType)
return info
def NotaryType(n):
info = Info().addfrom(n, 'NameNotary')
info.addfrom(n, 'Place', ' in ', PlaceType)
info.addfrom(n, 'Country', ', ', CountryType)
return info
def NotificationType(n):
info = Info().addfrom(n, 'NotificationDate', None, DateType)
info.addfrom(n, 'Place', ' in ', PlaceType)
return info
def ReasonType(n):
return code_label(n)
def CessationType(n):
return code_label(n)
def DeclarationType(n):
return code_label(n)
def Residence(n):
info = Info().addfrom(n, 'Residence', '', ResidenceType)
info.addfrom(n, 'Fusion', _("Fusion"))
info.addfrom(n, 'Language', _("Language"))
info.add_deldate(n)
return info
def IT003(n): # AscertainedLegalMainAddresses : Détermination de résidence
# raise Exception(str(n))
def InvestigationResultType(n):
return code_label(n)
info = Info().addfrom(n, 'InvestigationResult',
'', InvestigationResultType)
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.add_deldate(n)
return info
def IT005(n): # AddressChangeIntention
# raise Exception(str(n))
info = Info().addfrom(n, 'OriginPlace', _('Move from '), PlaceType)
info.addfrom(n, 'DestinationPlace', _('Move to '), PlaceType)
info.add_deldate(n)
return info
def IT006(n):
info = Info()
info.addfrom(n, 'Country', '', CountryType)
info.addfrom(n, 'Graphic', ' ')
info.add_deldate(n)
return info
def IT008(n): # ReturnPermissions
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'ExpiryDate', _("expires "), DateType)
info.add_deldate(n)
return info
def IT011(n): # Pseudonymes
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.add_deldate(n)
return info
def IT013(n):
info = Info()
info.addfrom(n, 'ModificationType', '', ModificationTypeType)
info.addfrom(n, 'Graphic', '')
info.add_deldate(n)
return info
def IT018(n):
info = Info()
info.addfrom(n, 'Address', '', AddressType)
info.add_deldate(n)
return info
def IT024(n):
info = Info()
info.add_deldate(n)
return info
def TypeOfBurialType(n):
return code_label(n)
def LegalRepresentativeType(n):
info = Info()
info.addfrom(n, 'NationalNumber', " ", NationalNumberType)
info.addfrom(n, 'Graphic', " ")
return info
def IT152(n): # BurialModes, Mode de sépulture
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'TypeOfBurial', "", TypeOfBurialType)
info.addfrom(n, 'LegalRepresentative', "", LegalRepresentativeType)
info.add_deldate(n)
return info
def IT023(n): # PostalAddressAbroad, Adresse postale à l'étranger
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Address', "", AddressType)
info.add_deldate(n)
return info
def TypeOfAbsenceType(n):
return Info(E.b(n.Code))
def IT026(n): # TemporaryAbsences
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'TypeOfAbsence', "", TypeOfAbsenceType)
info.addfrom(n, 'Graphic1', " ")
info.addfrom(n, 'Graphic2', " ")
info.add_deldate(n)
return info
def IT028(n):
info = Info()
info.addfrom(n, 'LegalFact', '', code_label)
info.addfrom(n, 'Graphic', '')
info.addfrom(n, 'ExpiryDate', _("expires "), DateType)
info.add_deldate(n)
return info
def IT208(n):
info = Info()
# info.addfrom(n,'Date','',DateType)
info.addfrom(n, 'PseudoNationalNumber', '')
info.add_deldate(n)
return info
def IT073(n):
info = Info()
info.addfrom(n, 'Category', '', CategoryType)
info.addfrom(n, 'CertificateNumber', _("no."))
info.add_deldate(n)
return info
def IT074(n):
info = Info()
info.addfrom(n, 'SerialNumber')
info.addfrom(n, 'IdentificationNumber')
info.add_deldate(n)
return info
def FiliationType(n):
return code_label(n)
def ParentType(n):
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
return info
def StreetType(n):
# we don't print the code of streets
info = Info()
info.addfrom(n, 'Label', '')
# info.addfrom(n,'NationalNumber',' (',NationalNumberType,')')
return info
# return code_label(n)
def IT020(n):
def AddressType020(n):
info = Info()
info.addfrom(n, 'ZipCode', '')
info.addfrom(n, 'Street', '', StreetType)
info.addfrom(n, 'HouseNumber', _('no. '))
info.addfrom(n, 'Box', ' ')
return info
info = Info()
info.addfrom(n, "Address", '', AddressType020)
return info
def IT110(n):
# Filiation ascendante
info = Info()
info.addfrom(n, 'FiliationType', '', FiliationType)
info.addfrom(n, 'Parent1', _('of '), ParentType)
info.addfrom(n, 'Parent2', _('and '), ParentType)
info.addfrom(n, 'ActNumber', _("Act no. "))
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Graphic', " ")
info.add_deldate(n)
return info
def IT111(n):
# Statut de la personne représentée ou assistée
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Justification', '', JustificationType)
info.addfrom(n, 'Situation', '', SituationType111)
info.addfrom(n, 'Graphic', " ")
info.add_deldate(n)
return info
def IT113(n): # Guardian : Personne qui représente ou assiste
info = Info()
info.addfrom(n, 'Date', _("Date"), DateType)
info.addfrom(n, 'Status', _("Status"), code_label)
info.addfrom(n, 'Justification', _("Justification"), code_label)
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Graphic', " ")
info.addfrom(n, 'Country', " ", CountryType)
info.add_deldate(n)
return info
def IT140(n):
info = Info().addfrom(n, 'Name', ' ', NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
# info += _(' as ')
info.addfrom(n, 'FamilyRole', _('as '), code_label)
info.addfrom(n, 'Housing', None, HousingType)
info.add_deldate(n)
return info
def IT141(n):
info = Info()
info.addfrom(n, 'Housing', None, HousingType)
info.addfrom(n, 'FamilyRole', '', code_label)
info.addfrom(n, 'Name', _('in family headed by '), NameType)
info.addfrom(n, 'NationalNumber', ' (', NationalNumberType, ')')
info.add_deldate(n)
return info
def NationalityType(n):
return code_label(n)
def IT213(n): # Alias
info = Info()
info.addfrom(n, 'Name', '', NameType)
info.addfrom(n, 'Nationality', None, NationalityType)
info.addfrom(n, 'BirthDate', _(' born '), DateType)
info.addfrom(n, 'BirthPlace', _(' in '))
info.add_deldate(n)
return info
def TypeOfLicenseType(n):
return code_label(n)
def TypeOfLicenseType194(n):
return code_label(n)
def DeliveryType206(n):
v = getattr(n, 'Place', None)
if v:
return PlaceType(v)
return CountryType(n.Country)
def DeliveryType194(n):
info = Info().addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Label', '')
info.addfrom(n, 'Code', ' (', simpletype, ')')
# info.add_codelabel(n)
# info += code_label(n)
return info
def CategoryType(n):
return code_label(n)
def GearBoxType(n):
return code_label(n)
def MedicalType(n):
return code_label(n)
def LicenseCategoriesType(n):
info = Info()
# raise Exception(str(n))
# for cat in n.Category:
# info.addfrom(cat,'Category',' ',CategoryType)
info.chunks.append('/'.join([cat.Label for cat in n.Category]))
# info += code_label(n)
return info
def ForfeitureReasonType(n):
return code_label(n)
def IT191(n):
# info = code_label(n.TypeOfLicense)
info = Info().addfrom(n, 'TypeOfLicense', '', TypeOfLicenseType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Place', _('delivered in '), PlaceType)
info.addfrom(n, 'DeliveryCountry', ' (', CountryType, ')')
info.addfrom(n, 'ForfeitureReason', None, ForfeitureReasonType)
info.addfrom(n, 'ForfeitureDate', None, ForfeitureDateType)
# info.append()
# info.append(E.b(n.LicenseNumber))
# info.append(', categories '
# + ' '.join([cat.Label for cat in n.Categories.Category]))
# info.append(_(' delivered in '))
# info += code_label(n.Delivery.Place)
info.add_deldate(n)
return info
def IT194(n):
info = Info().addfrom(n, 'TypeOfLicense', '', TypeOfLicenseType194)
info.addfrom(n, 'Categories', _('categories '), LicenseCategoriesType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Delivery', _('delivered '), DeliveryType194)
info.addfrom(n, 'GearBox', None, GearBoxType)
info.addfrom(n, 'Medical', None, MedicalType)
info.addfrom(n, 'ExpiryDate', _('expires '), ExpiryDateType)
info.add_deldate(n)
return info
def IT198(n):
info = Info().addfrom(n, 'PermitNumber', _('no. '))
info.addfrom(n, 'Categories', _('categories '), LicenseCategoriesType)
info.addfrom(n, 'LicenseNumber', _('no. '))
info.addfrom(n, 'Delivery', _('delivered '), DeliveryType194)
info.addfrom(n, 'GearBox', None, GearBoxType)
info.addfrom(n, 'Medical', None, MedicalType)
info.addfrom(n, 'ExpiryDate', _('expires '), ExpiryDateType)
info.add_deldate(n)
return info
def TypeOfPassportType(n):
return code_label(n)
def PassportIdentType(n):
info = Info()
info.addfrom(n, 'PassportType', _('type '), TypeOfPassportType)
info.addfrom(n, 'PassportNumber', _('no. '))
return info
def IT199(n):
info = Info()
# info.chunks.append('Number ')
# info.chunks.append(E.b(n.PassportIdent.PassportNumber))
# info.append(', status ')
info.addfrom(n, 'Status', _("status"), code_label)
info.addfrom(n, 'PassportIdent', '', PassportIdentType)
info.addfrom(n, 'Issuer', _('issued by '), IssuerType)
info.addfrom(n, 'RenewalNumber', _('renewal no. '), boldstring)
info.addfrom(n, 'SerialNumber', _('serial no. '), boldstring)
info.addfrom(n, 'SecondNumber', _('second no. '), boldstring)
info.addfrom(n, 'ReplacementOf', _('replacement of '), boldstring)
info.addfrom(n, 'AdditionTo', _('addition to '), boldstring)
info.addfrom(n, 'ProductionDate', _('produced '), DateType)
info.addfrom(n, 'ExpiryDate', _('expires '), DateType)
# info.append(', type ')
# info += code_label(n.PassportIdent.PassportType)
# info.append(', expires ')
# info.append(E.b(dd.dtos(rn2date(n.ExpiryDate))))
# info.append(', delivered by ')
# info += code_label(n.Issuer.PosteDiplomatique)
# info.append(_(' renewal no. '))
# info.append(E.b(n.RenewalNumber))
info.add_deldate(n)
return info
def HousingType(n):
return code_label(n)
def ModificationTypeType(n):
return code_label(n)
def AddressType(n):
info = Info()
# pd = n.Address.Address
info.addfrom(n, 'Country', '', CountryType)
# info.append(', ')
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.addfrom(n, 'Graphic3', '')
# info.append(E.b(pd.Graphic1))
# info.append(', ')
# info.append(E.b(pd.Graphic2))
# info.append(', ')
# info.append(E.b(pd.Graphic3))
# info.addfrom(pd,'Graphic3')
return info
def CertificateType(n):
return code_label(n)
def IT200(n):
info = Info().addfrom(n, 'PublicSecurityNumber', _('no. '))
info.add_deldate(n)
return info
def IT202(n):
info = Info()
info.addfrom(n, 'Graphic1', '')
info.addfrom(n, 'Graphic2', '')
info.addfrom(n, 'Limosa', '', LimosaType)
info.add_deldate(n)
return info
def LimosaType(n):
info = Info()
info.addfrom(n, 'Reason1', '', LimosaReasonType)
info.addfrom(n, 'Reason2', '', LimosaReasonType)
info.addfrom(n, 'NationalNumber', _('SSIN '), NationalNumberType)
return info
def LimosaReasonType(n):
return code_label(n)
def IT205(n):
info = code_label(n)
info.add_deldate(n)
return info
def OrganizationType(n):
return code_label(n)
def GeneralInfoType(n):
info = code_label(n)
info.addfrom(n, 'Organization', _("Organization"), OrganizationType)
return info
def OrigineType(n):
return Info().add_codelabel(n)
def AppealType(n):
return code_label(n)
def StatusAppealType(n):
return code_label(n)
def ProcedureType(n):
info = Info()
info.addfrom(n, 'Origine', None, OrigineType)
info.addfrom(n, 'Reference')
info.addfrom(n, 'Appeal', None, AppealType)
info.addfrom(n, 'OpenClose', None, StatusAppealType)
info.addfrom(n, 'NationalNumber', _('SSIN '), NationalNumberType)
return info
def DecisionCancelledType(n):
info = Info()
info.addfrom(n, 'Date', None, DateType)
info.addfrom(n, 'Reference')
return info
def DelayLeaveGrantedType(n):
info = Info()
info.addfrom(n, 'Date', None, DateType)
return info
def StrikingOutType(n):
info = Info()
info.addfrom(n, 'Reference')
info.addfrom(n, 'OpenClose', None, OpenCloseType)
info.addfrom(n, 'Status', None, StrikingStatusType)
return info
def StrikingStatusType(n):
return code_label(n)
def TerritoryLeftType(n):
return code_label(n)
def OpenCloseType(n):
return code_label(n)
def ProtectionType(n):
info = code_label(n)
info.addfrom(n, 'Reference')
info.addfrom(n, 'Term')
return info
def AdviceFromCGVSType(n):
info = code_label(n)
info.addfrom(n, 'Reference')
return info
def ApplicationFiledType(n):
info = code_label(n)
info.addfrom(n, 'Place', _("in "), PlaceType)
return info
def DecisionType206(n):
# print 20150513, unicode(n).encode("ascii", errors="replace")
info = code_label(n)
info.addfrom(n, 'Reference', _("Reference"))
info.addfrom(n, 'OpenClose', _("Open/Close"), OpenCloseType)
info.addfrom(n, 'Comments')
info.addfrom(n, 'Term')
return info
def NotificationByDVZType(n):
info = Info()
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Reference')
return info
def NotificationByOrgType(n):
info = Info()
info.addfrom(n, 'Reference')
return info
def AppealLodgedType(n):
info = Info()
info.addfrom(n, 'Reference')
return info
def IT206(n):
def Status(n):
info = Info()
info.addfrom(n, 'Status')
return info
info = Info()
info.addfrom(n, 'GeneralInfo', '', GeneralInfoType)
info.addfrom(n, 'Procedure', _("Procedure"), ProcedureType)
info.addfrom(n, 'StrikingOut', None, StrikingOutType)
info.addfrom(n, 'DecisionCancelled',
_("Decision cancelled"), DecisionCancelledType)
info.addfrom(n, 'Protection', _("Protection"), ProtectionType)
info.addfrom(n, 'DelayLeaveGranted', None, DelayLeaveGrantedType)
info.addfrom(n, 'Escape', _("Escape"), Status)
info.addfrom(n, 'UnrestrictedStay', None, Status)
info.addfrom(n, 'ApplicationRenounced', _("Application renounced"), Status)
info.addfrom(n, 'TerritoryLeft', _("Territory left"), TerritoryLeftType)
info.addfrom(n, 'AdviceFromCGVS', None, AdviceFromCGVSType)
info.addfrom(n, 'Decision', _("Decision"), DecisionType206)
info.addfrom(n, 'ApplicationFiled',
_("Application filed"), ApplicationFiledType)
info.addfrom(n, 'NotificationByDVZ', None, NotificationByDVZType)
info.addfrom(n, 'NotificationByOrg', None, NotificationByOrgType)
info.addfrom(n, 'AppealLodged', None, AppealLodgedType)
info.add_deldate(n)
return info
def InitiativeType(n):
return code_label(n)
def SocialWelfareType(n):
info = Info()
info.addfrom(n, 'Place', _("in "), PlaceType)
info.addfrom(n, 'Initiative', None, InitiativeType)
info.add_deldate(n)
return info
def RefugeeCentreType(n):
return code_label(n)
def IT207(n):
info = Info()
info.addfrom(n, 'SocialWelfare',
_("Social Welfare Centre"), SocialWelfareType)
info.addfrom(n, 'RefugeeCentre', _("Refugee Centre"), RefugeeCentreType)
info.add_deldate(n)
return info
def RegistrationRegisterType(n):
return code_label(n)
def IT210(n):
info = Info()
info.addfrom(n, 'RegistrationRegister',
_("Registration register"), RegistrationRegisterType)
info.add_deldate(n)
return info
def IdentificationType(n):
return code_label(n)
def IT211(n):
info = Info()
info.addfrom(n, 'TypeOfDocument', '', IdentificationType)
info.add_deldate(n)
return info
def ChoosenResidenceType(n):
return code_label(n)
def IT212(n):
info = Info().addfrom(n, 'Residence', None, ChoosenResidenceType)
info.addfrom(n, 'Graphic', '')
info.add_deldate(n)
return info
def IT251(n):
info = Info()
info.add_deldate(n)
return info
def IT192(n):
info = Info().addfrom(n, 'Declaration', '', DeclarationType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.add_deldate(n)
return info
HANDLERS = dict()
def register_it_handler(name, label, subname, itname):
HANDLERS[name] = (label, subname, itname)
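# Each entry maps the name of a repeating element in the Tx25 reply to a
# (printable label, child element name, handler function name) triple. For
# example, the 'WorkPermits' registration below makes get_it_handler() iterate
# node.WorkPermit and format every item with IT198().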
register_it_handler('WorkPermits', _("Work Permits"), 'WorkPermit', 'IT198')
register_it_handler(
'PublicSecurityNumbers',
_("Public Security Numbers"), 'PublicSecurityNumber', 'IT200')
register_it_handler('SpecialInfos', _("Special Infos"), 'SpecialInfo', 'IT202')
register_it_handler('RefugeeTypes', _("Refugee Types"), 'RefugeeType', 'IT205')
register_it_handler('StatusOfRefugee', _("Status of refugee"),
'StatusOfRefugee', 'IT206')
register_it_handler('Passports', _("Passports"), 'Passport', 'IT199')
register_it_handler(
'OrganizationsInCharge',
_("Organizations in charge"), 'OrganizationInCharge', 'IT207')
register_it_handler(
'RegistrationRegisters',
_("Registration registers"), 'RegistrationRegister', 'IT210')
register_it_handler('ChoosenResidences',
_("Choosen residences"), 'ChoosenResidence', 'IT212')
register_it_handler('OrganDonations', _("Organ Donations"),
'OrganDonation', 'IT192')
register_it_handler('ResidenceUpdateDates',
_("Residence Update Dates"), 'ResidenceUpdateDate',
'IT251')
register_it_handler('DocumentTypes', _("Document Types"),
'DocumentType', 'IT211')
register_it_handler('NameModifications',
_("Name Modifications"), 'NameModification', 'IT013')
register_it_handler('CountriesOfOrigin',
_("Countries Of Origin"), 'CountryOfOrigin', 'IT006')
register_it_handler('ReturnPermissions',
_("Return permissions"), 'ReturnPermission', 'IT008')
register_it_handler('AddressDeclarationAbroad',
_("Address Declaration Abroad"), 'Address', 'IT018')
register_it_handler('TemporaryRegistrations',
_("Inscriptions Temporaires"),
'TemporaryRegistration', 'IT028')
register_it_handler('SpecialRetirementCertificates',
_("Special Retirement Certificates"),
'SpecialRetirementCertificate',
'IT074')
register_it_handler('RetirementCertificates',
_("Retirement Certificates"), 'RetirementCertificate',
'IT073')
register_it_handler('Guardians',
_("Guardians"), 'Guardian', 'IT113')
register_it_handler('PseudoNationalNumbers',
_("Pseudo National Numbers"), 'PseudoNationalNumber',
'IT208')
register_it_handler('TemporaryAbsences',
_("Temporary absences"), 'TemporaryAbsence', 'IT026')
register_it_handler('BurialModes',
_("Burial modes"), 'BurialMode', 'IT152')
register_it_handler('PostalAddressAbroad',
_("Postal address abroad"), 'PostalAddressAbroad', 'IT023')
register_it_handler('ParentalAuthorities',
_("Parental authorities"), 'ParentalAuthority', 'IT111')
class RowFactory(object):
# The result of a Tx25 consist of data rows, each of which has a
# given type. Consult the source code of this class to see how it
# works.
def start_group(self, group):
self.current_group = group
self.counter = 0
def datarow(self, node, since, info):
group = self.current_group
self.counter += 1
if node.__class__.__name__.startswith('IT'):
itnum = node.__class__.__name__[2:]
else:
itnum = ''
if hasattr(node, 'Type'):
group += " " + node.Type
# if hasattr(node,'Status'):
# group += " " + unicode(node.Status)
if hasattr(node, 'Structure'):
group += " " + node.Structure
return AttrDict(group=group,
counter=self.counter,
type=itnum,
since=rn2date(since),
info=E.p(*info.chunks))
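    # Illustrative shape of one emitted row (hypothetical values): an AttrDict
    # such as
    #
    #     AttrDict(group='Residences', counter=1, type='',
    #              since=IncompleteDate(2010, 4, 1),
    #              info=E.p(...))
    #
    # which is later rendered as one line of the printable document.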
def get_it_handler(self, itnode):
t = HANDLERS.get(itnode.__class__.__name__, None)
if t is None:
return t
g, subname, itname = t
it = globals().get(itname)
def f(node, name):
self.start_group(g)
for n in getattr(node, subname):
info = it(n)
yield self.datarow(n, n.Date, info)
return f
def IT000(self, n, name):
self.start_group(_("National Number"))
n = n.NationalNumber
info = Info(
E.b(n.NationalNumber),
' (' + str(cbss2gender(n.Sex)) + ')')
yield self.datarow(n, n.Date, info)
def IT019(self, n, name):
self.start_group(_("Address Change Declaration"))
info = Info()
def AddressType(n):
info = Info()
info.addfrom(n, 'Graphic', '')
return info
info.addfrom(n, 'Address', '', AddressType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def FileOwner(self, fo, name):
self.start_group(_("Residences"))
for n in fo.Residences:
info = Residence(n)
yield self.datarow(n, n.Date, info)
def AscertainedLegalMainAddresses(self, fo, name):
# Détermination de résidence
self.start_group(_("Ascertained Legal Main Addresses"))
# raise Exception(str(fo))
# raise Exception(repr([n for n in fo]))
for n in fo.AscertainedLegalMainAddress:
info = IT003(n)
yield self.datarow(n, n.Date, info)
def Pseudonyms(self, fo, name):
self.start_group(_("Pseudonyms")) # Pseudonymes
for n in fo.Pseudonym:
info = IT011(n)
yield self.datarow(n, n.Date, info)
def Aliases(self, fo, name):
self.start_group(_("Aliases"))
for n in fo.Alias:
info = IT213(n)
yield self.datarow(n, n.Date, info)
def AddressChangeIntention(self, fo, name):
self.start_group(
_("Address Change Intention")) # Intention de changer l'adresse
for n in fo.Address:
info = IT005(n)
yield self.datarow(n, n.Date, info)
def AddressReferences(self, fo, name):
self.start_group(_("Address References")) # Adresse de référence
for n in fo.AddressReference:
info = IT024(n)
yield self.datarow(n, n.Date, info)
def Names(self, node, name):
self.start_group(_("Names"))
# group = name
for n in node.Name:
info = Info().addfrom(n, 'Name', '', NameType)
yield self.datarow(n, n.Date, info)
def LegalMainAddresses(self, node, name):
self.start_group(_("Legal Main Addresses"))
for n in node.LegalMainAddress:
yield self.datarow(n, n.Date, IT020(n))
def ResidenceAbroad(self, node, name): # IT022
def ResidenceAbroadAddressType(n):
info = Info('Address')
info.addfrom(n, 'PosteDiplomatique', None, DiplomaticPostType)
info.addfrom(n, 'Territory', ' ', TerritoryType)
info.addfrom(n, 'Address', ' ', AddressType)
return info
self.start_group(_("Residence Abroad"))
for n in node.ResidenceAbroad:
info = Info()
info.addfrom(n, 'Address', '', ResidenceAbroadAddressType)
# info += code_label(n.Address.PosteDiplomatique)
# info.append(', ')
# info += code_label(n.Address.Territory)
# info.append(', ')
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def Nationalities(self, node, name):
self.start_group(_("Nationalities"))
for n in node.Nationality:
info = code_label(n.Nationality)
yield self.datarow(n, n.Date, info)
def Occupations(self, node, name):
self.start_group(_("Occupations"))
for n in node.Occupation:
info = code_label(n.Occupation)
info.addfrom(n, 'SocialCategory', ' (SC ', code_label, ')')
yield self.datarow(n, n.Date, info)
def IT100(self, n, name):
self.start_group(_("Birth Place"))
info = Info()
info.addfrom(n, 'Place1', _('in '), PlaceType)
info.addfrom(n, 'Place2', _('in '), GraphicPlaceType)
info.addfrom(n, 'ActNumber', _("Act no. "))
info.addfrom(n, 'SuppletoryRegister')
yield self.datarow(n, n.Date, info)
def IT101(self, n, name):
self.start_group(
_("Declared Birth Date")) # Date de naissance déclarée
info = Info()
info.addfrom(n, 'DeclaredBirthDate', '', DateType)
info.addfrom(n, 'Certificate', '', CertificateType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def Filiations(self, node, name):
self.start_group(_("Filiations"))
for n in node.Filiation:
info = IT110(n)
yield self.datarow(n, n.Date, info)
def CivilStates(self, node, name):
self.start_group(_("Civil States")) # IT120
for n in node.CivilState:
info = code_label(n.CivilState)
if hasattr(n, 'Spouse'):
# info.append(' with ')
# info += name2info(n.Spouse.Name)
info.addfrom(n.Spouse, 'Name', _('with '), NameType)
info.chunks.append(' (')
info.chunks.append(n.Spouse.NationalNumber.NationalNumber)
info.chunks.append(')')
info.addfrom(n, 'Lieu', _('in '), LieuType)
# info += LieuType(n.Lieu)
info.addfrom(n, 'ActNumber', _("Act no. "))
# info.addfrom(n,'ActNumber')
info.addfrom(n, 'SuppletoryRegister')
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def HeadOfFamily(self, node, name):
self.start_group(_("Head Of Family"))
for n in node.HeadOfFamily:
info = IT140(n)
yield self.datarow(n, n.Date, info)
def FamilyMembers(self, node, name):
self.start_group(_("Family Members"))
for n in node.FamilyMember:
info = IT141(n)
yield self.datarow(n, n.Date, info)
def DrivingLicensesOldModel(self, node, name):
self.start_group(_("Driving Licenses Old Model"))
for n in node.DrivingLicense:
info = IT194(n)
yield self.datarow(n, n.Date, info)
def DrivingLicenses(self, node, name):
self.start_group(_("Driving Licenses"))
for n in node.DrivingLicense:
info = IT191(n)
yield self.datarow(n, n.Date, info)
def IdentityCards(self, node, name):
self.start_group(_("Identity Cards"))
for n in node.IdentityCard:
info = code_label(n.TypeOfCard)
info.chunks.append(' ')
info.chunks.append(_('no. '))
info.chunks.append(E.b(n.CardNumber))
info.addfrom(n, 'ExpiryDate', _('expires '), DateType)
# info.chunks.append(E.b(dd.dtos(rn2date(n.ExpiryDate))))
info.addfrom(n, 'Delivery', _('delivered in '), DeliveryType206)
# info.chunks.append(', delivered in ')
# info += code_label(n.Delivery.Place)
yield self.datarow(n, n.Date, info)
def LegalCohabitations(self, node, name):
def CessationType(n):
info = Info()
info.addfrom(n, 'Reason', _("Reason"), ReasonType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Notification', _('in '), NotificationType)
return info
def DeclarationType(n):
info = Info()
info.addfrom(n, 'RegistrationDate', '', DateType)
info.addfrom(n, 'Partner', _('with '), PartnerType)
info.addfrom(n, 'Place', _('in '), PlaceType)
info.addfrom(n, 'Notary', _('in '), NotaryType)
return info
self.start_group(_("Legal cohabitations"))
for n in node.LegalCohabitation:
info = Info()
info.addfrom(n, 'Declaration', _("Declaration"), DeclarationType)
info.addfrom(n, 'Cessation', _("Cessation"), CessationType)
info.add_deldate(n)
yield self.datarow(n, n.Date, info)
def IT253(self, node, name):
self.start_group(_("Creation Date"))
n = node # res.CreationDate
info = Info()
yield self.datarow(n, n.Date, info)
def IT254(self, node, name):
self.start_group(_("Last Update"))
n = node # res.LastUpdateDate
info = Info()
yield self.datarow(n, n.Date, info)
class RetrieveTIGroupsResult(ConfidentialResultsTable):
master = 'cbss.RetrieveTIGroupsRequest'
master_key = None
column_names = 'group:18 type:5 since:14 info:50'
@dd.displayfield(_("Group"))
def group(self, obj, ar):
if obj.counter == 1:
return obj.group
return ''
@dd.displayfield(_("TI"))
def type(self, obj, ar):
if obj.counter == 1:
return obj.type
return ''
@dd.virtualfield(models.DateField(_("Since")))
def since(self, obj, ar):
return obj.since
@dd.displayfield(_("Info"))
def info(self, obj, ar):
return obj.info
@classmethod
def get_data_rows(self, ar):
rti = ar.master_instance
if rti is None:
# print "20130425 rti is None"
return
self.check_permission(rti, ar)
# if not ipr.status in (RequestStates.ok,RequestStates.fictive):
# if not rti.status in (RequestStates.ok,RequestStates.warnings):
# return
reply = rti.get_service_reply()
if reply is None:
# print "20130425 reply is None"
return
# print "20130425 ok"
reply_has_result(reply)
res = reply.rrn_it_implicit
rf = RowFactory()
for name, node in res:
# print 20130425, name, node.__class__
m = getattr(rf, node.__class__.__name__, None)
if m is None:
m = rf.get_it_handler(node)
if m is None:
raise Exception("No handler for %s (%s)"
% (name, node.__class__.__name__))
for row in m(node, name):
yield row
| agpl-3.0 | -3,831,657,913,008,972,300 | 27.594432 | 79 | 0.599337 | false |
XBigTK13X/olava | source/builder.py | 1 | 1909 | import os
import datetime
import config
import pickle
from jinja2 import Environment, FileSystemLoader
pwd = os.path.dirname(os.path.abspath(__file__))
templates = Environment(loader=FileSystemLoader(os.path.join(pwd, 'templates')))
def createIndex(games,
platforms,
platformsOrder,
dayOrder,
releaseCount,
googleAnalyticsId):
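    """Render the index template to index.html under BuildOutputRoot,
    archive a dated HTML copy under BuildOutputRoot/archive, and pickle
    the raw template data alongside it."""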
global templates
rawData = {
'games': games,
'platforms': platforms,
'dayOrder': dayOrder,
'releaseCount': releaseCount,
'googleAnalyticsId': googleAnalyticsId
}
template = templates.get_template('index.html')
indexContent = template.render(games=games,
platforms=platforms,
platformsOrder=platformsOrder,
dayOrder=dayOrder,
releaseCount=releaseCount,
googleAnalyticsId=googleAnalyticsId)
if not os.path.exists(config.get().BuildOutputRoot):
os.makedirs(config.get().BuildOutputRoot)
indexPath = os.path.join(config.get().BuildOutputRoot, 'index.html')
with open(indexPath, 'w') as indexFile:
indexFile.write(indexContent)
print("Index file written to "+indexPath)
archiveRoot = os.path.join(config.get().BuildOutputRoot, 'archive')
if not os.path.exists(archiveRoot):
os.makedirs(archiveRoot)
dateToday = datetime.date.today()
archivePath = os.path.join(archiveRoot, str(dateToday))+".html"
with open(archivePath, 'w') as archiveFile:
archiveFile.write(indexContent)
rawPath = archivePath.replace('.html', '.pickle')
print("Archive file written to "+archivePath)
with open(rawPath, 'wb') as fp:
pickle.dump(rawData, fp)
print("Pickled raw data file written to "+rawPath)
| apache-2.0 | -1,568,970,124,760,492,500 | 37.959184 | 80 | 0.619172 | false |
mistercrunch/panoramix | superset/views/chart/views.py | 1 | 3873 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from flask import g
from flask_appbuilder import expose, has_access
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from superset import db, is_feature_enabled
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import MODEL_VIEW_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.models.slice import Slice
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
check_ownership,
common_bootstrap_payload,
DeleteMixin,
SupersetModelView,
)
from superset.views.chart.mixin import SliceMixin
from superset.views.utils import bootstrap_user_data
class SliceModelView(
SliceMixin, SupersetModelView, DeleteMixin
): # pylint: disable=too-many-ancestors
route_base = "/chart"
datamodel = SQLAInterface(Slice)
include_route_methods = RouteMethod.CRUD_SET | {
RouteMethod.DOWNLOAD,
RouteMethod.API_READ,
RouteMethod.API_DELETE,
}
class_permission_name = "Chart"
method_permission_name = MODEL_VIEW_RW_METHOD_PERMISSION_MAP
def pre_add(self, item: "SliceModelView") -> None:
utils.validate_json(item.params)
def pre_update(self, item: "SliceModelView") -> None:
utils.validate_json(item.params)
check_ownership(item)
def pre_delete(self, item: "SliceModelView") -> None:
check_ownership(item)
@expose("/add", methods=["GET", "POST"])
@has_access
def add(self) -> FlaskResponse:
datasources = [
{"value": str(d.id) + "__" + d.type, "label": repr(d)}
for d in ConnectorRegistry.get_all_datasources(db.session)
]
payload = {
"datasources": sorted(
datasources,
key=lambda d: d["label"].lower() if isinstance(d["label"], str) else "",
),
"common": common_bootstrap_payload(),
"user": bootstrap_user_data(g.user),
}
return self.render_template(
"superset/add_slice.html", bootstrap_data=json.dumps(payload)
)
@expose("/list/")
@has_access
def list(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return super().list()
return super().render_app_template()
class SliceAsync(SliceModelView): # pylint: disable=too-many-ancestors
route_base = "/sliceasync"
include_route_methods = {RouteMethod.API_READ}
list_columns = [
"changed_on",
"changed_on_humanized",
"creator",
"datasource_id",
"datasource_link",
"datasource_url",
"datasource_name_text",
"datasource_type",
"description",
"description_markeddown",
"edit_url",
"icons",
"id",
"modified",
"owners",
"params",
"slice_link",
"slice_name",
"slice_url",
"viz_type",
]
label_columns = {"icons": " ", "slice_link": _("Chart")}
| apache-2.0 | 1,508,857,023,377,381,400 | 32.102564 | 88 | 0.657113 | false |
hackerspace-ntnu/website | files/views.py | 1 | 1896 | from django.shortcuts import render
from .models import Image, FileCategory
from .forms import ImageForm
from django.views.generic import CreateView, DeleteView, UpdateView, ListView, View
from django.shortcuts import redirect, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
class ImageDeleteView(PermissionRequiredMixin, DeleteView):
model = Image
success_url = '/files/images'
permission_required = "files.delete_image"
class ImageListView(PermissionRequiredMixin, ListView):
queryset = Image.objects.order_by('img_category', '-time')
template_name = 'files/images.html'
permission_required = 'files.view_image'
context_object_name = 'categories'
def get_queryset(self):
categorized = {}
for category in FileCategory.objects.all().order_by('name'):
category_images = Image.objects.filter(img_category=category).order_by('-time')
if category_images:
categorized[category.name] = category_images
return categorized
class ImageView(PermissionRequiredMixin, View):
permission_required = "files.view_image"
def get(self, request, *args, **kwargs):
image = get_object_or_404(Image, pk=kwargs['pk'])
return HttpResponseRedirect('/media/'+str(image.file))
@login_required()
def imageUpload(request):
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES, prefix='img')
if form.is_valid():
image = form.save(commit=False)
image.save()
return render(request, 'files/single-image.html', {'image':image})
else:
return HttpResponse(form.errors)
else:
return HttpResponseRedirect('/')
| mit | -5,348,036,515,881,527,000 | 37.693878 | 91 | 0.695148 | false |
teltek/edx-platform | common/djangoapps/student/admin.py | 1 | 10794 | """ Django admin pages for student app """
from config_models.admin import ConfigurationModelAdmin
from django import forms
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField, UserChangeForm as BaseUserChangeForm
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.waffle_utils import WaffleSwitch
from openedx.core.lib.courses import clean_course_id
from student import STUDENT_WAFFLE_NAMESPACE
from student.models import (
CourseAccessRole,
CourseEnrollment,
CourseEnrollmentAllowed,
DashboardConfiguration,
LinkedInAddToProfileConfiguration,
PendingNameChange,
Registration,
RegistrationCookieConfiguration,
UserAttribute,
UserProfile,
UserTestGroup
)
from student.roles import REGISTERED_ACCESS_ROLES
from xmodule.modulestore.django import modulestore
User = get_user_model() # pylint:disable=invalid-name
# This switch exists because the CourseEnrollment admin views make DB queries that impact performance.
# In a large enough deployment of Open edX, this is enough to cause a site outage.
# See https://openedx.atlassian.net/browse/OPS-2943
COURSE_ENROLLMENT_ADMIN_SWITCH = WaffleSwitch(STUDENT_WAFFLE_NAMESPACE, 'courseenrollment_admin')
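# As an illustration only (the namespaced switch name is assumed), the switch
# can be flipped with django-waffle's management command:
#
#     ./manage.py waffle_switch student.courseenrollment_admin on --create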
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
Validate the course id
"""
if self.cleaned_data['course_id']:
return clean_course_id(self)
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
        Look up the user object for the given email address.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
        Reject records that duplicate an existing course access role.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
@admin.register(CourseAccessRole)
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
@admin.register(LinkedInAddToProfileConfiguration)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(CourseEnrollmentForm, self).__init__(*args, **kwargs)
if self.data.get('course'):
try:
self.data['course'] = CourseKey.from_string(self.data['course'])
except InvalidKeyError:
raise forms.ValidationError("Cannot make a valid CourseKey from id {}!".format(self.data['course']))
def clean_course_id(self):
course_id = self.cleaned_data['course']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError("Cannot make a valid CourseKey from id {}!".format(course_id))
if not modulestore().has_course(course_key):
raise forms.ValidationError("Cannot find course with id {} in the modulestore".format(course_id))
return course_key
class Meta:
model = CourseEnrollment
fields = '__all__'
@admin.register(CourseEnrollment)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course__id', 'mode', 'user__username',)
form = CourseEnrollmentForm
def get_search_results(self, request, queryset, search_term):
qs, use_distinct = super(CourseEnrollmentAdmin, self).get_search_results(request, queryset, search_term)
# annotate each enrollment with whether the username was an
# exact match for the search term
qs = qs.annotate(exact_username_match=models.Case(
models.When(user__username=search_term, then=models.Value(True)),
default=models.Value(False),
output_field=models.BooleanField()))
# present exact matches first
qs = qs.order_by('-exact_username_match', 'user__username', 'course_id')
return qs, use_distinct
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
def has_permission(self, request, method):
"""
Returns True if the given admin method is allowed.
"""
if COURSE_ENROLLMENT_ADMIN_SWITCH.is_enabled():
return getattr(super(CourseEnrollmentAdmin, self), method)(request)
return False
def has_add_permission(self, request):
"""
Returns True if CourseEnrollment objects can be added via the admin view.
"""
return self.has_permission(request, 'has_add_permission')
def has_change_permission(self, request, obj=None):
"""
Returns True if CourseEnrollment objects can be modified via the admin view.
"""
return self.has_permission(request, 'has_change_permission')
def has_delete_permission(self, request, obj=None):
"""
Returns True if CourseEnrollment objects can be deleted via the admin view.
"""
return self.has_permission(request, 'has_delete_permission')
def has_module_permission(self, request):
"""
Returns True if links to the CourseEnrollment admin view can be displayed.
"""
return self.has_permission(request, 'has_module_permission')
class UserProfileInline(admin.StackedInline):
""" Inline admin interface for UserProfile model. """
model = UserProfile
can_delete = False
verbose_name_plural = _('User profile')
class UserChangeForm(BaseUserChangeForm):
"""
Override the default UserChangeForm such that the password field
does not contain a link to a 'change password' form.
"""
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=_(
"Raw passwords are not stored, so there is no way to see this "
"user's password."
),
)
class UserAdmin(BaseUserAdmin):
""" Admin interface for the User model. """
inlines = (UserProfileInline,)
form = UserChangeForm
def get_readonly_fields(self, request, obj=None):
"""
Allows editing the users while skipping the username check, so we can have Unicode username with no problems.
The username is marked read-only when editing existing users regardless of `ENABLE_UNICODE_USERNAME`, to simplify the bokchoy tests.
"""
django_readonly = super(UserAdmin, self).get_readonly_fields(request, obj)
if obj:
return django_readonly + ('username',)
return django_readonly
@admin.register(UserAttribute)
class UserAttributeAdmin(admin.ModelAdmin):
""" Admin interface for the UserAttribute model. """
list_display = ('user', 'name', 'value',)
list_filter = ('name',)
raw_id_fields = ('user',)
search_fields = ('name', 'value', 'user__username',)
class Meta(object):
model = UserAttribute
@admin.register(CourseEnrollmentAllowed)
class CourseEnrollmentAllowedAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollmentAllowed model. """
list_display = ('email', 'course_id', 'auto_enroll',)
search_fields = ('email', 'course_id',)
class Meta(object):
model = CourseEnrollmentAllowed
admin.site.register(UserTestGroup)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(RegistrationCookieConfiguration, ConfigurationModelAdmin)
# We must first un-register the User model since it may also be registered by the auth app.
try:
admin.site.unregister(User)
except NotRegistered:
pass
admin.site.register(User, UserAdmin)
| agpl-3.0 | 3,747,868,117,561,648,600 | 33.932039 | 140 | 0.655642 | false |
lensvol/repocket | repocket/main.py | 1 | 4195 | #!/usr/bin/python
import os
from collections import namedtuple
import yaml
from click import command, confirm, echo, option, prompt, secho
from pocket import Pocket
from rules import DEFAULT_RULES, compile_rules
PocketItem = namedtuple('PocketItem', ['id', 'url', 'tags', 'title'])
def save_config(path, cfg_dict):
with open(path, 'w') as fp:
fp.write(yaml.dump(cfg_dict))
return True
def load_config(path):
try:
with open(path, 'r') as fp:
            return yaml.safe_load(fp.read())
except IOError:
return {}
def get_consumer_key():
return prompt('Please enter your Pocket consumer key')
def get_access_token(consumer_key):
request_token = Pocket.get_request_token(
consumer_key=consumer_key,
redirect_uri='localhost',
)
auth_url = Pocket.get_auth_url(
code=request_token,
redirect_uri='localhost',
)
echo('Please, open this URL in your browser: {}'.format(auth_url))
    if confirm('Did you go to that link?'):
echo('Getting credentials...')
credentials = Pocket.get_credentials(
consumer_key=consumer_key,
code=request_token,
)
return credentials['access_token']
def retrieve_items(pocket, count=10, sort=None, full=True):
call_args = dict(sort=sort or 'newest')
if full:
call_args['detailType'] = 'complete'
if count:
call_args['count'] = count
returned_items = pocket.get(**call_args)[0]['list']
for item_id, resp_item in returned_items.iteritems():
yield PocketItem(
item_id,
resp_item['resolved_url'],
resp_item.get('tags', {}).keys(),
resp_item['resolved_title']
)
def print_item(item, suggested_tags):
secho(u'Title:\t', fg='cyan', nl=False)
echo(item.title)
secho('URL:\t', fg='cyan', nl=False)
echo(item.url)
if item.tags:
secho('Existing tags:\t', fg='cyan', nl=False)
echo(', '.join(item.tags))
secho('Added tags:\t', fg='cyan', nl=False)
echo(', '.join(suggested_tags))
echo()
@command()
@option('--count', default=25, help='Number of items to process.')
@option('--dry-run', is_flag=True)
@option('-a', '--process-all', is_flag=True)
def processor(count, process_all, dry_run):
cfg_path = os.path.join(
os.path.expanduser('~'),
'.repocket.yml',
)
cfg = load_config(cfg_path)
creds = cfg.get('credentials', {})
consumer_key = creds.get('consumer_key')
access_token = creds.get('access_token')
if not consumer_key or not access_token:
consumer_key = get_consumer_key()
access_token = get_access_token(consumer_key)
cfg['credentials'] = {
'consumer_key': str(consumer_key),
'access_token': str(access_token),
}
secho('Your consumer key: ', fg='cyan', nl=False)
secho(consumer_key)
secho('Your access token: ', fg='cyan', nl=False)
secho(access_token)
echo()
api_connector = Pocket(consumer_key, access_token)
cfg.setdefault('rules', DEFAULT_RULES)
rules = compile_rules(cfg['rules'])
save_config(cfg_path, cfg)
secho('Processing items...', fg='cyan')
modified_items = []
items = retrieve_items(api_connector, count=process_all and 0 or count)
for item in items:
suggested_for_item = set()
for rule in rules:
tags = rule.suggest_tags(item)
suggested_for_item.update(tags or [])
new_tags = suggested_for_item - set(item.tags)
if new_tags:
modified_items.append((item, new_tags))
if modified_items:
echo()
for saved_item, suggested_tags in modified_items:
print_item(saved_item, suggested_tags)
api_connector.tags_add(saved_item.id, ','.join(list(suggested_tags)))
if not dry_run:
api_connector.commit()
secho('Changes are sent to server.', fg='green')
else:
secho('"Dry run", no changes are sent to server.', fg='yellow')
else:
secho('No changes have been made.', fg='green')
if __name__ == '__main__':
processor()
| mit | -4,351,194,532,131,688,400 | 28.335664 | 81 | 0.597616 | false |
conversationai/conversationai-models | experiments/tf_trainer/common/token_embedding_index_test.py | 1 | 1553 | # coding=utf-8
# Copyright 2018 The Conversation-AI.github.io Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfrecord_input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_trainer.common.token_embedding_index import LoadTokenIdxEmbeddings
class LoadTokenIdxEmbeddingsTest(tf.test.TestCase):
def test_LoadTokenIdxEmbeddings(self):
idx, embeddings, unknown_idx, embedding_size = LoadTokenIdxEmbeddings(
'testdata/cats_and_dogs_onehot.vocab.txt')
self.assertEqual(embedding_size, 6)
self.assertEqual(unknown_idx, 7)
self.assertEqual(idx['dogs'], 1)
self.assertEqual(idx['cats'], 2)
self.assertEqual(idx['not'], 6)
self.assertEqual(embeddings[1][0], 1.0)
self.assertEqual(embeddings[1][1], 0.0)
# Note: padding embedding will be random, and is index 0. Also the unknown
# token embedding will be random, and is index n+1; 7 in this case.
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 2,917,986,723,322,802,000 | 35.97619 | 78 | 0.732131 | false |
jinankjain/zamboni | mkt/ratings/serializers.py | 1 | 5467 | from django.core.urlresolvers import reverse
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied
from reviews.models import Review, ReviewFlag
from mkt.account.serializers import AccountSerializer
from mkt.api.fields import SlugOrPrimaryKeyRelatedField, SplitField
from mkt.api.exceptions import Conflict
from mkt.regions import get_region
from mkt.versions.serializers import SimpleVersionSerializer
from mkt.webapps.models import Webapp
class RatingSerializer(serializers.ModelSerializer):
app = SplitField(
SlugOrPrimaryKeyRelatedField(slug_field='app_slug',
queryset=Webapp.objects.all(),
source='addon'),
serializers.HyperlinkedRelatedField(view_name='app-detail',
read_only=True, source='addon'))
body = serializers.CharField()
user = AccountSerializer(read_only=True)
report_spam = serializers.SerializerMethodField('get_report_spam_link')
resource_uri = serializers.HyperlinkedIdentityField(
view_name='ratings-detail')
is_author = serializers.SerializerMethodField('get_is_author')
has_flagged = serializers.SerializerMethodField('get_has_flagged')
version = SimpleVersionSerializer(read_only=True)
class Meta:
model = Review
fields = ('app', 'body', 'created', 'has_flagged', 'is_author',
'modified', 'rating', 'report_spam', 'resource_uri', 'user',
'version')
def __init__(self, *args, **kwargs):
super(RatingSerializer, self).__init__(*args, **kwargs)
if 'request' in self.context:
self.request = self.context['request']
else:
self.request = None
if not self.request or not self.request.amo_user:
self.fields.pop('is_author')
self.fields.pop('has_flagged')
if self.request and self.request.method in ('PUT', 'PATCH'):
# Don't let users modify 'app' field at edit time
self.fields['app'].read_only = True
def get_report_spam_link(self, obj):
return reverse('ratings-flag', kwargs={'pk': obj.pk})
def get_is_author(self, obj):
return obj.user.pk == self.request.amo_user.pk
def get_has_flagged(self, obj):
return (not self.get_is_author(obj) and
obj.reviewflag_set.filter(user=self.request.amo_user).exists())
def validate(self, attrs):
if not getattr(self, 'object'):
# If we are creating a rating, then we need to do various checks on
# the app. Because these checks need the version as well, we have
# to do them here and not in validate_app().
# Assign user and ip_address. It won't change once the review is
# created.
attrs['user'] = self.request.amo_user
attrs['ip_address'] = self.request.META.get('REMOTE_ADDR', '')
# If the app is packaged, add in the current version.
if attrs['addon'].is_packaged:
attrs['version'] = attrs['addon'].current_version
# Return 409 if the user has already reviewed this app.
app = attrs['addon']
amo_user = self.request.amo_user
qs = self.context['view'].queryset.filter(addon=app, user=amo_user)
if app.is_packaged:
qs = qs.filter(version=attrs['version'])
if qs.exists():
raise Conflict('You have already reviewed this app.')
# Return 403 is the app is not public.
if not app.is_public():
raise PermissionDenied('The app requested is not public.')
# Return 403 if the user is attempting to review their own app.
if app.has_author(amo_user):
raise PermissionDenied('You may not review your own app.')
# Return 403 if not a free app and the user hasn't purchased it.
if app.is_premium() and not app.is_purchased(amo_user):
raise PermissionDenied("You may not review paid apps you "
"haven't purchased.")
# Return 403 if the app is not available in the current region.
current_region = get_region()
if not app.listed_in(region=current_region):
raise PermissionDenied('App not available in region "%s".' %
current_region.slug)
return attrs
def validate_app(self, attrs, source):
# Don't allow users to change the app on an existing rating.
if getattr(self, 'object'):
attrs[source] = self.object.addon
return attrs
class RatingFlagSerializer(serializers.ModelSerializer):
user = serializers.Field()
review_id = serializers.Field()
class Meta:
model = ReviewFlag
fields = ('review_id', 'flag', 'note', 'user')
def validate(self, attrs):
attrs['user'] = self.context['request'].amo_user
attrs['review_id'] = self.context['view'].kwargs['review']
if 'note' in attrs and attrs['note'].strip():
attrs['flag'] = ReviewFlag.OTHER
if ReviewFlag.objects.filter(review_id=attrs['review_id'],
user=attrs['user']).exists():
raise Conflict('You have already flagged this review.')
return attrs
| bsd-3-clause | 745,364,576,226,022,000 | 40.732824 | 79 | 0.605817 | false |
d120/pyofahrt | ofahrtbase/migrations/0020_auto_20171016_2206.py | 1 | 2501 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-16 20:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ofahrtbase', '0019_auto_20161020_1750'),
]
operations = [
migrations.AlterField(
model_name='ofahrt',
name='max_members',
field=models.IntegerField(
default=70,
help_text=
'Dieser Wert bestimmt die maximale Größe der Festanmeldeliste.',
verbose_name='Maximale Teilnehmendenzahl'),
),
migrations.AlterField(
model_name='ofahrt',
name='member_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, können sich Teilnehmer*innen registrieren.',
verbose_name='Teilnehmeregistrierung'),
),
migrations.AlterField(
model_name='ofahrt',
name='orga_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, können sich Studierende als Ofahrtorga bewerben.',
verbose_name='Orgaregistrierung'),
),
migrations.AlterField(
model_name='ofahrt',
name='queue_tolerance',
field=models.IntegerField(
default=20,
help_text=
'Dieser Wert legt fest, ab wann Neuanmeldungen von Teilnehmer*innen in die Warteschlange müssen. (Warteschlange falls: aktuelle Festanmeldungen + aktuell vorläufige Anmeldungen > maximale Festanmeldungen + dieser Wert)',
verbose_name='Warteschlangentoleranz'),
),
migrations.AlterField(
model_name='ofahrt',
name='self_participation',
field=models.IntegerField(
default=2000,
help_text='Eingenanteil der Teilnehmer*innen in Cent',
verbose_name='Teilnahmebeitrag'),
),
migrations.AlterField(
model_name='ofahrt',
name='workshop_reg_open',
field=models.BooleanField(
default=False,
help_text=
'Ist dieser Wert aktiviert, werden derzeit Workshops gesucht.',
verbose_name='Workshopregistrierung'),
),
]
| agpl-3.0 | 1,181,482,808,610,669,800 | 35.691176 | 236 | 0.558717 | false |
mozilla/kitchensinkserver | vendor-local/lib/python/tastypie/api.py | 1 | 6721 | import warnings
from django.conf.urls.defaults import *
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from tastypie.exceptions import NotRegistered, BadRequest
from tastypie.serializers import Serializer
from tastypie.utils import trailing_slash, is_valid_jsonp_callback_value
from tastypie.utils.mime import determine_format, build_content_type
class Api(object):
"""
Implements a registry to tie together the various resources that make up
an API.
Especially useful for navigation, HATEOAS and for providing multiple
versions of your API.
Optionally supplying ``api_name`` allows you to name the API. Generally,
this is done with version numbers (i.e. ``v1``, ``v2``, etc.) but can
be named any string.
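    A minimal usage sketch (``NoteResource`` is a hypothetical resource)::
        from tastypie.api import Api
        v1_api = Api(api_name='v1')
        v1_api.register(NoteResource())
        # then route requests by including ``v1_api.urls`` in the URLconf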
"""
def __init__(self, api_name="v1", serializer_class=Serializer):
self.api_name = api_name
self._registry = {}
self._canonicals = {}
self.serializer = serializer_class()
def register(self, resource, canonical=True):
"""
Registers an instance of a ``Resource`` subclass with the API.
Optionally accept a ``canonical`` argument, which indicates that the
resource being registered is the canonical variant. Defaults to
``True``.
"""
resource_name = getattr(resource._meta, 'resource_name', None)
if resource_name is None:
raise ImproperlyConfigured("Resource %r must define a 'resource_name'." % resource)
self._registry[resource_name] = resource
if canonical is True:
if resource_name in self._canonicals:
warnings.warn("A new resource '%r' is replacing the existing canonical URL for '%s'." % (resource, resource_name), Warning, stacklevel=2)
self._canonicals[resource_name] = resource
# TODO: This is messy, but makes URI resolution on FK/M2M fields
# work consistently.
resource._meta.api_name = self.api_name
resource.__class__.Meta.api_name = self.api_name
def unregister(self, resource_name):
"""
If present, unregisters a resource from the API.
"""
if resource_name in self._registry:
del(self._registry[resource_name])
if resource_name in self._canonicals:
del(self._canonicals[resource_name])
def canonical_resource_for(self, resource_name):
"""
Returns the canonical resource for a given ``resource_name``.
"""
if resource_name in self._canonicals:
return self._canonicals[resource_name]
raise NotRegistered("No resource was registered as canonical for '%s'." % resource_name)
def wrap_view(self, view):
def wrapper(request, *args, **kwargs):
try:
return getattr(self, view)(request, *args, **kwargs)
except BadRequest:
return HttpResponseBadRequest()
return wrapper
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
@property
def urls(self):
"""
Provides URLconf details for the ``Api`` and all registered
``Resources`` beneath it.
"""
pattern_list = [
url(r"^(?P<api_name>%s)%s$" % (self.api_name, trailing_slash()), self.wrap_view('top_level'), name="api_%s_top_level" % self.api_name),
]
for name in sorted(self._registry.keys()):
self._registry[name].api_name = self.api_name
pattern_list.append((r"^(?P<api_name>%s)/" % self.api_name, include(self._registry[name].urls)))
urlpatterns = self.prepend_urls()
overridden_urls = self.override_urls()
if overridden_urls:
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urlpatterns += overridden_urls
urlpatterns += patterns('',
*pattern_list
)
return urlpatterns
def top_level(self, request, api_name=None):
"""
A view that returns a serialized list of all resources registers
to the ``Api``. Useful for discovery.
"""
available_resources = {}
if api_name is None:
api_name = self.api_name
for name in sorted(self._registry.keys()):
available_resources[name] = {
'list_endpoint': self._build_reverse_url("api_dispatch_list", kwargs={
'api_name': api_name,
'resource_name': name,
}),
'schema': self._build_reverse_url("api_get_schema", kwargs={
'api_name': api_name,
'resource_name': name,
}),
}
desired_format = determine_format(request, self.serializer)
options = {}
if 'text/javascript' in desired_format:
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
serialized = self.serializer.serialize(available_resources, desired_format, options)
return HttpResponse(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedApi._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
class NamespacedApi(Api):
"""
An API subclass that respects Django namespaces.
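    For illustration (the namespace value below is hypothetical)::
        api = NamespacedApi(api_name='v1', urlconf_namespace='api')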
"""
def __init__(self, api_name="v1", urlconf_namespace=None, **kwargs):
super(NamespacedApi, self).__init__(api_name=api_name, **kwargs)
self.urlconf_namespace = urlconf_namespace
def register(self, resource, canonical=True):
super(NamespacedApi, self).register(resource, canonical=canonical)
if canonical is True:
# Plop in the namespace here as well.
resource._meta.urlconf_namespace = self.urlconf_namespace
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
| bsd-3-clause | -851,501,442,314,775,000 | 35.726776 | 153 | 0.613599 | false |
mfiers/Moa | moa/utils.py | 1 | 7301 | #!/usr/bin/env python
# Copyright 2009-2011 Mark Fiers
# The New Zealand Institute for Plant & Food Research
#
# This file is part of Moa - http://github.com/mfiers/Moa
#
# Licensed under the GPL license (see 'COPYING')
#
"""
moa.utils
---------
A set of random utilities used by Moa
"""
from email.mime.text import MIMEText
import fcntl
import os
import smtplib
import struct
import subprocess
import re
import sys
import termios
import traceback
import moa.utils
import moa.logger as l
def removeIndent(txt):
"""
Removes indentation from a txt - for use by moa.args and moa.api
"""
ld = [x.replace("\t", " ").rstrip()
for x in txt.split("\n")]
re_firstNonSpace = re.compile('\S')
indents = []
for line in ld:
# ignore empty lines
if not line:
continue
fns = re_firstNonSpace.search(line)
if fns:
indents.append(fns.start())
    # guard against txt consisting solely of empty lines
    minIndent = min(indents) if indents else 0
nld = []
for line in ld:
if not line:
nld.append("")
else:
nld.append(line[minIndent:])
return "\n".join(nld)
def sendmail(server, sender, recipient, subject, message):
"""
Send an email.
"""
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
smtp_server = smtplib.SMTP(server)
smtp_server.sendmail(sender, [recipient], msg.as_string())
smtp_server.quit()
def niceRunTime(d):
"""
    Nice, human-readable representation of the run time.
    d is a duration string, e.g. str() of a datetime.timedelta
    """
    d = str(d)
    if ',' in d:
        days, time = d.split(',')
        # the days part reads e.g. "1 day" or "2 days" - keep the number
        days = int(days.strip().split()[0])
    else:
        days = 0
        time = d
    hours, minutes, seconds = time.split(':')
    hours, minutes = int(hours), int(minutes)
    # the seconds part may or may not carry a fractional component
    seconds = int(seconds.split('.')[0])
if days > 0:
if days == 1:
return "1 day, %d hrs" % hours
else:
return "%d days, %d hrs" % (days, hours)
if hours == 0 and minutes == 0 and seconds == 0:
return "<1 sec"
if hours > 0:
return "%d:%02d hrs" % (hours, minutes)
elif minutes > 0:
return "%d:%02d min" % (minutes, seconds)
else:
return "%d sec" % seconds
def getCwd():
"""
Do not use os.getcwd() -
need to make sure symbolic links do not get dereferenced
hijacked some code from:
http://stackoverflow.com/questions/123958/how-to-get-set-logical-directory-path-in-python
"""
cwd = os.environ.get("PWD")
if cwd is not None:
return cwd
# no environment. fall back to calling pwd on shell
cwd = subprocess.Popen(
'pwd',
stdout=subprocess.PIPE).communicate()[0].strip()
return cwd
def getTerminalSize():
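    """
    Return the current terminal size as a (columns, rows) tuple, falling
    back to the LINES/COLUMNS environment variables and finally to 80x25.
    """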
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
def getProcessInfo(pid):
"""
Return some info on a process
"""
cl = ('ps --no-heading -fp %s' % (pid)).split()
p = subprocess.Popen(cl, stdout=subprocess.PIPE)
out = p.communicate()[0].strip().split(None, 7)
if not out:
return {}
pi = dict(zip(
'uid pid ppid c stime tty time cmd'.split(), out))
# check if this is moa invocation
if 'python' in pi['cmd'] and \
'moa' in pi['cmd']:
pi['moa'] = True
else:
pi['moa'] = False
return pi
def getMoaBase():
"""
Return MOABASE - the directory where Moa is installed. This
function also sets an environment variable `MOABASE`
>>> d = getMoaBase()
>>> assert(os.path.isdir(d))
>>> assert(os.path.isfile(os.path.join(d, 'README')))
>>> assert(os.path.isdir(os.path.join(d, 'lib')))
:rtype: string (path)
"""
if 'MOABASE' in os.environ:
MOABASE = os.environ["MOABASE"]
return MOABASE
thif = os.path.dirname(os.path.dirname(__file__))
if thif[-4:] == '.egg':
MOABASE = thif
else:
MOABASE = '/usr/share/moa'
# for depending scripts
os.putenv('MOABASE', MOABASE)
return MOABASE
def moaDirOrExit(job):
"""
Check if the job contains a proper Moa job, if not, exit with an
error message and a non-zero exit code.
:param job: An instance of :class:`moa.job.Job`
"""
    import moa.ui  # local import avoids a circular import at module load
    if not job.isMoa():
        moa.ui.exit("Need a Moa job")
        sys.exit(-1)
def deprecated(func):
"""
Decorator function to flag a function as deprecated
:param func: any function
"""
def depfunc(*args, **kwargs):
l.critical('Calling deprecated function %s' % func.__name__)
l.critical("\n" + "\n".join(traceback.format_stack()))
func(*args, **kwargs)
return depfunc
def printstack(func):
"""
Decorator function to print stack
:param func: any function
"""
def depfunc(*args, **kwargs):
l.critical("\n" + "\n".join(traceback.format_stack()[:-1]))
func(*args, **kwargs)
return depfunc
def simple_decorator(decorator):
"""
This decorator can be used to turn simple functions into
well-behaved decorators, so long as the decorators are fairly
simple. If a decorator expects a function and returns a function
(no descriptors), and if it doesn't modify function attributes or
docstring, then it is eligible to use this. Simply apply
@simple_decorator to your decorator and it will automatically
preserve the docstring and function attributes of functions to
which it is applied.
    Note: I got this code from somewhere, but forgot where
exactly. This seems the most likely source:
http://svn.navi.cx/misc/trunk/djblets/djblets/util/decorators.py
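    An illustrative sketch (the ``noisy`` decorator below is made up)::
        @simple_decorator
        def noisy(func):
            def wrapper(*args, **kwargs):
                l.debug("calling %s" % func.__name__)
                return func(*args, **kwargs)
            return wrapper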
"""
def new_decorator(f):
g = decorator(f)
g.__name__ = f.__name__
g.__doc__ = f.__doc__
g.__dict__.update(f.__dict__)
return g
# Now a few lines needed to make simple_decorator itself
# be a well-behaved decorator.
new_decorator.__name__ = decorator.__name__
new_decorator.__doc__ = decorator.__doc__
new_decorator.__dict__.update(decorator.__dict__)
return new_decorator
@simple_decorator
def flog(func):
"""
A simple logger - uses the :mod:`moa.logger` code to log the
calling function. Use as a decorator::
@moa.utils.flog
def any_function(*args);
...
This is for debugging purposes (obviously)
:param func: Any python function
"""
def flogger(*args, **kwargs):
l.critical("Executing %s" % func.__name__)
for a in args:
l.error(" - calling with arg %s" % a)
for k in kwargs.keys():
l.error(" - calling with kwargs %s=%s" % (k, kwargs[k]))
return func(*args, **kwargs)
return flogger
| gpl-3.0 | 182,209,178,476,799,500 | 24.003425 | 93 | 0.583482 | false |
mschuh/pi_music_looper | music_looper/music_looper.py | 1 | 12884 | # Copyright 2015 Adafruit Industries.
# Author: Tony DiCola
# License: GNU GPLv2, see LICENSE.txt
import ConfigParser
import importlib
import os
import re
import sys
import signal
import time
import pygame
from model import Playlist
# Basic music looper architecture:
#
# - MusicLooper class contains all the main logic for running the looper program.
#
# - Almost all state is configured in a .ini config file which is required for
#   loading and using the MusicLooper class.
#
# - MusicLooper has loose coupling with file reader and music player classes that
#   are used to find music files and play them respectively. The configuration
#   defines which file reader and music player module will be loaded.
#
# - A file reader module needs to define at top level create_file_reader function
# that takes as a parameter a ConfigParser config object. The function should
# return an instance of a file reader class. See usb_drive.py and directory.py
# for the two provided file readers and their public interface.
#
# - Similarly a music player module needs to define a top level create_player
#   function that takes in configuration. See omxplayer.py and hello_video.py
#   for the two provided players and their public interface.
#
# - Future file readers and music players can be provided and referenced in the
#   config to extend the looper to read from different file sources or use
#   different players.
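# As a hedged illustration only (not a module shipped with this project), a
# minimal file reader compatible with the interface MusicLooper uses below
# (search_paths, is_changed, idle_message) could look like:
#
#     class StaticReader(object):
#         def search_paths(self):
#             return ['/home/pi/music']
#         def is_changed(self):
#             return False
#         def idle_message(self):
#             return 'No music found in /home/pi/music'
#
#     def create_file_reader(config):
#         return StaticReader()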
class MusicLooper(object):
def __init__(self, config_path):
"""Create an instance of the main video looper application class. Must
pass path to a valid video looper ini configuration file.
"""
# Load the configuration.
self._config = ConfigParser.SafeConfigParser()
if len(self._config.read(config_path)) == 0:
raise RuntimeError('Failed to find configuration file at {0}, is the application properly installed?'.format(config_path))
self._console_output = self._config.getboolean('music_looper', 'console_output')
# Load configured video player and file reader modules.
self._player = self._load_player()
self._reader = self._load_file_reader()
# Load other configuration values.
self._osd = self._config.getboolean('music_looper', 'osd')
self._is_random = self._config.getboolean('music_looper', 'is_random')
self._keyboard_control = self._config.getboolean('music_looper', 'keyboard_control')
# Parse string of 3 comma separated values like "255, 255, 255" into
# list of ints for colors.
self._bgcolor = map(int, self._config.get('music_looper', 'bgcolor') \
.translate(None, ',') \
.split())
self._fgcolor = map(int, self._config.get('music_looper', 'fgcolor') \
.translate(None, ',') \
.split())
# Load sound volume file name value
self._sound_vol_file = self._config.get('omxplayer', 'sound_vol_file');
# default value to 0 millibels (omxplayer)
self._sound_vol = 0
# Initialize pygame and display a blank screen.
pygame.display.init()
pygame.font.init()
pygame.mouse.set_visible(False)
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
self._screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
self._blank_screen()
# Set other static internal state.
self._extensions = self._player.supported_extensions()
self._small_font = pygame.font.Font(None, 50)
self._medium_font = pygame.font.Font(None, 100)
self._big_font = pygame.font.Font(None, 250)
self._running = True
def _print(self, message):
"""Print message to standard output if console output is enabled."""
if self._console_output:
print(message)
def _load_player(self):
"""Load the configured video player and return an instance of it."""
module = self._config.get('music_looper', 'music_player')
return importlib.import_module('.' + module, 'music_looper') \
.create_player(self._config)
def _load_file_reader(self):
"""Load the configured file reader and return an instance of it."""
module = self._config.get('music_looper', 'file_reader')
return importlib.import_module('.' + module, 'music_looper') \
.create_file_reader(self._config)
    def _is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def _build_playlist(self):
"""Search all the file reader paths for movie files with the provided
extensions.
"""
# Get list of paths to search from the file reader.
paths = self._reader.search_paths()
# Enumerate all movie files inside those paths.
movies = []
for ex in self._extensions:
for path in paths:
# Skip paths that don't exist or are files.
if not os.path.exists(path) or not os.path.isdir(path):
continue
# Ignore hidden files (useful when file loaded on usb
# key from an OSX computer
                movies.extend(['{0}/{1}'.format(path.rstrip('/'), x) \
                               for x in os.listdir(path) \
                               if re.search('\.{0}$'.format(ex), x,
                                            flags=re.IGNORECASE) and \
                               not x.startswith('.')])
# Get the video volume from the file in the usb key
sound_vol_file_path = '{0}/{1}'.format(path.rstrip('/'), self._sound_vol_file)
if os.path.exists(sound_vol_file_path):
with open(sound_vol_file_path, 'r') as sound_file:
sound_vol_string = sound_file.readline()
if self._is_number(sound_vol_string):
self._sound_vol = int(float(sound_vol_string))
# Create a playlist with the sorted list of movies.
return Playlist(sorted(movies), self._is_random)
def _blank_screen(self):
"""Render a blank screen filled with the background color."""
self._screen.fill(self._bgcolor)
pygame.display.update()
def _render_text(self, message, font=None):
"""Draw the provided message and return as pygame surface of it rendered
with the configured foreground and background color.
"""
# Default to small font if not provided.
if font is None:
font = self._small_font
return font.render(message, True, self._fgcolor, self._bgcolor)
def _animate_countdown(self, playlist, seconds=10):
"""Print text with the number of loaded movies and a quick countdown
message if the on screen display is enabled.
"""
# Print message to console with number of movies in playlist.
message = 'Found {0} song{1}.'.format(playlist.length(),
's' if playlist.length() >= 2 else '')
self._print(message)
# Do nothing else if the OSD is turned off.
if not self._osd:
return
# Draw message with number of movies loaded and animate countdown.
# First render text that doesn't change and get static dimensions.
label1 = self._render_text(message + ' Starting playback in:')
l1w, l1h = label1.get_size()
sw, sh = self._screen.get_size()
for i in range(seconds, 0, -1):
# Each iteration of the countdown rendering changing text.
label2 = self._render_text(str(i), self._big_font)
l2w, l2h = label2.get_size()
# Clear screen and draw text with line1 above line2 and all
# centered horizontally and vertically.
self._screen.fill(self._bgcolor)
self._screen.blit(label1, (sw/2-l1w/2, sh/2-l2h/2-l1h))
self._screen.blit(label2, (sw/2-l2w/2, sh/2-l2h/2))
pygame.display.update()
# Pause for a second between each frame.
time.sleep(1)
def _idle_message(self):
"""Print idle message from file reader."""
# Print message to console.
message = self._reader.idle_message()
self._print(message)
# Do nothing else if the OSD is turned off.
if not self._osd:
return
# Display idle message in center of screen.
label = self._render_text(message)
lw, lh = label.get_size()
sw, sh = self._screen.get_size()
self._screen.fill(self._bgcolor)
self._screen.blit(label, (sw/2-lw/2, sh/2-lh/2))
# If keyboard control is enabled, display message about it
if self._keyboard_control:
label2 = self._render_text('press ESC to quit')
l2w, l2h = label2.get_size()
self._screen.blit(label2, (sw/2-l2w/2, sh/2-l2h/2+lh))
pygame.display.update()
def _prepare_to_run_playlist(self, playlist):
"""Display messages when a new playlist is loaded."""
# If there are movies to play show a countdown first (if OSD enabled),
# or if no movies are available show the idle message.
if playlist.length() > 0:
self._animate_countdown(playlist)
self._blank_screen()
else:
self._idle_message()
def run(self):
"""Main program loop. Will never return!"""
# Get playlist of movies to play from file reader.
playlist = self._build_playlist()
self._prepare_to_run_playlist(playlist)
# Main loop to play videos in the playlist and listen for file changes.
while self._running:
# Load and play a new movie if nothing is playing.
if not self._player.is_playing():
movie = playlist.get_next()
if movie is not None:
# Start playing the first available song
label1 = self._render_text('Playing song:')
l1w, l1h = label1.get_size()
sw, sh = self._screen.get_size()
label2 = self._render_text(os.path.splitext(os.path.basename(movie))[0], self._medium_font)
l2w, l2h = label2.get_size()
# Clear screen and draw text with line1 above line2 and all
# centered horizontally and vertically.
self._screen.fill(self._bgcolor)
self._screen.blit(label1, (sw/2-l1w/2, sh/2-l2h/2-l1h))
self._screen.blit(label2, (sw/2-l2w/2, sh/2-l2h/2))
pygame.display.update()
self._print('Playing song: {0}'.format(movie))
self._player.play(movie, loop=playlist.length() == 1, vol = self._sound_vol)
# Check for changes in the file search path (like USB drives added)
# and rebuild the playlist.
if self._reader.is_changed():
self._player.stop(3) # Up to 3 second delay waiting for old
# player to stop.
# Rebuild playlist and show countdown again (if OSD enabled).
playlist = self._build_playlist()
self._prepare_to_run_playlist(playlist)
# Event handling for key press, if keyboard control is enabled
if self._keyboard_control:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
# If pressed key is ESC quit program
if event.key == pygame.K_ESCAPE:
self.quit()
# Give the CPU some time to do other tasks.
time.sleep(0.002)
def quit(self):
"""Shut down the program"""
self._running = False
if self._player is not None:
self._player.stop()
pygame.quit()
def signal_quit(self, signal, frame):
"""Shut down the program, meant to by called by signal handler."""
self.quit()
# Main entry point.
if __name__ == '__main__':
print('Starting Music Looper.')
# Default config path to /boot.
config_path = '/boot/music_looper.ini'
# Override config path if provided as parameter.
if len(sys.argv) == 2:
config_path = sys.argv[1]
# Create video looper.
musiclooper = MusicLooper(config_path)
# Configure signal handlers to quit on TERM or INT signal.
signal.signal(signal.SIGTERM, musiclooper.signal_quit)
signal.signal(signal.SIGINT, musiclooper.signal_quit)
# Run the main loop.
musiclooper.run()
| gpl-2.0 | 8,920,466,758,795,323,000 | 44.850534 | 134 | 0.589801 | false |
django-danceschool/django-danceschool | danceschool/core/management/commands/setup_permissions.py | 1 | 9771 | from django.core.management.base import BaseCommand
from django.apps import apps
from django.contrib.auth.models import Group, Permission
from six.moves import input
try:
import readline
except ImportError:
pass
class Command(BaseCommand):
help = 'Create default groups and permissions for standard dance school setups'
def boolean_input(self, question, default=None):
'''
Method for yes/no boolean inputs
'''
result = input("%s: " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
def handle(self, *args, **options):
self.stdout.write(
"""
USER GROUPS AND PERMISSIONS
---------------------------
This project allows you to provide finely-grained permissions to individual users and
user groups, such as instructors and administrators. This allows you to let different
types of users manage different types of content while still maintaining appropriate
security.
To get you started with the permissions system, we can create three initial user
groups, and give them different levels of permissions over content:
- The "Board" group: Users in this group will receive permissions to edit
all public-facing content as well as all financial records. They will not
            automatically receive permissions to edit certain other sitewide settings for
security reasons.
- The "Instructor" group: Users in this group will receive permissions to use
school administrative functions such as emailing students, submitting expenses
and revenues, and viewing their own statistics and payment history. However, by
default, these users cannot edit public-facing content such as page content or
FAQs.
- The "Registration Desk" group: Users in this group receive only the ability
to log into the site in order to view class registrations and check in students.
By default, they cannot access any other administrative function.
We strongly encourage you to create these initial groups as a starting point for
managing staff permissions on the site. The superuser that you created previously
will always retain permissions to edit all content and settings. Additionally, you
can always go on to create additional groups, or to edit permissions on either a
group basis or an individual user basis.
Note: This process may take a minute or two to complete.
"""
)
create_board_group = self.boolean_input('Create \'Board\' group with default initial permissions [Y/n]', True)
if create_board_group:
board_group = Group.objects.get_or_create(name='Board')[0]
# The Board group get all permissions on the CMS app and on all danceschool apps, plus
# the permissions explicitly listed here by their natural_key. Unfortunately this is
# slow because we have to check permissions one-by-one
give_explicit = [
('add_emailaddress', 'account', 'emailaddress'),
('change_emailaddress', 'account', 'emailaddress'),
('delete_emailaddress', 'account', 'emailaddress'),
('add_user', 'auth', 'user'),
('change_user', 'auth', 'user'),
]
app_add_list = ['cms', 'core', 'djangocms_forms', 'djangocms_text_ckeditor', 'easy_thumbnails', 'filer']
for this_app in [
'danceschool.financial',
'danceschool.discounts',
'danceschool.door',
'danceschool.faq',
'danceschool.guestlist',
'danceschool.news',
'danceschool.prerequisites',
'danceschool.private_events',
'danceschool.private_lessons',
'danceschool.stats',
'danceschool.vouchers',
'danceschool.banlist',
'danceschool.payments.payatdoor',
'danceschool.payments.paypal',
'danceschool.payments.stripe',
'danceschool.payments.square',
]:
if apps.is_installed(this_app):
app_add_list.append(this_app.split('.')[1])
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit or perm.natural_key()[1] in app_add_list:
board_group.permissions.add(perm)
self.stdout.write('Finished creating \'Board\' group and setting initial permissions.\n')
create_instructor_group = self.boolean_input(
'Create \'Instructor\' group with default initial permissions [Y/n]', True
)
if create_instructor_group:
instructor_group = Group.objects.get_or_create(name='Instructor')[0]
give_explicit = [
('view_page', 'cms', 'page'),
('add_classdescription', 'core', 'classdescription'),
('change_classdescription', 'core', 'classdescription'),
('can_autocomplete_users', 'core', 'customer'),
('send_email', 'core', 'emailtemplate'),
('report_substitute_teaching', 'core', 'eventstaffmember'),
('update_instructor_bio', 'core', 'instructor'),
('view_own_instructor_finances', 'core', 'instructor'),
('view_own_instructor_stats', 'core', 'instructor'),
('process_refunds', 'core', 'invoice'),
('send_invoices', 'core', 'invoice'),
('view_all_invoices', 'core', 'invoice'),
('accept_door_payments', 'core', 'registration'),
('checkin_customers', 'core', 'registration'),
('ajax_registration', 'core', 'registration'),
('override_register_closed', 'core', 'registration'),
('override_register_dropins', 'core', 'registration'),
('override_register_soldout', 'core', 'registration'),
('register_dropins', 'core', 'registration'),
('view_registration_summary', 'core', 'registration'),
('view_school_stats', 'core', 'staffmember'),
('view_staff_directory', 'core', 'staffmember'),
('add_file', 'filer', 'file'),
('change_file', 'filer', 'file'),
('can_use_directory_listing', 'filer', 'folder'),
('add_image', 'filer', 'image'),
('change_image', 'filer', 'image'),
('add_expenseitem', 'financial', 'expenseitem'),
('mark_expenses_paid', 'financial', 'expenseitem'),
('add_revenueitem', 'financial', 'revenueitem'),
('view_finances_bymonth', 'financial', 'revenueitem'),
('mark_revenues_received', 'financial', 'revenueitem'),
('add_newsitem', 'news', 'newsitem'),
('change_newsitem', 'news', 'newsitem'),
('ignore_requirements', 'prerequisites', 'requirement'),
('add_eventreminder', 'private_events', 'eventreminder'),
('change_eventreminder', 'private_events', 'eventreminder'),
('delete_eventreminder', 'private_events', 'eventreminder'),
('add_privateevent', 'private_events', 'privateevent'),
('change_privateevent', 'private_events', 'privateevent'),
('delete_privateevent', 'private_events', 'privateevent'),
('edit_own_availability', 'private_lessons', 'instructoravailabilityslot'),
('view_banlist', 'banlist', 'bannedperson'),
('view_guestlist', 'guestlist', 'guestlist'),
]
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit:
instructor_group.permissions.add(perm)
self.stdout.write('Finished creating \'Instructor\' group and setting initial permissions.\n')
create_regdesk_group = self.boolean_input(
'Create \'Registration Desk\' group with default initial permissions [Y/n]',
True
)
if create_regdesk_group:
regdesk_group = Group.objects.get_or_create(name='Registration Desk')[0]
give_explicit = [
('view_page', 'cms', 'page'),
('can_autocomplete_users', 'core', 'customer'),
('process_refunds', 'core', 'invoice'),
('send_invoices', 'core', 'invoice'),
('view_all_invoices', 'core', 'invoice'),
('accept_door_payments', 'core', 'registration'),
('checkin_customers', 'core', 'registration'),
('override_register_closed', 'core', 'registration'),
('override_register_dropins', 'core', 'registration'),
('override_register_soldout', 'core', 'registration'),
('ajax_registration', 'core', 'registration'),
('register_dropins', 'core', 'registration'),
('view_registration_summary', 'core', 'registration'),
('view_staff_directory', 'core', 'staffmember'),
('ignore_requirements', 'prerequisites', 'requirement'),
('view_banlist', 'banlist', 'bannedperson'),
('view_guestlist', 'guestlist', 'guestlist'),
]
for perm in Permission.objects.all():
if perm.natural_key() in give_explicit:
regdesk_group.permissions.add(perm)
self.stdout.write('Finished creating \'Registration\' group and setting initial permissions.\n')
| bsd-3-clause | -4,150,810,581,700,201,000 | 48.852041 | 118 | 0.58735 | false |
pacoqueen/ginn | db/ajusta_precios_factura.py | 1 | 2486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#################################################################
# July 10, 2006.
# Temporary script to adjust the LDV (sale line) prices of some
# old invoices, DUE TO A BUG IN THE EXCEL MACROS that were used
# before ginn was deployed. To preserve the January-May '06
# accounting, this script adjusts the unit prices to the values
# calculated by Excel (with umpteen decimals) so that the
# invoice totals match.
#################################################################
import sys, os
sys.path.append(os.path.join('..', 'framework'))
from framework import pclases
# List of (ldv_id, unit_price) pairs:
ldvs = ((69, 0.39169906),
(77, 0.36143386),
(131, 0.21685764),
(141, 0.51147259),
(275, 0.23219231),
(149, 0.27408263),
(534, 0.3329553),
(561, 0.29571618),
(604, 1.4923387),
(558, 0.33879479),
(565, 0.39169958),
(540, 1.4923384),
(566, 0.50392024),
(612, 0.29134587),
(616, 0.29479676),
(567, 0.21685841),
(379, 0.50392043),
(339, 0.32200196),
(403, 0.31724339),
(412, 0.67335334),
(513, 0.21685887),
(516, 0.26690208),
(864, 0.21687323),
(167, 0.21685885),
(169, 0.39169906),
(300, 1.4923393),
(178, 0.29134589),
(575, 0.29134666),
(186, 0.39169576),
(194, 0.21365343),
(203, 0.21685893),
(204, 0.50392024)
)
for id, precio in ldvs:
ldv = pclases.LineaDeVenta.get(id)
print "Ajustando LDV %d de %f a %f..." % (id, ldv.precio, precio),
ldv.precio = precio
print "OK (%f)" % ldv.precio
print "Ajustando IVA factura O60001...",
fra = pclases.FacturaVenta.get(197)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Ajustando IVA factura O60008...",
fra = pclases.FacturaVenta.get(204)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Cambiando número factura O60008 dupliacada a O60011 y el IVA a 0...",
fra = pclases.FacturaVenta.get(207)
fra.numfactura = "O60011"
fra.iva = 0
print "OK (%s, %f)" % (fra.numfactura, fra.iva)
print "Ajustando IVA factura O60013...",
fra = pclases.FacturaVenta.get(209)
fra.iva = 0
print "OK (%f)" % (fra.iva)
print "Cambiando número factura G60003 dupliacada a G60004...",
fra = pclases.FacturaVenta.get(199)
fra.numfactura = "G60004"
print "OK (%s)" % (fra.numfactura)
| gpl-2.0 | -395,667,731,017,486,700 | 30.405063 | 77 | 0.571544 | false |
luoguizhou/gooderp_addons | goods/models/goods.py | 1 | 7593 | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError
class Goods(models.Model):
"""
    Inherits the goods model defined in core, and defines views and additional fields.
"""
_inherit = 'goods'
@api.multi
def get_parent_tax_rate(self, parent_id):
        # Walk up the goods category hierarchy to find a tax rate
tax_rate = parent_id.tax_rate
if not tax_rate and parent_id.parent_id:
tax_rate = self.get_parent_tax_rate(parent_id.parent_id)
return tax_rate
@api.multi
def get_tax_rate(self, goods, partner, type):
"""
        Get the applicable tax rate.
        If the goods record has no tax rate, walk up the goods category tree;
        compare the goods tax rate with the partner tax rate: if both exist take
        the smaller, if only one exists take that one, and if neither exists fall
        back to the company's input/output tax rate.
"""
if not goods:
return
goods_tax_rate, partner_tax_rate = False, False
        # If the goods record has no tax rate, take it from the goods category
if goods.tax_rate:
goods_tax_rate = goods.tax_rate
elif goods.goods_class_id.tax_rate:
goods_tax_rate = goods.goods_class_id.tax_rate
        elif goods.goods_class_id.parent_id:  # walk up the category tree for a tax rate
goods_tax_rate = self.get_parent_tax_rate(goods.goods_class_id.parent_id)
        # Take the partner's tax rate
if partner:
partner_tax_rate = partner.tax_rate
        # Compare the goods and partner tax rates and return accordingly
if goods_tax_rate and partner_tax_rate:
if goods_tax_rate >= partner_tax_rate:
return partner_tax_rate
else:
return goods_tax_rate
elif goods_tax_rate and not partner_tax_rate:
return goods_tax_rate
elif not goods_tax_rate and partner_tax_rate:
return partner_tax_rate
else:
if type == 'buy':
return self.env.user.company_id.import_tax_rate
elif type == 'sell':
return self.env.user.company_id.output_tax_rate
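    # Illustrative sketch (not part of the original model): the resolution
    # order implemented above is goods rate -> goods class rate (walking up
    # parent categories) -> the smaller of the goods/partner rates -> the
    # company's import or output tax rate, e.g.:
    #   rate = self.get_tax_rate(goods, partner, 'buy')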
no_stock = fields.Boolean(u'虚拟商品')
using_batch = fields.Boolean(u'管理批号')
force_batch_one = fields.Boolean(u'管理序列号')
attribute_ids = fields.One2many('attribute', 'goods_id', string=u'属性')
image = fields.Binary(u'图片', attachment=True)
supplier_id = fields.Many2one('partner',
u'默认供应商',
ondelete='restrict',
domain=[('s_category_id', '!=', False)])
price = fields.Float(u'零售价')
barcode = fields.Char(u'条形码')
note = fields.Text(u'备注')
goods_class_id = fields.Many2one(
'goods.class', string=u'商品分类',
help="Those categories are used to group similar products for point of sale.")
_sql_constraints = [
('barcode_uniq', 'unique(barcode)', u'条形码不能重复'),
]
@api.onchange('uom_id')
def onchange_uom(self):
"""
        :return: when a unit of measure is selected, the auxiliary unit defaults to the same unit.
"""
self.uos_id = self.uom_id
@api.onchange('using_batch')
def onchange_using_batch(self):
"""
        :return: when "manage batch numbers" is unchecked, automatically uncheck "manage serial numbers"
"""
if not self.using_batch:
self.force_batch_one = False
def conversion_unit(self, qty):
""" 数量 × 转化率 = 辅助数量
:param qty: 传进来数量计算出辅助数量
:return: 返回辅助数量
"""
self.ensure_one()
return self.conversion * qty
def anti_conversion_unit(self, qty):
""" 数量 = 辅助数量 / 转化率
:param qty: 传入值为辅助数量
:return: 数量
"""
self.ensure_one()
return self.conversion and qty / self.conversion or 0
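    # Illustrative sketch (values are hypothetical): with conversion = 12
    # (say, pieces per box),
    #   goods.conversion_unit(2)        # => 24  (boxes -> pieces)
    #   goods.anti_conversion_unit(24)  # => 2.0 (pieces -> boxes)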
class Attribute(models.Model):
_name = 'attribute'
_description = u'属性'
@api.one
@api.depends('value_ids')
def _compute_name(self):
self.name = ' '.join(
[value.category_id.name + ':' + value.value_id.name for value in self.value_ids])
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
        '''Support searching by barcode in many2one fields.'''
args = args or []
if name:
attribute_ids = self.search([('ean', '=', name)])
if attribute_ids:
return attribute_ids.name_get()
return super(Attribute, self).name_search(
name=name, args=args, operator=operator, limit=limit)
ean = fields.Char(u'条码')
name = fields.Char(u'属性', compute='_compute_name',
store=True, readonly=True)
goods_id = fields.Many2one('goods', u'商品', ondelete='cascade')
value_ids = fields.One2many(
'attribute.value', 'attribute_id', string=u'属性')
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
_sql_constraints = [
('ean_uniq', 'unique (ean)', u'该条码已存在'),
('goods_attribute_uniq', 'unique (goods_id, name)', u'该SKU已存在'),
]
@api.one
@api.constrains('value_ids')
def check_value_ids(self):
att_dict = {}
for line in self.value_ids:
            if line.category_id not in att_dict:
att_dict[line.category_id] = line.category_id
else:
raise UserError(u'属性值的类别不能相同')
class AttributeValue(models.Model):
_name = 'attribute.value'
_rec_name = 'value_id'
_description = u'属性明细'
attribute_id = fields.Many2one('attribute', u'属性', ondelete='cascade')
category_id = fields.Many2one('core.category', u'属性',
ondelete='cascade',
domain=[('type', '=', 'attribute')],
context={'type': 'attribute'},
required='1')
value_id = fields.Many2one('attribute.value.value', u'值',
ondelete='restrict',
domain="[('category_id','=',category_id)]",
default=lambda self: self.env.context.get(
'default_category_id'),
required='1')
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
class AttributeValueValue(models.Model):
_name = 'attribute.value.value'
_description = u'属性值'
category_id = fields.Many2one('core.category', u'属性',
ondelete='cascade',
domain=[('type', '=', 'attribute')],
context={'type': 'attribute'},
required='1')
name = fields.Char(u'值', required=True)
company_id = fields.Many2one(
'res.company',
string=u'公司',
change_default=True,
default=lambda self: self.env['res.company']._company_default_get())
_sql_constraints = [
('name_category_uniq', 'unique(category_id,name)', u'同一属性的值不能重复')
]
| agpl-3.0 | -1,692,061,697,503,829,800 | 32.743842 | 93 | 0.54219 | false |
funilrys/A-John-Shots | a_john_shots/helpers.py | 1 | 5760 | #!/bin/env python3
"""
A John Shots - A tool to get the Secure Hash Algorithms (SHA) of all files in a given path.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Contributors:
Let's contribute to A John Shots!
Project link:
https://github.com/funilrys/A-John-Shots
License:
::
MIT License
Copyright (c) 2017-2019 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# pylint: disable=bad-continuation, too-many-arguments
from re import compile as comp
def combine_dicts(dict_1, dict_2):
"""
    Combine two dictionaries.
:param dict_1: The first dict.
:type dict_1: dict
:param dict_2: The second dict.
:type dict_2: dict
:return: The combined dict.
:rtype: dict
"""
result = {}
for key, value in dict_1.items():
if key in dict_2.keys():
if isinstance(dict_2[key], dict):
result[key] = combine_dicts(value, dict_2.pop(key))
else:
result[key] = value
for key, value in dict_2.items():
result[key] = value
return result
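# Illustrative sketch of the intended deep merge (example values are
# hypothetical):
#   combine_dicts({'a': 1, 'b': {'c': 2}}, {'b': {'d': 3}})
#   # => {'a': 1, 'b': {'c': 2, 'd': 3}}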
class Regex: # pylint: disable=too-few-public-methods
"""
    A simple wrapper around Python's re package
Arguments:
- data: str or list
The data or a list of data to check.
- regex: str or list
The regex or a list or regex.
- return_data: bool
- True: Return matched string
        - False: Return True or False
- group: int
The group to return.
- rematch: bool
Implementation of Bash ${BASH_REMATCH}.
- True: Returned matched groups into a list format.
- occurences: int
        The number of occurrences to replace.
"""
def __init__(
self, data, regex, group=0, occurences=0, rematch=False, return_data=True
):
super(Regex, self).__init__()
# We initiate the needed variable in order to be usable all over class
self.data = data
self.regex = regex
self.group = group
self.occurences = occurences
self.rematch = rematch
self.return_data = return_data
def match(self, regex=None, data_to_match=None):
"""
        Used to get a usable result from re.search
Arguments:
- data: str
The data or a list of data to check.
- regex: str
The regex or a list or regex.
Returns:
list or bool
- bool: if self.return_data is False
- list: otherwise
"""
        # Initialize the variable that will contain the returned data
result = []
if not regex:
regex = self.regex
if not data_to_match:
data_to_match = self.data
# We compile the regex string
to_match = comp(regex)
        # If we emulate Bash's ${BASH_REMATCH}, we use re.findall;
        # otherwise, we use re.search.
if self.rematch:
pre_result = to_match.findall(data_to_match)
else:
pre_result = to_match.search(data_to_match)
if self.return_data and pre_result is not None:
if self.rematch:
for data in pre_result:
if isinstance(data, tuple):
result.extend(list(data))
else:
result.append(data)
if self.group != 0:
return result[self.group]
else:
result = pre_result.group(self.group).strip()
return result
if not self.return_data and pre_result is not None:
return True
return False
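    # Illustrative usage (sketch, values are hypothetical):
    #   Regex('hello world', r'(h\w+)', group=1).match()  # => 'hello'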
def loop_matching(self):
"""
This method can be used to perform a loop matching.
"""
results = []
if isinstance(self.data, str):
if isinstance(self.regex, list):
for exp in self.regex:
matched = self.match(regex=exp)
try:
results.extend(matched)
except TypeError:
results.append(matched)
if not self.return_data:
if True in results:
return True
return False
else:
return self.match()
elif isinstance(self.data, list) and isinstance(self.regex, str):
for string in self.data:
results.extend(self.match(data_to_match=string))
return results
| mit | 3,648,964,506,032,662,000 | 28.84456 | 93 | 0.584201 | false |
katyhuff/moose | python/utils/MooseSourceParser.py | 1 | 4710 | import sys
import os
import re
import subprocess
import clang.cindex
if 'MOOSE_CLANG_LIB' not in os.environ:
raise EnvironmentError("Using the MooseSourceParser requires setting 'MOOSE_CLANG_LIB' environment variable to point to the clang library.")
clang.cindex.Config.set_library_path(os.getenv('MOOSE_CLANG_LIB'))
class MooseSourceParser(object):
"""
An object for parsing MOOSE source code.
Args:
app_path[str]: The path that contains the application Makefile (needed for extracting includes).
"""
def __init__(self, app_path):
# Check that the supplied path has a Makefile (for getting includes)
if not os.path.exists(os.path.join(app_path, 'Makefile')):
            #TODO: Make this a MooseException, log the exception, and check that the Makefile is one from MOOSE
print 'The supplied application directory does not contain a Makefile:', app_path
return
# Extract the includes from the Makefile
self._includes = self.includes(app_path)
def parse(self, filename):
"""
Parse the supplied C/h file with clang.
Args:
filename[str]: The filename to parse.
"""
# Check that the supplied file exists
if not os.path.exists(filename):
#TODO: Proper exception and logging
print 'The supplied source/header file does not exist:', filename
return
# Build the flags to pass to clang
includes = ['-x', 'c++', '-std=c++11']
includes += self._includes
# Build clang translation unit
index = clang.cindex.Index.create()
self._translation_unit = index.parse(filename, includes)
@staticmethod
def includes(app_path):
"""
Returns the includes by running 'make echo_include' for an application.
Args:
app_path[str]: A valid moose application or directory with a MOOSE Makefile (e.g., framework).
"""
p = subprocess.Popen(['make', 'echo_include'], cwd=app_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
for match in re.finditer(r'-I(.*?)\s', output):
yield match.group(0).strip().strip('\n')
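    # For illustration (paths are hypothetical): 'make echo_include' prints
    # flags such as '-I/path/to/moose/framework/include/base', and the regex
    # above yields each '-I...' token in turn.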
def method(self, name):
"""
Retrieve a class declaration and definition by name.
Args:
name[str]: The name of the method to extract.
Returns:
decl[str], defn[str]: A string containing the declaration and definition of the desired method.
"""
decl = None
defn = None
cursors = self.find(clang.cindex.CursorKind.CXX_METHOD, name=name)
for c in cursors:
if c.is_definition():
defn = self.content(c)
else:
decl = self.content(c)
return decl, defn
    def dump(self, cursor=None, level=0, **kwargs):
"""
A tool for dumping the cursor tree.
"""
        if cursor is None:
            cursor = self._translation_unit.cursor
recursive = kwargs.pop('recursive', True)
for c in cursor.get_children():
print ' '*4*level, c.kind, c.spelling, c.extent.start.file, c.extent.start.line
if recursive and c.get_children():
self.dump(c, level+1)
@staticmethod
def content(cursor):
source_range = cursor.extent
fid = open(source_range.start.file.name, 'r')
content = fid.read()[source_range.start.offset:source_range.end.offset]
fid.close()
return content
def find(self, kind, **kwargs):
"""
Locate the clang.cindex.Cursor object(s). (public)
Args:
kind[int]: The type of cursor (see clang.cindex.py) to locate.
Kwargs:
name[str]: The name of the cursor to return (i.e., Cursor.spelling)
definition[bool]: Only include items with 'is_definition' set to true.
Returns:
A list of all cursors matching the kind and optionally the name.
"""
name = kwargs.pop('name', None)
defn = kwargs.pop('definition', False)
for cursor in self._translation_unit.cursor.walk_preorder():
            if hasattr(cursor, 'kind') and cursor.kind == kind and (name is None or cursor.spelling == name):
#print cursor.extent.start.file
yield cursor
if __name__ == '__main__':
src = '/Users/slauae/projects/moose/framework/src/kernels/Diffusion.C'
parser = MooseSourceParser('/Users/slauae/projects/moose/framework')
parser.parse(src)
decl, defn = parser.method('computeQpResidual')
print decl, defn
| lgpl-2.1 | 5,019,072,454,677,851,000 | 31.937063 | 144 | 0.605732 | false |
ShrimpingIt/tableaux | regimes/flopbunny/main.py | 1 | 2389 | from time import sleep
from uos import urandom
from machine import Pin
from cockle import pins
dataPin = pins[1]
clockPin = pins[2]
latchPin = pins[3]
dataPin.init(Pin.OUT)
latchPin.init(Pin.OUT)
clockPin.init(Pin.OUT)
dataPin.value(0)
latchPin.value(0)
clockPin.value(0)
delay = 1
numLights = 8
byteCount = numLights // 8
backBytes = [0 for pos in range(byteCount)]
def latch():
latchPin.value(1)
latchPin.value(0)
def clock():
clockPin.value(1)
clockPin.value(0)
def writeByte(val):
bit = 1
for step in range(8):
if val & bit != 0:
dataPin.value(1)
else:
dataPin.value(0)
clock()
bit = bit << 1
def send(lit):
if (lit):
dataPin.value(1)
else:
dataPin.value(0)
for step in range(8):
clock()
latch()
def setLight(pos, lit, show=True):
bytePos = pos // 8
bitPos = pos % 8
if lit:
backBytes[bytePos] = backBytes[bytePos] | (1 << bitPos)
else:
backBytes[bytePos] = backBytes[bytePos] & ~(1 << bitPos)
if (show):
flip()
def flip():
for pos in range(len(backBytes)):
writeByte(backBytes[pos])
latch()
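# Illustrative usage (sketch): light the third output and push the new
# state out to the shift register in one call:
#   setLight(2, True)  # sets bit 2 of backBytes and calls flip()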
def turnOn(lights):
for pos in range(len(lights)):
setLight(lights[pos], True, False)
flip()
def turnOff(lights):
for pos in range(len(lights)):
setLight(lights[pos], False, False)
flip()
def chase(lights, delay=0.1):  # renamed from 'sequence' so it is not shadowed by sequence() below
while True:
for outer in range(len(lights)):
for inner in range(len(lights)):
setLight(lights[inner], inner == outer)
sleep(delay)
def identify():
for lightPos in range(numLights):
setLight(lightPos, False)
for lightPos in range(numLights):
setLight(lightPos, True)
input("Light Number " + str(lightPos))
setLight(lightPos, False)
def walk():
global backBytes
while True:
backBytes = [ord(urandom(1)) for item in backBytes]
flip()
sleep(1)
eyes = [0]
earsUp = [1]
earLeft = [4]
earRight = [5]
earsDown = earLeft + earRight
glasses = [2]
head = [3]
def sequence():
turnOn(head + glasses + eyes)
turnOff(earsUp); turnOn(earsDown)
sleep(1)
turnOff(earsDown); turnOn(earsUp)
sleep(1)
def animate():
while True:
sequence()
def illuminate():
turnOn(range(numLights))
animate()
| agpl-3.0 | -6,227,094,358,887,579,000 | 17.098485 | 64 | 0.598995 | false |
cdubz/timestrap | timestrap/settings/docker.py | 1 | 1289 | from .base import * # noqa: F401,F403
DEBUG = False
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY'] # noqa: F405
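# For example (illustrative only), the key can be supplied through the
# container environment:
#   docker run -e SECRET_KEY=change-me ... timestrap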
# SECURITY WARNING: set this to your domain name in production!
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Channels
# https://channels.readthedocs.io/en/latest/
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [('redis', 6379)],
},
},
}
# Email
if os.environ.get('EMAIL_HOST'): # noqa: E501,F405
EMAIL_HOST = os.environ.get('EMAIL_HOST') # noqa: F405
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER') # noqa: F405
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD') # noqa: F405
EMAIL_PORT = int(os.environ.get('EMAIL_PORT', 25)) # noqa: F405
EMAIL_USE_TLS = bool(os.environ.get('EMAIL_USE_TLS', False)) # noqa: F405
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_ENABLED = False
| bsd-2-clause | -6,018,888,593,459,702,000 | 22.87037 | 78 | 0.619085 | false |
SangRyul/bamboo | edit/change.py | 1 | 1175 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
def fileformatting():
import re
errorlog = open("error_log.txt","w+", encoding = "UTF8")
for x in range(1,1015):
try:
            #NOTE: adjust the upper bound of range() above to match the number of input files
f = open(str(x)+".txt", "r+", encoding = "UTF8")
            time = f.readline() # timestamp line
r = f.read()
if("<br /" in r):
r = r.replace("<br />", "")
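            # Split the dump on the "#대나무" (bamboo post) hashtag; each
            # resulting chunk starts with the post number.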
article = r.split("#대나무")
for k in range(len(article)):
if(len(article[k])>1 and article[k][0].isdigit()):
bamboo_name = re.search(r'\d+', article[k]).group()
article[k] = article[k].replace(bamboo_name, "")
newfile = open(bamboo_name+".txt", "w+", encoding = "UTF8")
newfile.write(time)
newfile.write(article[k])
print(x)
        except Exception:
            errorlog.write(str(x) + ' file is corrupted\n')
if __name__ == "__main__":
fileformatting()
| gpl-3.0 | -1,194,179,583,405,264,600 | 27.04878 | 79 | 0.422106 | false |
mronkain/Timer2 | setup.py | 1 | 1102 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="artmr",
version="1.0",
author="mronkain",
author_email="[email protected]",
description="Offline race timing console application",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mronkain/artmr",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 2.7",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Topic :: Utilities"
),
install_requires=[
'asciimatics>=1.9',
'sqlobject',
'pandas'
],
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4',
entry_points = {
'console_scripts': ['artmr=artmr.artmr:main'],
}
)
| mit | 7,069,182,115,293,866,000 | 28 | 59 | 0.578947 | false |
git-keeper/git-keeper | git-keeper-server/gkeepserver/event_handlers/class_add_handler.py | 1 | 3374 | # Copyright 2020 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provides a handler for adding a new class."""
from gkeepcore.path_utils import user_from_log_path
from gkeepcore.valid_names import validate_class_name
from gkeepserver.database import db
from gkeepserver.event_handler import EventHandler, HandlerException
from gkeepserver.gkeepd_logger import gkeepd_logger
from gkeepserver.handler_utils import log_gkeepd_to_faculty
from gkeepserver.info_update_thread import info_updater
class ClassAddHandler(EventHandler):
"""Handle creating a new class."""
def handle(self):
"""
Handle creating a new class. The class will initially be empty.
Writes success or failure to the gkeepd to faculty log.
"""
try:
validate_class_name(self._class_name)
if db.class_exists(self._class_name, self._faculty_username):
error = ('Class {} already exists. Use gkeep modify if you '
'would like to modify this class'
.format(self._class_name))
raise HandlerException(error)
db.insert_class(self._class_name, self._faculty_username)
info_updater.enqueue_class_scan(self._faculty_username,
self._class_name)
self._log_to_faculty('CLASS_ADD_SUCCESS', self._class_name)
except Exception as e:
self._log_error_to_faculty(str(e))
gkeepd_logger.log_warning('Class creation failed: {0}'.format(e))
def __repr__(self) -> str:
"""
Build a string representation of the event.
:return: string representation of the event
"""
string = 'Add class event: {0}'.format(self._payload)
return string
def _parse_payload(self):
"""
Extracts attributes from the log line.
Raises HandlerException if the log line is not well formed.
Sets the following attributes:
_faculty_username - username of the faculty member
_class_name - name of the class
"""
self._faculty_username = user_from_log_path(self._log_path)
self._class_name = self._payload
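        # For illustration (hypothetical example): the payload for this event
        # is just the class name, so for a class 'cs100f21' both the payload
        # and self._class_name would be 'cs100f21'.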
def _log_to_faculty(self, event_type, text):
"""
Write to the gkeepd.log for the faculty member.
:param event_type: event type
:param text: text to write to the log
"""
log_gkeepd_to_faculty(self._faculty_username, event_type, text)
def _log_error_to_faculty(self, error):
"""
Log a CLASS_ADD_ERROR message to the gkeepd.log for the faculty.
:param error: the error message
"""
self._log_to_faculty('CLASS_ADD_ERROR', error)
| agpl-3.0 | 8,357,152,285,302,182,000 | 33.080808 | 77 | 0.647303 | false |
TejasM/wisely | wisely_project/users/urls.py | 1 | 1393 | import views
__author__ = 'tmehta'
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^login/$', views.login_user, name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^signup/$', views.signup, name='sign-up'),
url(r'^index/$', views.index_alt, name='index'),
url(r'^index/alt$', views.index_alt, name='index_alt'),
url(r'^check_updated/$', views.check_updated, name='check_update'),
url(r'^force_updated/$', views.force_updated, name='force_update'),
url(r'^profile/$', views.profile, name='profile'),
url(r'^edit_profile/$', views.edit_profile, name='edit_profile'),
url(r'^profile/(?P<user_id>\w+)/$', views.public_profile, name='public_profile'),
url(r'^news/$', views.news, name='news'),
url(r'^compose/$', views.compose, name='compose'),
url(r'^reply/$', views.reply, name='reply'),
url(r'^follow/$', views.follow, name='follow'),
url(r'^get_course_stats/$', views.get_course_stats, name='get_course_stats'),
url(r'^contact_us/$', views.contact_us, name='contact_us'),
) | mit | 6,447,398,637,610,337,000 | 57.083333 | 104 | 0.492462 | false |
paalge/scikit-image | skimage/data/__init__.py | 1 | 10535 | # coding: utf-8
"""Standard test images.
For more images, see
- http://sipi.usc.edu/database/database.php
"""
import os as _os
import numpy as np
from .. import data_dir
from ..io import imread, use_plugin
from .._shared._warnings import expected_warnings
from ._binary_blobs import binary_blobs
from .. import img_as_bool
__all__ = ['load',
'astronaut',
'camera',
'checkerboard',
'chelsea',
'clock',
'coffee',
'coins',
'horse',
'hubble_deep_field',
'immunohistochemistry',
'logo',
'moon',
'page',
'text',
'rocket',
'stereo_motorcycle']
def load(f, as_grey=False):
"""Load an image file located in the data directory.
Parameters
----------
f : string
File name.
as_grey : bool, optional
Convert to greyscale.
Returns
-------
img : ndarray
Image loaded from ``skimage.data_dir``.
"""
use_plugin('pil')
return imread(_os.path.join(data_dir, f), as_grey=as_grey)
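# Illustrative usage (sketch):
#   img = load('camera.png')  # the same image that camera() returns below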
def camera():
"""Gray-level "camera" image.
Often used for segmentation and denoising examples.
Returns
-------
camera : (512, 512) uint8 ndarray
Camera image.
"""
return load("camera.png")
def astronaut():
"""Colour image of the astronaut Eileen Collins.
Photograph of Eileen Collins, an American astronaut. She was selected
as an astronaut in 1992 and first piloted the space shuttle STS-63 in
1995. She retired in 2006 after spending a total of 38 days, 8 hours
and 10 minutes in outer space.
This image was downloaded from the NASA Great Images database
<http://grin.hq.nasa.gov/ABSTRACTS/GPN-2000-001177.html>`__.
No known copyright restrictions, released into the public domain.
Returns
-------
astronaut : (512, 512, 3) uint8 ndarray
Astronaut image.
"""
return load("astronaut.png")
def text():
"""Gray-level "text" image used for corner detection.
Notes
-----
This image was downloaded from Wikipedia
<http://en.wikipedia.org/wiki/File:Corner.png>`__.
No known copyright restrictions, released into the public domain.
Returns
-------
text : (172, 448) uint8 ndarray
Text image.
"""
return load("text.png")
def checkerboard():
"""Checkerboard image.
Checkerboards are often used in image calibration, since the
corner-points are easy to locate. Because of the many parallel
edges, they also visualise distortions particularly well.
Returns
-------
checkerboard : (200, 200) uint8 ndarray
Checkerboard image.
"""
return load("chessboard_GRAY.png")
def coins():
"""Greek coins from Pompeii.
This image shows several coins outlined against a gray background.
It is especially useful in, e.g. segmentation tests, where
individual objects need to be identified against a background.
The background shares enough grey levels with the coins that a
simple segmentation is not sufficient.
Notes
-----
This image was downloaded from the
`Brooklyn Museum Collection
<http://www.brooklynmuseum.org/opencollection/archives/image/617/image>`__.
No known copyright restrictions.
Returns
-------
coins : (303, 384) uint8 ndarray
Coins image.
"""
return load("coins.png")
def logo():
"""Scikit-image logo, a RGBA image.
Returns
-------
logo : (500, 500, 4) uint8 ndarray
Logo image.
"""
return load("logo.png")
def moon():
"""Surface of the moon.
This low-contrast image of the surface of the moon is useful for
illustrating histogram equalization and contrast stretching.
Returns
-------
moon : (512, 512) uint8 ndarray
Moon image.
"""
return load("moon.png")
def page():
"""Scanned page.
This image of printed text is useful for demonstrations requiring uneven
background illumination.
Returns
-------
page : (191, 384) uint8 ndarray
Page image.
"""
return load("page.png")
def horse():
"""Black and white silhouette of a horse.
This image was downloaded from
`openclipart <http://openclipart.org/detail/158377/horse-by-marauder>`
Released into public domain and drawn and uploaded by Andreas Preuss
(marauder).
Returns
-------
horse : (328, 400) bool ndarray
Horse image.
"""
with expected_warnings(['Possible precision loss', 'Possible sign loss']):
return img_as_bool(load("horse.png", as_grey=True))
def clock():
"""Motion blurred clock.
This photograph of a wall clock was taken while moving the camera in an
    approximately horizontal direction. It may be used to illustrate
inverse filters and deconvolution.
Released into the public domain by the photographer (Stefan van der Walt).
Returns
-------
clock : (300, 400) uint8 ndarray
Clock image.
"""
return load("clock_motion.png")
def immunohistochemistry():
"""Immunohistochemical (IHC) staining with hematoxylin counterstaining.
This picture shows colonic glands where the IHC expression of FHL2 protein
is revealed with DAB. Hematoxylin counterstaining is applied to enhance the
negative parts of the tissue.
This image was acquired at the Center for Microscopy And Molecular Imaging
(CMMI).
No known copyright restrictions.
Returns
-------
immunohistochemistry : (512, 512, 3) uint8 ndarray
Immunohistochemistry image.
"""
return load("ihc.png")
def chelsea():
"""Chelsea the cat.
An example with texture, prominent edges in horizontal and diagonal
directions, as well as features of differing scales.
Notes
-----
No copyright restrictions. CC0 by the photographer (Stefan van der Walt).
Returns
-------
chelsea : (300, 451, 3) uint8 ndarray
Chelsea image.
"""
return load("chelsea.png")
def coffee():
"""Coffee cup.
This photograph is courtesy of Pikolo Espresso Bar.
It contains several elliptical shapes as well as varying texture (smooth
    porcelain to coarse wood grain).
Notes
-----
No copyright restrictions. CC0 by the photographer (Rachel Michetti).
Returns
-------
coffee : (400, 600, 3) uint8 ndarray
Coffee image.
"""
return load("coffee.png")
def hubble_deep_field():
"""Hubble eXtreme Deep Field.
This photograph contains the Hubble Telescope's farthest ever view of
the universe. It can be useful as an example for multi-scale
detection.
Notes
-----
This image was downloaded from
`HubbleSite
<http://hubblesite.org/newscenter/archive/releases/2012/37/image/a/>`__.
The image was captured by NASA and `may be freely used in the public domain
<http://www.nasa.gov/audience/formedia/features/MP_Photo_Guidelines.html>`_.
Returns
-------
hubble_deep_field : (872, 1000, 3) uint8 ndarray
Hubble deep field image.
"""
return load("hubble_deep_field.jpg")
def rocket():
"""Launch photo of DSCOVR on Falcon 9 by SpaceX.
This is the launch photo of Falcon 9 carrying DSCOVR lifted off from
SpaceX's Launch Complex 40 at Cape Canaveral Air Force Station, FL.
Notes
-----
This image was downloaded from
`SpaceX Photos
<https://www.flickr.com/photos/spacexphotos/16511594820/in/photostream/>`__.
The image was captured by SpaceX and `released in the public domain
<http://arstechnica.com/tech-policy/2015/03/elon-musk-puts-spacex-photos-into-the-public-domain/>`_.
Returns
-------
rocket : (427, 640, 3) uint8 ndarray
Rocket image.
"""
return load("rocket.jpg")
def stereo_motorcycle():
"""Rectified stereo image pair with ground-truth disparities.
The two images are rectified such that every pixel in the left image has its
corresponding pixel on the same scanline in the right image. That means that
both images are warped such that they have the same orientation but a
horizontal spatial offset (baseline). The ground-truth pixel offset in
column direction is specified by the included disparity map.
The two images are part of the Middlebury 2014 stereo benchmark. The dataset
was created by Nera Nesic, Porter Westling, Xi Wang, York Kitajima, Greg
Krathwohl, and Daniel Scharstein at Middlebury College. A detailed
description of the acquisition process can be found in [1]_.
The images included here are down-sampled versions of the default exposure
images in the benchmark. The images are down-sampled by a factor of 4 using
the function `skimage.transform.downscale_local_mean`. The calibration data
in the following and the included ground-truth disparity map are valid for
the down-sampled images::
Focal length: 994.978px
Principal point x: 311.193px
Principal point y: 254.877px
Principal point dx: 31.086px
Baseline: 193.001mm
Returns
-------
img_left : (500, 741, 3) uint8 ndarray
Left stereo image.
img_right : (500, 741, 3) uint8 ndarray
Right stereo image.
disp : (500, 741, 3) float ndarray
Ground-truth disparity map, where each value describes the offset in
column direction between corresponding pixels in the left and the right
stereo images. E.g. the corresponding pixel of
``img_left[10, 10 + disp[10, 10]]`` is ``img_right[10, 10]``.
NaNs denote pixels in the left image that do not have ground-truth.
Notes
-----
The original resolution images, images with different exposure and lighting,
and ground-truth depth maps can be found at the Middlebury website [2]_.
References
----------
.. [1] D. Scharstein, H. Hirschmueller, Y. Kitajima, G. Krathwohl, N. Nesic,
X. Wang, and P. Westling. High-resolution stereo datasets with
subpixel-accurate ground truth. In German Conference on Pattern
Recognition (GCPR 2014), Muenster, Germany, September 2014.
.. [2] http://vision.middlebury.edu/stereo/data/scenes2014/
"""
return (load("motorcycle_left.png"),
load("motorcycle_right.png"),
np.load(_os.path.join(data_dir, "motorcycle_disp.npz"))["arr_0"])
| bsd-3-clause | -3,940,344,765,195,876,400 | 26.292746 | 104 | 0.64879 | false |
TransparentHealth/hhs_oauth_client | apps/provider/migrations/0010_auto_20160623_1813.py | 1 | 1676 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('provider', '0009_auto_20160623_1600'),
]
operations = [
migrations.RemoveField(
model_name='organization',
name='addresses',
),
migrations.RemoveField(
model_name='organization',
name='affiliations',
),
migrations.RemoveField(
model_name='organization',
name='licenses',
),
migrations.RemoveField(
model_name='organization',
name='taxonomies',
),
migrations.RemoveField(
model_name='practitioner',
name='addresses',
),
migrations.RemoveField(
model_name='practitioner',
name='affiliations',
),
migrations.RemoveField(
model_name='practitioner',
name='licenses',
),
migrations.RemoveField(
model_name='practitioner',
name='taxonomies',
),
migrations.AddField(
model_name='affiliation',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
migrations.AddField(
model_name='license',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
migrations.AddField(
model_name='taxonomy',
name='npi',
field=models.CharField(default='', max_length=10, blank=True),
),
]
| apache-2.0 | -2,265,275,564,265,581,000 | 26.47541 | 74 | 0.520883 | false |
mtwilliams/mojo | dependencies/assimp-2.0.863/port/PyAssimp/pyassimp/structs.py | 1 | 31693 | #-*- coding: UTF-8 -*-
from ctypes import POINTER, c_int, c_uint, c_char, c_float, Structure, c_char_p, c_double, c_ubyte
class Matrix3x3(Structure):
"""
See 'aiMatrix3x3.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),
]
class Matrix4x4(Structure):
"""
See 'aiMatrix4x4.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float),
("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float),
]
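# Illustrative sketch: ctypes structures are filled field by field, so an
# identity matrix could be built as (assumes the field order above):
#   identity = Matrix4x4(1.0, 0.0, 0.0, 0.0,
#                        0.0, 1.0, 0.0, 0.0,
#                        0.0, 0.0, 1.0, 0.0,
#                        0.0, 0.0, 0.0, 1.0)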
class Face(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Number of indices defining this face. 3 for a triangle, >3 for polygon
("mNumIndices", c_uint),
# Pointer to the indices array. Size of the array is given in numIndices.
("mIndices", POINTER(c_uint)),
]
class VertexWeight(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Index of the vertex which is influenced by the bone.
("mVertexId", c_uint),
# The strength of the influence in the range (0...1).
# The influence from all bones at one vertex amounts to 1.
("mWeight", c_float),
]
class Quaternion(Structure):
"""
See 'aiQuaternion.h' for details.
"""
_fields_ = [
# w,x,y,z components of the quaternion
("w", c_float),("x", c_float),("y", c_float),("z", c_float),
]
class Texel(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte),
]
class Plane(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Plane equation
("a", c_float),("b", c_float),("c", c_float),("d", c_float),
]
class Color3D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green and blue color values
("r", c_float),("g", c_float),("b", c_float),
]
class Color4D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green, blue and alpha color values
("r", c_float),("g", c_float),("b", c_float),("a", c_float),
]
class String(Structure):
"""
See 'aiTypes.h' for details.
"""
MAXLEN = 1024
_fields_ = [
# Length of the string excluding the terminal 0
("length", c_uint),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MemoryInfo(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Storage allocated for texture data, in bytes
("textures", c_uint),
# Storage allocated for material data, in bytes
("materials", c_uint),
# Storage allocated for mesh data, in bytes
("meshes", c_uint),
# Storage allocated for node data, in bytes
("nodes", c_uint),
# Storage allocated for animation data, in bytes
("animations", c_uint),
# Storage allocated for camera data, in bytes
("cameras", c_uint),
# Storage allocated for light data, in bytes
("lights", c_uint),
# Storage allocated for the full import, in bytes
("total", c_uint),
]
class Vector2D(Structure):
"""
See 'aiVector2D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),
]
class Vector3D(Structure):
"""
See 'aiVector3D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),("z", c_float),
]
class Node(Structure):
"""
See 'aiScene.h' for details.
"""
Node._fields_ = [
#The name of the node.
# #The name might be empty (length of zero) but all nodes which
#need to be accessed afterwards by bones or anims are usually named.
#Multiple nodes may have the same name, but nodes which are accessed
#by bones (see #aiBone and #aiMesh::mBones) *must* be unique.
#
#Cameras and lights are assigned to a specific node name - if there
#are multiple nodes with this name, they're assigned to each of them.
#<br>
#There are no limitations regarding the characters contained in
#this text. You should be able to handle stuff like whitespace, tabs,
#linefeeds, quotation marks, ampersands, ... .
#
("mName", String),
#The transformation relative to the node's parent.#
("mTransformation", Matrix4x4),
#Parent node. NULL if this node is the root node.#
("mParent", POINTER(Node)),
#The number of child nodes of this node.#
("mNumChildren", c_uint),
#The child nodes of this node. NULL if mNumChildren is 0.#
("mChildren", POINTER(POINTER(Node))),
#The number of meshes of this node.#
("mNumMeshes", c_uint),
#The meshes of this node. Each entry is an index into the mesh#
("mMeshes", POINTER(c_uint)),
]
class VectorKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Vector3D),
]
class QuatKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Quaternion),
]
class NodeAnim(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
#The name of the node affected by this animation. The node
# must exist and it must be unique.
#
("mNodeName", String),
#The number of position keys#
("mNumPositionKeys", c_uint),
#The position keys of this animation channel. Positions are
#specified as 3D vector. The array is mNumPositionKeys in size.
# #If there are position keys, there will also be at least one
#scaling and one rotation key.
#
("mPositionKeys", POINTER(VectorKey)),
#The number of rotation keys#
("mNumRotationKeys", c_uint),
#The rotation keys of this animation channel. Rotations are
# given as quaternions, which are 4D vectors. The array is
# mNumRotationKeys in size.
# #If there are rotation keys, there will also be at least one
#scaling and one position key.
#
("mRotationKeys", POINTER(QuatKey)),
#The number of scaling keys#
("mNumScalingKeys", c_uint),
#The scaling keys of this animation channel. Scalings are
# specified as 3D vector. The array is mNumScalingKeys in size.
# #If there are scaling keys, there will also be at least one
#position and one rotation key.
#
("mScalingKeys", POINTER(VectorKey)),
#Defines how the animation behaves before the first
# key is encountered.
# # The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is used).
#
("mPreState", c_uint),
#Defines how the animation behaves after the last
# key was processed.
# # The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is taken).
#
("mPostState", c_uint),
]
class Animation(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
#The name of the animation. If the modeling package this data was
# exported from does support only a single animation channel, this
# name is usually empty (length is zero).
#
("mName", String),
#Duration of the animation in ticks.
#
("mDuration", c_double),
#Ticks per second. 0 if not specified in the imported file
#
("mTicksPerSecond", c_double),
#The number of bone animation channels. Each channel affects
# a single node.
#
("mNumChannels", c_uint),
#The node animation channels. Each channel affects a single node.
# The array is mNumChannels in size.
#
("mChannels", POINTER(POINTER(NodeAnim))),
]
class Camera(Structure):
"""
See 'aiCamera.h' for details.
"""
_fields_ = [
#The name of the camera.
# # There must be a node in the scenegraph with the same name.
# This node specifies the position of the camera in the scene
# hierarchy and can be animated.
#
("mName", String),
#Position of the camera relative to the coordinate space
# defined by the corresponding node.
# # The default value is 0|0|0.
#
("mPosition", Vector3D),
#'Up' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# # The 'right' vector of the camera coordinate system is
# the cross product of the up and lookAt vectors.
# The default value is 0|1|0. The vector
            # may be normalized, but it needn't be.
#
("mUp", Vector3D),
#'LookAt' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# # This is the viewing direction of the user.
# The default value is 0|0|1. The vector
            # may be normalized, but it needn't be.
#
("mLookAt", Vector3D),
#Half horizontal field of view angle, in radians.
# # The field of view angle is the angle between the center
# line of the screen and the left or right border.
# The default value is 1/4PI.
#
("mHorizontalFOV", c_float),
#Distance of the near clipping plane from the camera.
# #The value may not be 0.f (for arithmetic reasons to prevent
#a division through zero). The default value is 0.1f.
#
("mClipPlaneNear", c_float),
#Distance of the far clipping plane from the camera.
            #The far clipping plane must, of course, be farther away than the
#near clipping plane. The default value is 1000.f. The ratio
#between the near and the far plane should not be too
#large (between 1000-10000 should be ok) to avoid floating-point
#inaccuracies which could lead to z-fighting.
#
("mClipPlaneFar", c_float),
#Screen aspect ratio.
# #This is the ration between the width and the height of the
#screen. Typical values are 4/3, 1/2 or 1/1. This value is
#0 if the aspect ratio is not defined in the source file.
#0 is also the default value.
#
("mAspect", c_float),
]
class Light(Structure):
"""
See 'aiLight.h' for details.
"""
_fields_ = [
#The name of the light source.
# # There must be a node in the scenegraph with the same name.
# This node specifies the position of the light in the scene
# hierarchy and can be animated.
#
("mName", String),
#The type of the light source.
# #aiLightSource_UNDEFINED is not a valid value for this member.
#
("mType", c_uint),
#Position of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# # The position is undefined for directional lights.
#
("mPosition", Vector3D),
#Direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# # The direction is undefined for point lights. The vector
            # may be normalized, but it needn't be.
#
("mDirection", Vector3D),
#Constant light attenuation factor.
# # The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att0 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationConstant", c_float),
#Linear light attenuation factor.
# # The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att1 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationLinear", c_float),
#Quadratic light attenuation factor.
#
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1#d + att2#d*d)
# @endcode
# This member corresponds to the att2 variable in the equation.
# Naturally undefined for directional lights.
#
("mAttenuationQuadratic", c_float),
#Diffuse color of the light source
# # The diffuse light color is multiplied with the diffuse
# material color to obtain the final color that contributes
# to the diffuse shading term.
#
("mColorDiffuse", Color3D),
#Specular color of the light source
# # The specular light color is multiplied with the specular
# material color to obtain the final color that contributes
# to the specular shading term.
#
("mColorSpecular", Color3D),
#Ambient color of the light source
# # The ambient light color is multiplied with the ambient
# material color to obtain the final color that contributes
# to the ambient shading term. Most renderers will ignore
            # this value; it is just a remnant of the fixed-function pipeline
# that is still supported by quite many file formats.
#
("mColorAmbient", Color3D),
#Inner angle of a spot light's light cone.
# # The spot light has maximum influence on objects inside this
# angle. The angle is given in radians. It is 2PI for point
# lights and undefined for directional lights.
#
("mAngleInnerCone", c_float),
#Outer angle of a spot light's light cone.
# # The spot light does not affect objects outside this angle.
# The angle is given in radians. It is 2PI for point lights and
# undefined for directional lights. The outer angle must be
# greater than or equal to the inner angle.
# It is assumed that the application uses a smooth
# interpolation between the inner and the outer cone of the
# spot light.
#
("mAngleOuterCone", c_float),
]
class UVTransform(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#Translation on the u and v axes.
# # The default value is (0|0).
#
("mTranslation", Vector2D),
#Scaling on the u and v axes.
# # The default value is (1|1).
#
("mScaling", Vector2D),
#Rotation - in counter-clockwise direction.
#
# The rotation angle is specified in radians. The
# rotation center is 0.5f|0.5f. The default value is 0.f
# (see the sketch after this class).
#
("mRotation", c_float),
]
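# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): applies a UVTransform to a (u, v) pair using one plausible
# composition order: scale, then a counter-clockwise rotation around the
# (0.5, 0.5) pivot, then translation. It assumes Vector2D exposes .x/.y
# fields as elsewhere in these bindings; consult the assimp documentation
# for the exact convention.
def apply_uv_transform(t, u, v):
    import math
    # Scaling on the u and v axes.
    u, v = u * t.mScaling.x, v * t.mScaling.y
    # Rotation around the 0.5|0.5 pivot, angle in radians.
    c, s = math.cos(t.mRotation), math.sin(t.mRotation)
    du, dv = u - 0.5, v - 0.5
    u, v = 0.5 + c * du - s * dv, 0.5 + s * du + c * dv
    # Translation on the u and v axes.
    return u + t.mTranslation.x, v + t.mTranslation.y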
class MaterialProperty(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#Specifies the name of the property (key)
#
# Keys are case insensitive.
#
("mKey", String),
#Textures: Specifies the exact usage semantic.
#
# For non-texture properties, this member is always 0
# or #aiTextureType_NONE.
#
("mSemantic", c_uint),
#Textures: Specifies the index of the texture
#
# For non-texture properties, this member is always 0.
#
("mIndex", c_uint),
#Size of the buffer mData is pointing to, in bytes.
#
# This value may not be 0.
#
("mDataLength", c_uint),
#Type information for the property.
#
# Defines the data layout inside the data buffer. This is used
#by the library internally to perform debug checks and to
#utilize proper type conversions.
#(It's probably a hacky solution, but it works.)
#
("mType", c_uint),
#Binary buffer to hold the property's value
#
# The size of the buffer is always mDataLength
# (see the sketch after this class).
#
("mData", POINTER(c_char)),
]
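# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): copies the raw value bytes out of a populated MaterialProperty.
# Decoding the buffer further requires inspecting mType, which is
# application-specific.
def material_property_bytes(prop):
    from ctypes import string_at
    # mData points to exactly mDataLength bytes.
    return string_at(prop.mData, prop.mDataLength)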
class Material(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
#List of all material properties loaded (see the sketch after this class).#
("mProperties", POINTER(POINTER(MaterialProperty))),
#Number of properties in the database#
("mNumProperties", c_uint),
#Storage allocated#
("mNumAllocated", c_uint),
]
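# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): lists the property keys of a populated Material. It assumes
# the String structure defined earlier in this module exposes its bytes
# via a .data field.
def material_keys(mat):
    # mProperties is an array of mNumProperties pointers to MaterialProperty.
    return [mat.mProperties[i].contents.mKey.data
            for i in range(mat.mNumProperties)]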
class Bone(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# The name of the bone.
("mName", String),
# The number of vertices affected by this bone
("mNumWeights", c_uint),
# The vertices affected by this bone (see the sketch after this class)
("mWeights", POINTER(VertexWeight)),
# Matrix that transforms from mesh space to bone space in bind pose
("mOffsetMatrix", Matrix4x4),
]
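# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): collects the (vertex index, weight) pairs of a populated Bone.
# It assumes VertexWeight exposes .mVertexId and .mWeight, mirroring
# assimp's aiVertexWeight.
def bone_weights(bone):
    return [(bone.mWeights[i].mVertexId, bone.mWeights[i].mWeight)
            for i in range(bone.mNumWeights)]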
class Mesh(Structure):
"""
See 'aiMesh.h' for details.
"""
AI_MAX_NUMBER_OF_COLOR_SETS = 0x4
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x4
_fields_ = [
#Bitwise combination of the members of the #aiPrimitiveType enum.
#This specifies which types of primitives are present in the mesh.
#The "SortByPrimitiveType"-Step can be used to make sure the
#output meshes consist of one primitive type each.
#
("mPrimitiveTypes", c_uint),
#The number of vertices in this mesh.
#This is also the size of all of the per-vertex data arrays
#
("mNumVertices", c_uint),
#The number of primitives (triangles, polygons, lines) in this mesh.
#This is also the size of the mFaces array
#
("mNumFaces", c_uint),
#Vertex positions.
#This array is always present in a mesh. The array is
#mNumVertices in size.
#
("mVertices", POINTER(Vector3D)),
#Vertex normals.
#The array contains normalized vectors, NULL if not present.
#The array is mNumVertices in size. Normals are undefined for
#point and line primitives. A mesh consisting of points and
#lines only may not have normal vectors. Meshes with mixed
#primitive types (i.e. lines and triangles) may have normals,
#but the normals for vertices that are only referenced by
#point or line primitives are undefined and set to QNaN (WARN:
#qNaN compares unequal to *everything*, even to qNaN itself.
#Use code like this
#@code
##define IS_QNAN(f) (f != f)
#@endcode
#to check whether a field is qNaN; a Python version is sketched
#after this class).
#@note Normal vectors computed by Assimp are always unit-length.
#However, this needn't apply for normals that have been taken
# directly from the model file.
#
("mNormals", POINTER(Vector3D)),
#Vertex tangents.
#The tangent of a vertex points in the direction of the positive
#X texture axis. The array contains normalized vectors, NULL if
#not present. The array is mNumVertices in size. A mesh consisting
#of points and lines only may not have tangent vectors. Meshes with
#mixed primitive types (i.e. lines and triangles) may have
#tangents, but the tangents for vertices that are only referenced by
#point or line primitives are undefined and set to QNaN.
#@note If the mesh contains tangents, it automatically also
#contains bitangents (the bitangent is just the cross product of
#tangent and normal vectors).
#
("mTangents", POINTER(Vector3D)),
#Vertex bitangents.
#The bitangent of a vertex points in the direction of the positive
#Y texture axis. The array contains normalized vectors, NULL if not
#present. The array is mNumVertices in size.
#@note If the mesh contains tangents, it automatically also contains
#bitangents.
#
("mBitangents", POINTER(Vector3D)),
#Vertex color sets.
#A mesh may contain 0 to #AI_MAX_NUMBER_OF_COLOR_SETS vertex
#colors per vertex. NULL if not present. Each array is
#mNumVertices in size if present.
#
("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),
#Vertex texture coords, also known as UV channels.
#A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS texture
#coordinate sets per vertex. NULL if not present. Each array is
#mNumVertices in size.
#
("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),
#Specifies the number of components for a given UV channel.
#Up to three channels are supported (UVW, for accessing volume
#or cube maps). If the value is 2 for a given channel n, the
#component p.z of mTextureCoords[n][p] is set to 0.0f.
#If the value is 1 for a given channel, p.y is set to 0.0f, too.
#@note 4D coords are not supported
#
("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),
#The faces the mesh is constructed from.
#Each face refers to a number of vertices by their indices.
#This array is always present in a mesh, its size is given
#in mNumFaces. If the AI_SCENE_FLAGS_NON_VERBOSE_FORMAT
#is NOT set, each face references a unique set of vertices.
#
("mFaces", POINTER(Face)),
#The number of bones this mesh contains.
#Can be 0, in which case the mBones array is NULL.
#
("mNumBones", c_uint),
#The bones of this mesh.
#A bone consists of a name by which it can be found in the
#frame hierarchy and a set of vertex weights.
#
("mBones", POINTER(POINTER(Bone))),
#The material used by this mesh.
#A mesh uses only a single material. If an imported model uses
#multiple materials, the import splits up the mesh. Use this value
#as index into the scene's material list.
#
("mMaterialIndex", c_uint),
]
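# --- Illustrative helpers (editorial sketches, not part of the generated
# bindings). is_qnan() is the Python counterpart of the IS_QNAN macro
# quoted in the mNormals comment above; mesh_positions() pulls the vertex
# positions out of a populated Mesh, assuming Vector3D exposes .x/.y/.z
# fields as elsewhere in these bindings.
def is_qnan(f):
    # NaN is the only float value that compares unequal to itself.
    return f != f

def mesh_positions(mesh):
    v = mesh.mVertices
    return [(v[i].x, v[i].y, v[i].z) for i in range(mesh.mNumVertices)]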
class Texture(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
#Width of the texture, in pixels
#
# If mHeight is zero the texture is compressed in a format
#like JPEG. In this case mWidth specifies the size of the
#memory area pcData is pointing to, in bytes.
#
("mWidth", c_uint),
#Height of the texture, in pixels
#
# If this value is zero, pcData points to a compressed texture
#in any format (e.g. JPEG).
#
("mHeight", c_uint),
#A hint from the loader to make it easier for applications
# to determine the type of embedded compressed textures.
#
# If mHeight != 0 this member is undefined. Otherwise it
#is set to '\\0\\0\\0\\0' if the loader has no additional
#information about the texture file format used OR the
#file extension of the format without a trailing dot. If there
#are multiple file extensions for a format, the shortest
#extension is chosen (JPEG maps to 'jpg', not to 'jpeg').
#E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case.
#The fourth character will always be '\\0'.
#
("achFormatHint", c_char*4),
#Data of the texture.
#
# Points to an array of mWidth * mHeight aiTexel's.
#The format of the texture data is always ARGB8888 to
#make the implementation for users of the library as easy
#as possible. If mHeight is 0 this is a pointer to a memory
#buffer of size mWidth containing the compressed texture
#data (a decoding sketch follows this class). Good luck, have fun!
#
("pcData", POINTER(Texel)),
]
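# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): extracts the payload of a populated Texture following the
# rules documented above. Texel is the structure defined earlier in this
# module.
def texture_payload(tex):
    from ctypes import string_at, sizeof
    if tex.mHeight == 0:
        # Compressed: mWidth is the byte size of the blob and
        # achFormatHint names the container format (e.g. 'jpg').
        return 'compressed', string_at(tex.pcData, tex.mWidth)
    # Uncompressed ARGB8888: mWidth * mHeight texels.
    return 'raw', string_at(tex.pcData, tex.mWidth * tex.mHeight * sizeof(Texel))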
class Ray(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Position and direction of the ray
("pos", Vector3D),("dir", Vector3D),
]
class Scene(Structure):
"""
See 'aiScene.h' for details.
"""
AI_SCENE_FLAGS_INCOMPLETE = 0x1
AI_SCENE_FLAGS_VALIDATED = 0x2
AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4
AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8
AI_SCENE_FLAGS_TERRAIN = 0x10
_fields_ = [
#Any combination of the AI_SCENE_FLAGS_XXX flags. By default
#this value is 0 and no flags are set. Most applications will
#want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE
#bit set (see the helper sketch after this class).
#
("mFlags", c_uint),
#The root node of the hierarchy.
#
#There will always be at least the root node if the import
#was successful (and no special flags have been set).
#Presence of further nodes depends on the format and content
#of the imported file.
#
("mRootNode", POINTER(Node)),
#The number of meshes in the scene.#
("mNumMeshes", c_uint),
#The array of meshes.
#
# Use the indices given in the aiNode structure to access
#this array. The array is mNumMeshes in size. If the
#AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
#be at least ONE mesh.
#
("mMeshes", POINTER(POINTER(Mesh))),
#The number of materials in the scene.#
("mNumMaterials", c_uint),
#The array of materials.
#
#Use the index given in each aiMesh structure to access this
#array. The array is mNumMaterials in size. If the
#AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
#be at least ONE material.
#
("mMaterials", POINTER(POINTER(Material))),
#The number of animations in the scene.#
("mNumAnimations", c_uint),
#The array of animations.
#
# All animations imported from the given file are listed here.
#The array is mNumAnimations in size.
#
("mAnimations", POINTER(POINTER(Animation))),
#The number of textures embedded into the file#
("mNumTextures", c_uint),
#The array of embedded textures.
#
#Not many file formats embed their textures into the file.
#An example is Quake's MDL format (which is also used by
#some GameStudio versions)
#
("mTextures", POINTER(POINTER(Texture))),
#The number of light sources in the scene. Light sources
#are fully optional; in most cases this attribute will be 0
#
("mNumLights", c_uint),
#The array of light sources.
#
#All light sources imported from the given file are
#listed here. The array is mNumLights in size.
#
("mLights", POINTER(POINTER(Light))),
#The number of cameras in the scene. Cameras
#are fully optional; in most cases this attribute will be 0
#
("mNumCameras", c_uint),
#The array of cameras.
#
#All cameras imported from the given file are listed here.
#The array is mNumCameras in size. The first camera in the
#array (if one exists) is the default camera view into
#the scene.
#
("mCameras", POINTER(POINTER(Camera))),
]
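# --- Illustrative helper (an editorial sketch, not part of the generated
# bindings): the flag test recommended in the mFlags comment above, for a
# populated Scene.
def scene_looks_complete(scene):
    # Most applications should reject scenes with the INCOMPLETE bit set.
    return not (scene.mFlags & Scene.AI_SCENE_FLAGS_INCOMPLETE)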
| mit | 2,478,706,221,965,106,700 | 34.411173 | 98 | 0.523901 | false |