# Code dataset dump: Python source files sampled from open-source repositories,
# with per-file metadata columns (repo_name, path, copies, size, license, hash,
# line statistics, and quality flags) summarized in the headers below.
# ==== repo: riusksk/riufuzz | file: tools/coverage/Utilities/BBminus.py | license: apache-2.0 ====
import os
import shutil
import getopt
import sys
inputDir = ".\\input"
outputDir = ".\\output"
inputFile = ".\\result.txt"
modules = {}
basicblocks = {}
#Conf
def help():
print "Possible arguments: GenBpFiles.py [-h] [-d DIR] [-o FILE]"
print " -h Prints this message to you"
print " -d DIR Directory that contains basicblocks files"
print " -i FILE File that contains basicblocks to remove"
print " -o DIR Result directory"
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:i:o:", [])
except:
help()
sys.exit()
for opt, arg in opts:
if opt in("-h"):
help()
sys.exit()
if opt in("-d"):
inputDir = arg
if opt in("-i"):
inputFile = arg
if opt in("-o"):
outputDir = arg
#input file
print "Reading input file %s" % inputFile
f = open(inputFile)
#module list
line = f.readline()
modules = {}
while line != "" and line[2] != "|":
moduleName = line[:line.find("|")]
moduleCode = line[line.find("|")+1:line.find("|")+3]
modules[moduleCode] = moduleName
if moduleName not in basicblocks:
basicblocks[moduleName] = {}
line = f.readline()
#basicblock
while line.strip() != "":
moduleCode = line[0:2]
bb = line[3:11]
moduleName = modules[moduleCode]
if bb not in basicblocks[moduleName]:
basicblocks[moduleName][bb] = 1
else:
basicblocks[moduleName][bb] += 1
line = f.readline()
f.close()
#Modifying basicblocks
if not os.path.isdir(outputDir):
os.makedirs(outputDir)
for fname in os.listdir(inputDir):
f = open(inputDir + "/" + fname)
moduleLine = f.readline()
module = moduleLine.strip().lower()
if len(basicblocks[module]) == 0:
print "File %s remains unchanged" % fname
f.close()
shutil.copy2(inputDir + "/" + fname, outputDir + "/" + fname)
continue
print "Modifying %s" % fname
#basicblock
fout = open(outputDir + "/" + fname, "w")
fout.write(moduleLine)
line = f.readline()
while line.strip() != "":
bb = line[0:8]
if bb not in basicblocks[module]:
fout.write(line)
line = f.readline()
f.close()
    fout.close()
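# Illustrative invocation (assumed; mirrors the defaults above, Python 2 on Windows):
#   python BBminus.py -d .\input -i .\result.txt -o .\output
# Every basic block listed in result.txt is removed from the per-module
# basic-block files found in the input directory.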
# ==== repo: prasunroypr/ai-driver | file: v2.0/models.py | license: mit ====
# -*- coding: utf-8 -*-
""" AlexNet.
References:
- Alex Krizhevsky, Ilya Sutskever & Geoffrey E. Hinton. ImageNet
Classification with Deep Convolutional Neural Networks. NIPS, 2012.
- [AlexNet Paper](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
"""
#%% import modules
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
#%% Building 'AlexNet'
def alexnet(n_rows, n_cols, lr=0.001):
network = input_data(shape=[None, n_rows, n_cols, 1], name='input')
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 2048, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 2048, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4, activation='softmax')
network = regression(network, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=lr,
name='target')
model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
tensorboard_verbose=2, tensorboard_dir='logs')
# model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True, show_metric=True,
# batch_size=64, snapshot_step=200, snapshot_epoch=False, run_id='alexnet')
return model
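# Minimal usage sketch (illustrative, not from the original repo; the input
# size and training tensors are assumptions, the 4-class output is fixed above):
#
#   model = alexnet(n_rows=80, n_cols=80, lr=0.001)
#   # X: float array [n_samples, 80, 80, 1]; Y: one-hot labels with 4 classes
#   model.fit(X, Y, n_epoch=10, validation_set=0.1, shuffle=True,
#             show_metric=True, batch_size=64, run_id='alexnet')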
# ==== repo: pytorch/fairseq | file: fairseq/data/encoders/moses_tokenizer.py | license: mit ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass
@dataclass
class MosesTokenizerConfig(FairseqDataclass):
source_lang: str = field(default="en", metadata={"help": "source language"})
target_lang: str = field(default="en", metadata={"help": "target language"})
moses_no_dash_splits: bool = field(
default=False, metadata={"help": "don't apply dash split rules"}
)
moses_no_escape: bool = field(
default=False,
metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."},
)
@register_tokenizer("moses", dataclass=MosesTokenizerConfig)
class MosesTokenizer(object):
def __init__(self, cfg: MosesTokenizerConfig):
self.cfg = cfg
try:
from sacremoses import MosesTokenizer, MosesDetokenizer
self.tok = MosesTokenizer(cfg.source_lang)
self.detok = MosesDetokenizer(cfg.target_lang)
except ImportError:
raise ImportError(
"Please install Moses tokenizer with: pip install sacremoses"
)
def encode(self, x: str) -> str:
return self.tok.tokenize(
x,
aggressive_dash_splits=(not self.cfg.moses_no_dash_splits),
return_str=True,
escape=(not self.cfg.moses_no_escape),
)
def decode(self, x: str) -> str:
return self.detok.detokenize(x.split())
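# Usage sketch (illustrative; requires sacremoses, field values are examples):
#
#   cfg = MosesTokenizerConfig(source_lang="en", target_lang="en")
#   tok = MosesTokenizer(cfg)
#   tok.encode("Hello, world!")    # -> "Hello , world !"
#   tok.decode("Hello , world !")  # -> "Hello, world!"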
# ==== repo: musashin/Py429 | file: ARINC429/DiscreteBit.py | license: mit ====
'''
Created on 2013-11-05
@author: nicolas
'''
import MessageField
import Exception
class Field(MessageField.Field):
'''
    This subclass of A429MsgField is part of an ensemble of classes
    that can be used as a utility for packing and unpacking A429 messages.
    This Field class is more specifically dedicated to managing single bits
    in discrete ARINC 429 messages.
'''
def __repr__(self):
if self._value is not None:
return '<%s.%s object at 0x%x, value %s [%s]>'%(self.__module__,
self.__class__.__name__,
id(self),
str(self._value),
repr(MessageField.Field))
else:
return '<%s.%s object at 0x%x [%s]>'%(self.__module__,
self.__class__.__name__,
id(self),
repr(MessageField.Field))
def __init__(self,bitIndex,bitName,meaningWhenSet,meaningWhenNotSet):
'''
Simply declare a 1 bit field at the specified position
Note: LSB index is 1
'''
MessageField.Field.__init__(self,bitIndex, 1,bitName)
self._value = None
self._meaningWhenSet = meaningWhenSet
self._meaningWhenNotSet = meaningWhenNotSet
def is_data_set(self):
return self._value is not None
def setData(self,bitValue):
''' set the bit value
This function expect the bit value passed as a boolean
'''
if type(bitValue) != type(bool()):
raise Exception.A429Exception('Bit are expected as bool')
else:
self._value = bitValue
def getData(self):
''' get the bit value '''
if self._value is None:
raise Exception.A429NoData(self.name)
else:
return self._value
def clear(self):
'''
Clear the label value
'''
self._value = None
def pack(self):
'''
Return the 32 bits word corresponding to an A429 message with the bit data (all other bits at zero)
'''
if self._value is None:
raise Exception.A429NoData(self.name)
else:
return MessageField.Field.pack(self,int(self._value))
def unpack(self,A429word):
""" set the bit value given a 32 bit ARINC 429 message value """
self._value = bool(MessageField.Field.unpack(self,A429word))
def __eq__(self, other):
'''
Define the == operator to compare field definition AND parity convention
'''
if isinstance(other, Field):
return self.__dict__ == other.__dict__
else:
return NotImplemented
    def __ne__(self, other):
        '''
        Define the != operator to compare field definition AND parity convention
        '''
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def serialize(self, stream, serializeState = False , parentElement = None):
'''
Serialize field to XML
'''
from xml.etree.ElementTree import Element, SubElement, Comment, ElementTree
fieldElement = super(Field,self).serialize(stream,serializeState,parentElement)
fieldElement.set('type',__name__)
fieldElement.set('meaningWhenSet', self._meaningWhenSet)
fieldElement.set('meaningWhenNotSet', self._meaningWhenNotSet)
if serializeState:
fieldElement.text = str(self._value)
        return fieldElement
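# Usage sketch (illustrative; assumes the package's MessageField base class and
# a hypothetical discrete bit at position 11):
#
#   flag = Field(11, 'MyFlag', meaningWhenSet='armed', meaningWhenNotSet='off')
#   flag.setData(True)
#   word = flag.pack()      # 32-bit word with only bit 11 set
#   flag.unpack(word)
#   assert flag.getData() is True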
# ==== repo: eofs/django-oauth-api | file: oauth_api/permissions.py | license: bsd-2-clause ====
from django.core.exceptions import ImproperlyConfigured
from rest_framework.permissions import BasePermission
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class OAuth2ScopePermission(BasePermission):
"""
Make sure request is authenticated and token has right scope set.
"""
def has_permission(self, request, view):
token = request.auth
read_only = request.method in SAFE_METHODS
if not token:
return False
if hasattr(token, 'scope'):
scopes = self.get_scopes(request, view)
if scopes['required'] is not None:
is_valid = token.is_valid(scopes['required'])
if not is_valid:
return False
else:
# View did not define any required scopes
is_valid = False
# Check for method specific scopes
if read_only:
if scopes['read'] is not None:
return token.is_valid(scopes['read'])
else:
if scopes['write'] is not None:
return token.is_valid(scopes['write'])
return is_valid
assert False, ('OAuth2ScopePermission requires the '
'`oauth_api.authentication.OAuth2Authentication` '
'class to be used.')
def get_scopes(self, request, view):
required = getattr(view, 'required_scopes', None)
read = getattr(view, 'read_scopes', None)
write = getattr(view, 'write_scopes', None)
if not required and not read and not write:
raise ImproperlyConfigured(
'OAuth protected resources requires scopes. Please add required_scopes, read_scopes or write_scopes.'
)
return {
'required': required,
'read': read,
'write': write,
}
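# Usage sketch (illustrative; assumes Django REST framework views and the
# OAuth2Authentication class referenced in the assert above):
#
#   class UserList(APIView):
#       authentication_classes = [OAuth2Authentication]
#       permission_classes = [OAuth2ScopePermission]
#       required_scopes = ['users']        # checked for every request
#       read_scopes = ['users:read']       # additionally checked for GET/HEAD/OPTIONS
#       write_scopes = ['users:write']     # additionally checked for other methods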
# ==== repo: xju2/xaodtools | file: bsubs/monojet/submit_monojet.py | license: mit ====
#!/usr/bin/env python
import sys
import string
import commands
import os
from optparse import OptionParser
import glob
base_dir = os.getcwd()
exe_base = "/afs/cern.ch/user/x/xju/work/upsilon/code/MyXAODTools/bsubs/monojet/"
def check_dir(dir_):
if not os.path.exists(dir_):
os.mkdir(dir_)
def submit(exe, out_log_name):
print "executable:", exe
print "log file:", out_log_name
bad_jobs = 0
good_jobs = 0
input_dir = base_dir + "/split_and_merge/"
input_all = glob.glob(input_dir+"x*")
check_dir(base_dir+"/histograms/")
for input_name in input_all:
out_name = base_dir+"/histograms/merged_"+os.path.basename(input_name)+"_hist.root"
run_cmd = exe + " " +input_name+" "+out_name
bsubs_cmd = "bsub -q wisc -R 'pool>4000' -C 0 -o " + \
base_dir+ "/"+ out_log_name+" "+run_cmd
#print bsubs_cmd
status,output=commands.getstatusoutput(bsubs_cmd)
if status != 0:
bad_jobs += 1
else:
good_jobs += 1
print "Good jobs: "+ str(good_jobs)+", "+str(bad_jobs)+" failed!"
def submit_tree(exe, out_log_name):
print "executable:", exe
print "log file:", out_log_name
bad_jobs = 0
good_jobs = 0
input_dir = base_dir + "/histograms/"
input_all = glob.glob(input_dir+"merged*")
for input_name in input_all:
out_name = base_dir+"/histograms/hist_qcd_"+os.path.basename(input_name)
run_cmd = exe + " " +input_name+" "+out_name
bsubs_cmd = "bsub -q wisc -R 'pool>4000' -C 0 -o " + \
base_dir+ "/"+ out_log_name+" "+run_cmd
#print bsubs_cmd
status,output=commands.getstatusoutput(bsubs_cmd)
if status != 0:
bad_jobs += 1
else:
good_jobs += 1
print "Good jobs: "+ str(good_jobs)+", "+str(bad_jobs)+" failed!"
if __name__ == "__main__":
usage = "%prog log_name"
parser = OptionParser(description="submit jobs for monojet", usage=usage)
parser.add_option("--new_file", dest="new_file", default=False, action="store_true", help="create new file")
parser.add_option("--read_ntuple", dest="read_ntuple", default=False, action="store_true", help="read ntuple produced by jetsmearing")
(options,args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
exit(1)
out_log_name = args[0]
if options.new_file:
exe = exe_base+"run_jetsmearing.sh"
submit(exe, out_log_name)
elif options.read_ntuple:
exe = exe_base+"run_read_minitree.sh"
submit_tree(exe, out_log_name)
else:
parser.print_help()
exit(2)
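# Illustrative invocations (assume an LSF cluster exposing the 'wisc' queue):
#   python submit_monojet.py --new_file jetsmearing.log
#   python submit_monojet.py --read_ntuple minitree.log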
# ==== repo: primercuervo/cognitive_radio_ml | file: python/spectrogram_generation.py | license: mit ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Generates Spectrograms based on Raw data recorded with GNURadio """
import os
import numpy as np
import scipy as sp
from scipy import signal
from scipy.misc import imsave
# While using sp.signal.specgram there are two fields that regard the FFT size:
# * nfft: Length of the FFT used, if a zero padded FFT is desired. If None,
# the FFT length is nperseg. Defaults to None.
# * nperseg: Length of each segment. Defaults to None, but if window is str or
# tuple, is set to 256, and if window is array_like, is set to the
# length of the window.
# Length of window segments to later avg. avg over 120FFTs = 0.768ms
NFFT = 64
# Per file, 605 pics are generated, from which 10% is going to be used for test
NUM_TRAIN_IMG = 545
NUM_TEST_IMG = 60
# TODO: change NFFT name?
# 5e5 samples for 50ms time window
COUNT = int(5e5) # TODO do I use this?
DIR_PATH = os.path.join('..', '..', 'data', 'final_pu', 'with_dc')
TRAIN_PATH = os.path.join(DIR_PATH, '..', '..', 'pic_set', 'train')
TEST_PATH = os.path.join(DIR_PATH, '..', '..', 'pic_set', 'test')
# plt.gray()
# Count = 1.25 e 9 samples for 2500 pictures back to back
# + 500 samples for overflow avoidance (not expected to use them)
###############################################################################
# File naming convention
###############################################################################
# The file will have the following name formating;
FILE = "scn_{scn}_snr_{snr}.dat"
# where scn is the Scenario under consideration, which will be taken from the
# following structure:
SCN = [scenario for scenario in range(10)]
# SNR regards the Signal-to-noise ratio of the recorded signal, taking
# values from the following structure
SNR = ['-5', '-2_5', '0', '2_5', '5', '10', '15']
# TODO: I need to check first if the file to be analyzed exists, otherwise
# this is pointless
# Type of measurement
TYPE = ['with_dc', 'no_dc']
# Check if the dirs for the images exists. If not, create
## Checking for train dir
print("Checking for directories...")
if not os.path.exists(TRAIN_PATH):
print("Creating directory at ", os.path.realpath(TRAIN_PATH))
os.makedirs(TRAIN_PATH)
## Checking for test dir
if not os.path.exists(TEST_PATH):
print("Creating directory at ", os.path.realpath(TEST_PATH))
os.makedirs(TEST_PATH)
for typ in TYPE:
# Needed to locate the different type of measurement
DIR_PATH = os.path.join('..', '..', 'data', 'final_pu', typ)
for scn in SCN:
## Checking for class scenario directory
### Train
TRAIN_SCN_PATH = os.path.join(TRAIN_PATH, 'scn_{}'.format(scn))
if not os.path.exists(TRAIN_SCN_PATH):
print("Creating directory at ", os.path.realpath(TRAIN_SCN_PATH))
os.makedirs(TRAIN_SCN_PATH)
### Test
TEST_SCN_PATH = os.path.join(TEST_PATH, 'scn_{}'.format(scn))
if not os.path.exists(TEST_SCN_PATH):
print("Creating directory at ", os.path.realpath(TEST_SCN_PATH))
os.makedirs(TEST_SCN_PATH)
for snr in SNR:
AF = open(os.path.join(DIR_PATH, 'scn_{}_snr_{}.dat'.format(scn, snr)), 'rb')
for j in range(605): # Number of spectrograms to generate
for i in range(64):
# af.seek(7700*i) # every sample has 4 bytes THIS ONE IS TOTALLY WRONG!!!
# af.seek(7700*8*i, 0) # every sample has 4 bytes I DUNNO WHY THIS IS NOT THE SAME AS BELOW
# From https://stackoverflow.com/questions/39834345/scipy-signal-spectrogram-output-not-as-expected
# I got that # of segments = 1 + floor( (datalen - NFFT) / (NFFT - overlap))
# With:
# * NFFT = 64
# # segments = 120 (to record around 50ms of data)
# the datalen required is ~7700 samples
# seek uses the offset in bytes, so offset = #samples * bytes per sample
# Remember: here we are using samples of type np.complex64
# AF.seek(7700*8*i, 0) # every sample has 4 bytes I DUNNO WHY THIS IS NOT THE SAME AS BELOW
# AF.seek(7700*8, 1) # every sample has 4 bytes THIS ONE WORKS
# IMPORTANT!
# Seek seems not to be necessary
# print(AF.tell())
# in fromfile(...) the count includes the datatype, so no need of
# multiplying samples times bytes per sample
data = sp.fromfile(AF, dtype=sp.complex64, count=7700)
# spectrogram(...) returns also the frequency bins and the times:
# f, t, Sxx = signal.spectrogram(...)
# but we won't use them
_, _, Sxx = signal.spectrogram(data,
fs=10e6,
mode='magnitude',
return_onesided=False,
nperseg=NFFT,
detrend=False,
noverlap=0)
# The spectrum will be reversed, so we shift it
Sxx = sp.fftpack.fftshift(Sxx, axes=0)
Sxx = 20 * np.log10(Sxx)
avgd = np.average(Sxx, axis=1)
if i == 0:
stacked = np.array(avgd)
else:
stacked = np.vstack([stacked, avgd])
if j < NUM_TRAIN_IMG:
imsave(os.path.join(TRAIN_SCN_PATH, 'image_{}.jpg'.format(
j
+ NUM_TRAIN_IMG * SNR.index(snr)
+ NUM_TRAIN_IMG * len(SNR) * TYPE.index(typ))), stacked)
else:
imsave(os.path.join(TEST_SCN_PATH, 'image_{}.jpg'.format(
(j-NUM_TRAIN_IMG)
+ NUM_TEST_IMG * SNR.index(snr)
+ NUM_TEST_IMG * len(SNR)* TYPE.index(typ))), stacked)
AF.close()
# The End
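# Sanity check of the segment arithmetic in the comments above (illustrative):
# with nperseg = 64 and noverlap = 0, reading 7700 samples per row gives
#   n_segments = 1 + (7700 - 64) // (64 - 0)   # = 120 averaged FFTs ~ 0.768 ms
# i.e. 64 * 120 = 7680 samples are used and the remaining 20 are slack.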
# ==== repo: heticor915/UTP2016-1 | file: IS512/fproject.py | license: gpl-3.0 ====
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Project Details: This file is part of the final project of statistics
#Team members : Eliana Osorio, Sebastian Idarraga, Hector F. Jimenez
#File Details: Contains Colors, And Core functions
# url: github.com/heticor915/UTP2016-1/IS512Statistics
# License Details:
# Copyright (C) 2016 Hector F. Jimenez S.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import core as core
import sys, subprocess
#if __name__=='__main__':
#Print the banner
while True:
subprocess.call(['clear'],shell=False)#clean term
core.banner() #Generate a Random banner
core.menu() #Dmenu...
opcion=raw_input("[:::]> ") #Casting the option entered by the user
    if ((opcion > '6') or (opcion < '1')) and opcion != '':  #Validator...break the infinite while cycle
core.usage(); #Print proper usage
raw_input(); #Wait for enter
break
if (opcion!='\n'):
opcion=int(opcion)
core.operative(opcion) #Do the homework :P
raw_input(); #Debug
# ==== repo: mattjj/pylds | file: examples/gibbs.py | license: mit ====
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pybasicbayes.util.text import progprint_xrange
from pylds.models import DefaultLDS
npr.seed(0)
# Set parameters
D_obs = 1
D_latent = 2
D_input = 1
T = 2000
# Simulate from one LDS
truemodel = DefaultLDS(D_obs, D_latent, D_input)
inputs = np.random.randn(T, D_input)
data, stateseq = truemodel.generate(T, inputs=inputs)
# Fit with another LDS
input_model = DefaultLDS(D_obs, D_latent, D_input)
input_model.add_data(data, inputs=inputs)
# Fit a separate model without the inputs
noinput_model = DefaultLDS(D_obs, D_latent, D_input=0)
noinput_model.add_data(data)
# Run the Gibbs sampler
def update(model):
model.resample_model()
return model.log_likelihood()
input_lls = [update(input_model) for _ in progprint_xrange(100)]
noinput_lls = [update(noinput_model) for _ in progprint_xrange(100)]
# Plot the log likelihoods
plt.figure()
plt.plot(input_lls, label="with inputs")
plt.plot(noinput_lls, label="w/o inputs")
plt.xlabel('iteration')
plt.ylabel('training likelihood')
plt.legend()
# Predict forward in time
T_given = 1800
T_predict = 200
given_data = data[:T_given]
given_inputs = inputs[:T_given]
preds = \
input_model.sample_predictions(
given_data, inputs=given_inputs,
Tpred=T_predict,
inputs_pred=inputs[T_given:T_given + T_predict])
# Plot the predictions
plt.figure()
plt.plot(np.arange(T), data, 'b-', label="true")
plt.plot(T_given + np.arange(T_predict), preds, 'r--', label="prediction")
ylim = plt.ylim()
plt.plot([T_given, T_given], ylim, '-k')
plt.xlabel('time index')
plt.xlim(max(0, T_given - 200), T)
plt.ylabel('prediction')
plt.ylim(ylim)
plt.legend()
# Smooth the data
input_ys = input_model.smooth(data, inputs)
noinput_ys = noinput_model.smooth(data)
plt.figure()
plt.plot(data, 'b-', label="true")
plt.plot(input_ys, 'r-', lw=2, label="with input")
plt.xlabel("Time")
plt.xlim(max(0, T_given-200), T)
plt.ylabel("Smoothed Data")
plt.legend()
plt.show()
# ==== repo: SouthPatron/GojiDNS | file: site/gojidns/goji/urls.py | license: gpl-3.0 ====
# GojiDNS - Developed by South Patron CC - http://www.southpatron.com/
#
# This file is part of GojiDNS.
#
# GojiDNS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GojiDNS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GojiDNS. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView as TV, RedirectView as RV
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('goji.views.public',
url( r'^$', 'index', name = 'goji-public-index' ),
url( r'^v/login$', 'login', name = 'goji-public-login' ),
url( r'^v/logout$', 'logout', name = 'goji-public-logout' ),
url( r'^v/register$', 'register', name = 'goji-public-register' ),
url( r'^v/authenticate$', 'authenticate', name = 'goji-public-authenticate' ),
url( r'^v/resend_authentication$', 'resend_authentication', name = 'goji-public-resend-authentication' ),
url( r'^v/reset_password$', 'reset_password', name = 'goji-public-reset-password' ),
url( r'^v/confirm_email/(?P<code>\S+)$', 'confirm_email', name = 'goji-public-confirm-email-code' ),
url( r'^v/confirm_email$', 'confirm_email', name = 'goji-public-confirm-email' ),
url( r'^faq$', 'faq', name = 'goji-public-faq' ),
url( r'^legal$',
TV.as_view( template_name = 'pages/public/general/legal.html' ),
name = 'goji-public-legal'
),
url( r'^features$',
TV.as_view( template_name = 'pages/public/general/features.html' ),
name = 'goji-public-features'
),
)
urlpatterns += patterns('goji.views.members',
url( r'^members$','domain_list', name = 'goji-domain-list' ),
url( r'^members/domain/(?P<domain>\S+)/resource/(?P<rid>\d+)/delete$', 'domain_resource_delete', name = 'goji-domain-resource-delete' ),
url( r'^members/domain/(?P<domain>\S+)/resource/(?P<rid>\d+)$', 'domain_resource_edit', name = 'goji-domain-resource-edit' ),
url( r'^members/domain/(?P<domain>\S+)/resource/add$', 'domain_resource_add', name = 'goji-domain-resource-add' ),
url( r'^members/domain/(?P<domain>\S+)/edit$', 'domain_edit', name = 'goji-domain-edit' ),
url( r'^members/domain/(?P<domain>\S+)$', 'domain', name = 'goji-domain' ),
url( r'^members/domain_add$', 'domain_add', name = 'goji-domain-add' ),
url( r'^members/domain_clone$', 'domain_clone', name = 'goji-domain-clone' ),
url( r'^members/domain_delete/(?P<domain>\S+)$', 'domain_delete', name = 'goji-domain-delete' ),
url( r'^members/profile$', 'profile', name = 'goji-profile' ),
url( r'^members/network_status$', 'network_status', name = 'goji-network-status' ),
url( r'^members/change_password$', 'change_password', name = 'goji-change-password' ),
url( r'^members/contact-us$', 'contact_us', name = 'goji-contact-us' ),
)
urlpatterns += patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls))
)
# ==== repo: ella/mypage | file: mypage/pages/models.py | license: bsd-3-clause ====
import datetime
import itertools
import logging
from copy import deepcopy
import anyjson as json
from django.db import models
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_unicode
from django import template
import django.template.loader
from django.utils.safestring import mark_safe
from django.conf import settings
from mypage.pages.managers import PageManager, SessionPageManager
from mypage.pages.layout import Layout
from mypage.widgets.models import Widget
from mypage.widgets.models import get_object
from mypage.widgets.templatetags.splitobjectlist import split_list
DEFAULT_PAGE_TEMPLATES = (
('page.html', 'Default', 2),
('page3.html', 'Default 3', 3),
)
DEFAULT_SKIN_CHOICES = (('default', 'Default'),)
log = logging.getLogger('mypage.pages.models')
def page_template_choices():
# TODO: do this function lazy to support multi-site process
page_templates = getattr(settings, 'PAGE_TEMPLATES', DEFAULT_PAGE_TEMPLATES)
page_template_choices = [ (val, name) for val, name, containers in page_templates ]
return page_template_choices
def skin_choices():
# TODO: migrate and remove
return getattr(settings, 'SKIN_CHOICES', DEFAULT_SKIN_CHOICES)
class Page(models.Model):
"Page containing multiple widgets."
template = models.CharField(max_length=100, default='page.html', choices=page_template_choices())
site = models.ForeignKey(Site, default=lambda: settings.SITE_ID)
# TODO migrate to layout.template_config and remove
skin = models.CharField(max_length=100, blank=True, default='', choices=skin_choices())
layout_migrated = models.BooleanField(default=False)
layout_json = models.TextField()
objects = PageManager()
class Meta:
verbose_name = _('Page')
verbose_name_plural = _('Pages')
def __unicode__(self):
return u'Page: %d' % self.pk
@property
def widgets(self):
if not hasattr(self, '_widgets'):
self._widgets = Widget.objects.filter(pk__in=map(lambda wil: wil.widget_id, self.layout.widgets))
return self._widgets
def update_template(self, new_template):
if new_template == self.template:
return
cs = None
for val, name, containers in getattr(settings, 'PAGE_TEMPLATES', DEFAULT_PAGE_TEMPLATES):
if val == new_template:
cs = containers
break
else:
raise KeyError('%r is not a valid choice for template' % new_template)
self.layout.arrange_containers(cs)
self.template = new_template
def get_widgets(self):
return [ i.get_child() for i in self.widgets.all() ]
def layout_get(self):
if not hasattr(self, '_layout'):
self._layout = Layout(self, json.deserialize(self.layout_json))
return self._layout
def layout_set(self, value):
self.layout_json = json.serialize(value)
layout = property(layout_get, layout_set)
def add_widget(self, widget, container=0, position=None):
self.layout.insert_widget(widget, container=container, position=position)
self.save()
log.info('Add widget %d into page %d)', widget.pk, self.pk)
def add_widgets(self, widgets):
for w in widgets:
self.add_widget(w)
self.save()
def remove_widget(self, widget):
self.layout.remove_widget(widget)
log.info('Remove widget %d from page %d)', widget.pk, self.pk)
self.save()
def remove_widgets(self, widgets):
for w in widgets:
self.remove_widget(w)
self.save()
class UserPage(Page):
"Page customized by/for one User"
user = models.ForeignKey(User, db_index=True)
objects = PageManager()
site_copy = models.ForeignKey(Site, default=lambda: settings.SITE_ID)
class Meta:
unique_together = (('site_copy', 'user',),)
verbose_name = _('User page')
verbose_name_plural = _('User pages')
class SessionPage(Page):
"Page customized by/for one AnonymousUser via a session"
session_key = models.CharField(_('session key'), max_length=40, db_index=True)
updated = models.DateTimeField(null=False, default=datetime.datetime.now)
site_copy = models.ForeignKey(Site, default=lambda: settings.SITE_ID)
objects = SessionPageManager()
class Meta:
unique_together = (('site_copy', 'session_key',),)
verbose_name = _('Session page')
verbose_name_plural = _('Session pages')
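# Usage sketch (illustrative; assumes a saved Widget `widget` and an existing Page):
#
#   page = Page.objects.get(pk=1)
#   page.add_widget(widget, container=0)   # persists the widget into the layout
#   page.update_template('page3.html')     # rearranges the layout to 3 containers
#   page.remove_widget(widget)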
# ==== repo: emlynoregan/appenginetaskutils | file: experiments/countaccountswithfuture.py | license: apache-2.0 ====
from model.account import Account
import logging
from taskutils.future import future, FutureReadyForResult, GenerateOnAllChildSuccess,\
setlocalprogress
def CountAccountsWithFutureExperiment():
def Go():
def CountRemaining(futurekey, cursor):
logging.debug("Got here")
accounts, cursor, kontinue = Account.query().fetch_page(
100, start_cursor = cursor
)
numaccounts = len(accounts)
if kontinue:
lonallchildsuccessf = GenerateOnAllChildSuccess(futurekey, numaccounts, lambda a, b: a + b)
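                # fan-in: per the GenerateOnAllChildSuccess helper, once every
                # child future spawned below succeeds, the children's counts
                # are folded into this page's numaccounts with lambda a, b: a + b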
future(CountRemaining, parentkey=futurekey, queue="background", onallchildsuccessf = lonallchildsuccessf)(cursor)
logging.debug("raising")
setlocalprogress(futurekey, numaccounts)
if kontinue:
raise FutureReadyForResult("still calculating")
else:
logging.debug("leaving")
return numaccounts
countfuture = future(CountRemaining, queue="background")(None)
return countfuture.key
return "Count Accounts With Future", Go
# ==== repo: eagleatustb/p2pdown | file: source/third_party/MediaInfoLib/Source/Example/HowToUse_Dll.py | license: gpl-2.0 ====
## MediaInfoDLL - All info about media files
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Python (Windows) example
#
# To make this example work, you must put MediaInfo.Dll and Example.ogg
# in the same folder
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Should be "import MediaInfoDLL" but does not work, why?
# How to import MediaInfoDLL.py correctly?
# Example following
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from MediaInfoDLL import *
MI = MediaInfo()
Version=MI.Option_Static("Info_Version", "0.7.7.0;MediaInfoDLL_Example_Python;0.7.7.0")
if Version=="":
print "MediaInfo.Dll: this version of the DLL is not compatible"
exit
#Information about MediaInfo
print "Info_Parameters"
print MI.Option_Static(u"Info_Parameters")
print
print "Info_Capacities"
print MI.Option_Static(u"Info_Capacities")
print
print "Info_Codecs"
print MI.Option_Static(u"Info_Codecs")
#An example of how to use the library
print
print "Open"
MI.Open(u"Example.ogg")
print
print "Inform with Complete=false"
MI.Option_Static("Complete")
print MI.Inform()
print
print "Inform with Complete=true"
MI.Option_Static(u"Complete", u"1")
print MI.Inform()
print
print "Custom Inform"
MI.Option_Static(u"Inform", u"General;Example : FileSize=%FileSize%")
print MI.Inform()
print
print "Get with Stream=General and Parameter='FileSize'"
print MI.Get(Stream.General, 0, u"FileSize")
print
print "GetI with Stream=General and Parameter=46"
print MI.GetI(Stream.General, 0, 46)
print
print "Count_Get with StreamKind=Stream_Audio"
print MI.Count_Get(Stream.Audio)
print
print "Get with Stream=General and Parameter='AudioCount'"
print MI.Get(Stream.General, 0, u"AudioCount")
print
print "Get with Stream=Audio and Parameter='StreamCount'"
print MI.Get(Stream.Audio, 0, u"StreamCount")
print
print "Close"
MI.Close()
# ==== repo: sramana/pysis | file: apps/myprofile/forms.py | license: unlicense ====
from django.conf import settings
from django import forms
from accounts.models import Profile
class GeneralDetailsForm(forms.ModelForm):
class Meta:
model = Profile
fields = list(set(settings.GENERAL_DETAILS_FIELD_LIST) -
set(['first_name', 'last_name', 'register_number', 'college_email_id']))
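        # set difference keeps everything in GENERAL_DETAILS_FIELD_LIST except
        # the four identity fields; note that set() does not preserve field order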
class PersonalDetailsForm(forms.ModelForm):
class Meta:
model = Profile
fields = settings.PERSONAL_DETAILS_FIELD_LIST
class FamilyDetailsForm(forms.ModelForm):
class Meta:
model = Profile
fields = settings.FAMILY_DETAILS_FIELD_LIST
class ContactDetailsForm(forms.ModelForm):
class Meta:
model = Profile
fields = settings.CONTACT_DETAILS_FIELD_LIST
class EducationDetailsForm(forms.ModelForm):
class Meta:
model = Profile
fields = settings.EDUCATION_DETAILS_FIELD_LIST
class MiscDetailsForm(forms.ModelForm):
class Meta:
model = Profile
        fields = settings.MISC_DETAILS_FIELD_LIST
# ==== repo: h2020-westlife-eu/VRE | file: api/models.py | license: mit ====
import copy
import json
from django.contrib.auth.models import User
from django.core import signing
from django.db import models
from .jobportals.portals import PORTAL_FORMS
from .data import (
DATAFILE_STATES,
DATAFILE_STATE_CHOICES,
DATAFILE_TRANSFER_IN_PROGRESS,
EXTERNAL_SUBMISSION_STATE_CHOICES,
EXTERNAL_SUBMISSION_PENDING_SUBMISSION,
ACTIONS_TEXT,
STORAGE_ACCOUNT_PENDING_VALIDATION,
STORAGE_ACCOUNT_READY,
STORAGE_ACCOUNT_STATES,
STORAGE_ACCOUNT_STATE_CHOICES,
)
class BaseModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class UserStorageAccount(BaseModel):
root_folder_key = ''
owner = models.ForeignKey(User)
name = models.CharField(max_length=255, blank=True)
validation_state = models.CharField(max_length=255, choices=STORAGE_ACCOUNT_STATE_CHOICES,
default=STORAGE_ACCOUNT_PENDING_VALIDATION)
_concrete = None
def get_concrete(self):
"""
:return: the concrete instance of this storage account
"""
for attr in ['s3provider', 'gdriveprovider', 'b2dropprovider', 'dropboxprovider', 'dummyprovider', 'wlwebdavprovider']:
try:
inst = getattr(self, attr)
return inst
except:
pass
return None
@property
def display_name(self):
if self.name != '':
return self.name
else:
if self.get_concrete() is not None:
return self.get_concrete().__unicode__()
else:
return self.__unicode__()
@property
def utilization(self):
s = Datafile.objects.filter(folder__storage_account=self).aggregate(models.Sum('size'))['size__sum']
if s is None:
return 0
else:
return s
@property
def readable_validation_state(self):
return STORAGE_ACCOUNT_STATES.get(self.validation_state, 'Unknown')
@property
def validated(self):
return self.validation_state == STORAGE_ACCOUNT_READY
@property
def quota(self):
inst = self.get_concrete()
if inst is None:
return None
elif inst.type == 'GDRIVE_PROVIDER':
if inst.quota_bytes == 0:
return None
else:
return inst.quota_bytes
else:
return None
@property
def sync_in_progress(self):
sync_op = SyncOperation.get_latest_for_account(self)
if sync_op is None:
return False
else:
return sync_op.ongoing
def get_root_folder(self):
return Folder.objects.get(storage_account=self, parent=None)
def __unicode__(self):
return u'StorageAccount%d' % self.pk
class S3Provider(UserStorageAccount):
type = 'S3_PROVIDER'
root_folder_key = '/'
access_key_id = models.CharField(max_length=255)
secret_access_key = models.CharField(max_length=255)
bucket_name = models.CharField(max_length=255)
def __unicode__(self):
return u'S3Provider (bucket: %s, access_key: %s)' % (self.bucket_name, self.access_key_id)
class GDriveProvider(UserStorageAccount):
type = 'GDRIVE_PROVIDER'
root_folder_key = 'root'
credentials = models.CharField(max_length=4096)
quota_bytes = models.BigIntegerField(default=0)
def __unicode__(self):
return u'GDriveProvider'
class B2DropProvider(UserStorageAccount):
type = 'B2DROP_PROVIDER'
root_folder_key = '/'
username = models.CharField(max_length=255)
password = models.CharField(max_length=255)
def __unicode__(self):
return u'B2DropProvider'
class WLWebdavProvider(UserStorageAccount):
type = 'WL_WEBDAV_PROVIDER'
root_folder_key = '/'
def __unicode__(self):
return u'WLWebdavProvider'
class DropboxProvider(UserStorageAccount):
type = 'DROPBOX'
root_folder_key = '/'
access_user_id = models.CharField(max_length=255)
access_token = models.CharField(max_length=255)
quota_bytes = models.BigIntegerField(default=0)
def __unicode__(self):
return u'DropboxProvider'
class DummyProvider(UserStorageAccount):
type = 'DUMMY'
root_folder_key = '/'
def __unicode__(self):
return u'DummyProvider'
class Dataset(BaseModel):
owner = models.ForeignKey(User)
name = models.CharField(max_length=1024)
published = models.BooleanField(default=False)
publish_key = models.CharField(max_length=1024, default='')
def publish(self, expires=None):
self.published = True
self.publish_key = signing.dumps({'pk': self.pk})
self.save()
def unpublish(self):
self.published = False
self.publish_key = ''
self.save()
def __unicode__(self):
return self.name
class DatasetFile(BaseModel):
owner = models.ForeignKey(User)
dataset = models.ForeignKey(Dataset)
datafile = models.ForeignKey('Datafile')
def __unicode__(self):
return self.datafile.filename
class Meta:
unique_together = ('dataset', 'datafile')
class Folder(BaseModel):
owner = models.ForeignKey(User)
parent = models.ForeignKey('Folder', null=True)
name = models.CharField(max_length=1024)
storage_account = models.ForeignKey(UserStorageAccount)
storage_key = models.CharField(max_length=1024)
@property
def full_path(self):
# TODO: Optimize this. MPTT?
if self.parent is None:
return self.name
else:
return self.parent.full_path + '/' + self.name
@property
def rel_path(self):
"""
:return: the path relative to the provider root
"""
# TODO: What if there is a '/' in the path??
# TODO: Optimize this. MPTT?
if self.parent is None:
return ''
elif self.parent.parent is None:
return self.name
else:
return self.parent.rel_path + '/' + self.name
def __unicode__(self):
return self.name
class Datafile(BaseModel):
filename = models.CharField(max_length=1024)
owner = models.ForeignKey(User)
folder = models.ForeignKey(Folder)
upload_state = models.CharField(
max_length=255, choices=DATAFILE_STATE_CHOICES, default=DATAFILE_TRANSFER_IN_PROGRESS)
# storage_account = models.ForeignKey(UserStorageAccount)
storage_key = models.CharField(max_length=1024)
size = models.IntegerField(null=True, default=None)
external_link = models.URLField(max_length=8192, blank=True)
@property
def storage_account(self):
return self.folder.storage_account
@property
def full_path(self):
return self.folder.full_path + '/' + self.filename
@property
def rel_path(self):
return self.folder.rel_path + '/' + self.filename
@property
def readable_upload_state(self):
return DATAFILE_STATES.get(self.upload_state, 'Unknown')
def __unicode__(self):
return self.filename
class UserAction(BaseModel):
user = models.ForeignKey(User)
action_type = models.CharField(max_length=255)
args = models.TextField()
@property
def text(self):
return self.__unicode__()
@classmethod
def log(cls, user, action_type, args):
# TODO: validate that args match action_type?
obj = cls(user=user, action_type=action_type, args=json.dumps(args))
obj.save()
return obj
def __unicode__(self):
try:
args = json.loads(self.args)
except ValueError:
args = {}
args.update({'user': self.user.username})
r = ACTIONS_TEXT[self.action_type] % args
return r
class Meta:
ordering = ['-created_at']
class SyncOperation(BaseModel):
storage_account = models.ForeignKey(UserStorageAccount)
ongoing = models.BooleanField(default=False)
@classmethod
def get_latest_for_account(cls, storage_account):
"""
Returns the most recent SyncOperation for a given UserStorageAccount
:param storage_account:
:return: the latest SyncOperation, or None if there hasn't been any
"""
try:
obj = cls.objects.filter(storage_account=storage_account).order_by('-created_at')[0]
except:
obj = None
return obj
class ExternalCredentials(BaseModel):
provider_name = models.CharField(max_length=1024)
owner = models.ForeignKey(User)
username = models.CharField(max_length=1024)
password = models.CharField(max_length=1024)
def __unicode__(self):
return 'ExternalCredentials(%s, %s)' % (self.provider_name, self.username)
class ExternalJobPortal(BaseModel):
name = models.CharField(max_length=1024)
def __unicode__(self):
return self.name
class ExternalJobPortalFormGroup(BaseModel):
portal = models.ForeignKey(ExternalJobPortal)
parent = models.ForeignKey('ExternalJobPortalFormGroup', null=True)
name = models.CharField(max_length=1024)
def __unicode__(self):
return self.name
class ExternalJobPortalForm(BaseModel):
portal = models.ForeignKey(ExternalJobPortal)
parent = models.ForeignKey(ExternalJobPortalFormGroup, null=True)
name = models.CharField(max_length=1024)
original_url = models.URLField()
submit_url = models.URLField()
template_name = models.CharField(max_length=1024)
@classmethod
def load_initial(cls):
portal_forms = copy.deepcopy(PORTAL_FORMS)
for portal_form in portal_forms:
# Save the portal
portal, created = ExternalJobPortal.objects.update_or_create(pk=portal_form['portal']['pk'], defaults=portal_form['portal'])
if created:
print('Created portal %d' % portal.pk)
portal_form['portal'] = portal
new_portal_form, created = cls.objects.update_or_create(pk=portal_form['pk'], defaults=portal_form)
if created:
print('Created portal_form %d' % new_portal_form.pk)
new_portal_form.save()
def __unicode__(self):
return self.name
class ExternalJobPortalSubmission(BaseModel):
owner = models.ForeignKey(User)
target = models.ForeignKey(ExternalJobPortalForm)
data = models.TextField()
job_key = models.CharField(max_length=1024, blank=True)
@property
def state(self):
try:
states = ExternalJobPortalSubmissionStateChange.objects.filter(external_submission=self)
states = states.order_by('-created_at')
state = states[0]
except:
state = EXTERNAL_SUBMISSION_PENDING_SUBMISSION
return state
def update_state(self, new_state):
state_change = ExternalJobPortalSubmissionStateChange(external_submission=self, state=new_state)
state_change.save()
def __unicode__(self):
return 'ExternalSubmission(%d)' % self.pk
class ExternalJobPortalSubmissionStateChange(BaseModel):
external_submission = models.ForeignKey(ExternalJobPortalSubmission)
state = models.CharField(max_length=256, choices=EXTERNAL_SUBMISSION_STATE_CHOICES)
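# Usage sketch (illustrative; assumes a saved User instance `user`):
#
#   ds = Dataset.objects.create(owner=user, name='simulations')
#   ds.publish()                    # sets published=True and a signed publish_key
#   signing.loads(ds.publish_key)   # -> {'pk': ds.pk}
#   ds.unpublish()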
# ==== repo: darrencheng0817/AlgorithmLearning | file: USACO/section1/dualpal/dualpal.py | license: mit ====
'''
Created on 2016-02-09
@author: Darren
'''
'''
Dual Palindromes
Mario Cruz (Colombia) & Hugo Rickeboer (Argentina)
A number that reads the same from right to left as when read from left to right is called a palindrome. The number 12321 is a palindrome; the number 77778 is not. Of course, palindromes have neither leading nor trailing zeroes, so 0220 is not a palindrome.
The number 21 (base 10) is not a palindrome in base 10, but the number 21 (base 10) is, in fact, a palindrome in base 2 (10101).
Write a program that reads two numbers (expressed in base 10):
N (1 <= N <= 15)
S (0 < S < 10000)
and then finds and prints (in base 10) the first N numbers strictly greater than S that are palindromic when written in two or more number bases (2 <= base <= 10).
Solutions to this problem do not require manipulating integers larger than the standard 32 bits.
PROGRAM NAME: dualpal
INPUT FORMAT
A single line with space separated integers N and S.
SAMPLE INPUT (file dualpal.in)
3 25
OUTPUT FORMAT
N lines, each with a base 10 number that is palindromic when expressed in at least two of the bases 2..10. The numbers should be listed in order from smallest to largest.
SAMPLE OUTPUT (file dualpal.out)
26
27
28
'''
def convert(num,base):
res=""
while num>0:
temp=num%base
if temp>9:
res=chr(ord("A")-10+temp)+res
else:
res=str(temp)+res
num//=base
return res
def dualpal(N,S):
res=[]
while len(res)<N:
S+=1
count=0
for base in range(2,11):
cand=convert(S, base)
if cand==cand[::-1]:
count+=1
if count>=2:
res.append(S)
break
print(res)
dualpal(15, 9900)
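# Quick check of the helper (illustrative), matching the problem statement:
#   convert(21, 2)   # -> '10101', a palindrome, so 21 counts for base 2
#   convert(26, 10)  # -> '26'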
# ==== repo: CarterFendley/2015-robot | file: robot/autonomous/StackAutonomous.py | license: apache-2.0 ====
from robotpy_ext.autonomous import timed_state, StatefulAutonomous
# Only for auto complete #
from components.drive import Drive
from components.forklift import ToteForklift
from components.alignment import Alignment
class StackAutonomous(StatefulAutonomous):
MODE_NAME = 'Stack Auto'
DEFAULT = False
drive = Drive
tote_forklift = ToteForklift
align = Alignment
def initialize(self):
self.register_sd_var('back', .5)
self.register_sd_var('fwd', .5)
def on_enable(self):
super().on_enable()
self.drive.reset_gyro_angle()
def on_iteration(self, tm):
super().on_iteration(tm)
# This gets executed afterwards
self.drive.angle_rotation(0)
@timed_state(duration =.5, next_state='get_tote2', first=True)
def calibrate(self, initial_call):
if initial_call:
self.tote_forklift.set_pos_stack1()
if self.tote_forklift.isCalibrated:
self.next_state('get_tote2')
@timed_state(duration=1.3, next_state='reverse')
def get_tote2(self, initial_call):
if initial_call:
self.align.align()
@timed_state(duration=3, next_state='drop')
def reverse(self):
self.drive.move(self.back, 0, 0, 0)
@timed_state(duration=1.3, next_state='strafeRight')
def drop(self):
self.tote_forklift.set_pos_bottom()
@timed_state(duration = 2, next_state='get_tote3')
def strafeRight(self):
self.drive.move(0, 1, 0, 0)
@timed_state(duration = 3, next_state='get_tote4')
def get_tote3(self):
self.align.align()
@timed_state(duration = 1.3, next_state='reverse2')
def get_tote4(self):
self.align.align()
@timed_state(duration = 3, next_state='strafe')
def reverse2(self):
self.drive.move(self.back, 0, 0, 0)
@timed_state(duration=1)
def strafe(self):
        self.drive.move(0, -1, 0, 0)
# ==== repo: apple/swift-lldb | file: scripts/utilsDebug.py | license: apache-2.0 ====
""" Utility module to help debug Python scripts
--------------------------------------------------------------------------
File: utilsDebug.py
Overview: Python module to supply functions to help debug Python
scripts.
Gotchas: None.
Copyright: None.
--------------------------------------------------------------------------
"""
# Python modules:
import sys
# Third party modules:
# In-house modules:
# Instantiations:
#-----------------------------------------------------------------------------
# Details: Class to implement a simple stack function trace. Instantiate the
# class as the first function you want to trace. Example:
# obj = utilsDebug.CDebugFnVerbose("validate_arguments()")
# Gotchas: This class will not work properly in a multi-threaded
# environment.
# Authors: Illya Rudkin 28/11/2013.
# Changes: None.
#--
class CDebugFnVerbose(object):
# Public static properties:
bVerboseOn = False # True = turn on function tracing, False = turn off.
# Public:
#++------------------------------------------------------------------------
# Details: CDebugFnVerbose constructor.
# Type: Method.
# Args: vstrFnName - (R) Text description i.e. a function name.
# Return: None.
# Throws: None.
#--
# CDebugFnVerbose(vstrFnName)
#++------------------------------------------------------------------------
# Details: Print out information on the object specified.
# Type: Method.
# Args: vstrText - (R) Some helper text description.
# vObject - (R) Some Python type object.
# Return: None.
# Throws: None.
#--
def dump_object(self, vstrText, vObject):
if not CDebugFnVerbose.bVerboseOn:
return
sys.stdout.write(
"%d%s> Dp: %s" %
(CDebugFnVerbose.__nLevel,
self.__get_dots(),
vstrText))
print(vObject)
#++------------------------------------------------------------------------
# Details: Print out some progress text given by the client.
# Type: Method.
# Args: vstrText - (R) Some helper text description.
# Return: None.
# Throws: None.
#--
def dump_text(self, vstrText):
if not CDebugFnVerbose.bVerboseOn:
return
print(("%d%s> Dp: %s" % (CDebugFnVerbose.__nLevel, self.__get_dots(),
vstrText)))
# Private methods:
def __init__(self, vstrFnName):
self.__indent_out(vstrFnName)
#++------------------------------------------------------------------------
# Details: Build an indentation string of dots based on the __nLevel.
# Type: Method.
# Args: None.
# Return: Str - variable length string.
# Throws: None.
#--
def __get_dots(self):
return "".join("." for i in range(0, CDebugFnVerbose.__nLevel))
#++------------------------------------------------------------------------
# Details: Build and print out debug verbosity text indicating the function
# just exited from.
# Type: Method.
# Args: None.
# Return: None.
# Throws: None.
#--
def __indent_back(self):
if CDebugFnVerbose.bVerboseOn:
print(("%d%s< fn: %s" % (CDebugFnVerbose.__nLevel,
self.__get_dots(), self.__strFnName)))
CDebugFnVerbose.__nLevel -= 1
#++------------------------------------------------------------------------
# Details: Build and print out debug verbosity text indicating the function
# just entered.
# Type: Method.
# Args: vstrFnName - (R) Name of the function entered.
# Return: None.
# Throws: None.
#--
def __indent_out(self, vstrFnName):
CDebugFnVerbose.__nLevel += 1
self.__strFnName = vstrFnName
if CDebugFnVerbose.bVerboseOn:
print(("%d%s> fn: %s" % (CDebugFnVerbose.__nLevel,
self.__get_dots(), self.__strFnName)))
# Private statics attributes:
__nLevel = 0 # Indentation level counter
# Private attributes:
__strFnName = ""
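# Illustrative self-test (not part of the original module): enable tracing and
# the indentation of the dots follows the call depth.
#
#   CDebugFnVerbose.bVerboseOn = True
#   def inner():
#       dbg = CDebugFnVerbose("inner()")
#       dbg.dump_text("doing work")
#   def outer():
#       dbg = CDebugFnVerbose("outer()")
#       inner()
#   outer()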
# ==== repo: jeroyang/approx | file: approx/approx.py | license: mit ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from random import choice, sample
from itertools import product
def direct_print(text):
print(text, flush=True)
def question_guess(number, lower_bound, upper_bound):
    template = 'Guess a number from {} to {}: '
question = template.format(lower_bound, upper_bound)
answer = number
return (question, answer)
def question_add(arg1, arg2):
template = '{0} + {1} = '
question = template.format(arg1, arg2)
answer = arg1 + arg2
return (question, answer)
def question_sub(arg1, wanted_answer):
template = '{0} - {1} = '
question = template.format(arg1 + wanted_answer, arg1)
answer = wanted_answer
return (question, answer)
def question_sub_fixed(arg1, arg2):
"""
Given arg1 and arg2 return the question of "arg1 - arg2 = "
"""
template = '{0} - {1} = '
question = template.format(arg1, arg2)
answer = arg1 - arg2
return (question, answer)
def question_multiply(arg1, arg2):
template = '{0} × {1} = '
question = template.format(arg1, arg2)
answer = arg1 * arg2
return (question, answer)
def question_divide(arg1, wanted_answer):
template = '{0} ÷ {1} = '
question = template.format(arg1*wanted_answer, arg1)
answer = wanted_answer
return (question, answer)
def question_highest_digit(original_question, original_answer):
blocked_answer = '▢' + str(original_answer)[1:]
question = ''.join([
original_question,
blocked_answer,
',其中▢應填入什麼數字?',
])
answer = int(str(original_answer)[0])
return (question, answer)
def question_highest_wrapper(recipe, *args):
original = generate_question(recipe, *args)
question, answer = question_highest_digit(*original)
return (question, answer)
def is_correct(users_answer, answer, precision):
delta = abs(users_answer - answer)
return delta <= precision
def new_level_greet(level_id, precision):
    template = 'Level {} (allowed error: {})'
greet = template.format(level_id, precision)
bar = '=' * 20
return '\n'.join([bar, greet, bar])
def correct_greet(answer, users_answer):
if answer == users_answer:
        greet = 'Great job, the answer is exactly {}!'.format(answer)
    else:
        greet = 'Close enough, the exact answer is {}!'.format(answer)
return greet
def too_high_hint():
    return 'Too high, try a smaller number!'
def too_low_hint():
    return 'Too low, try a bigger number!'
RECIPE_MAP = {
'guess': question_guess,
'add': question_add,
'sub': question_sub,
'subf': question_sub_fixed,
'multiply': question_multiply,
'divide': question_divide,
'highest': question_highest_wrapper,
}
def generate_question_list(recipe, *args):
question_function = RECIPE_MAP[recipe]
new_args = []
for arg in args:
if not isinstance(arg, range):
new_args.append([arg])
else:
new_args.append(arg)
question_list = []
for arg_tuple in product(*new_args):
question, answer = question_function(*arg_tuple)
question_list.append((question, answer))
return question_list
def play_question(question, answer, precision, print_method, input_func):
while True:
users_response = input_func(question)
try:
users_answer = float(users_response)
        except ValueError:
confirmed = confirm_exit(input_func)
if confirmed:
return False
else:
continue
if is_correct(users_answer, answer, precision):
print_method(correct_greet(answer, users_answer))
return True
elif users_answer > answer:
print_method(too_high_hint())
else: #users_answer < answer
print_method(too_low_hint())
def confirm_exit(input_func):
answer = input_func("確認結束遊戲?(是請按1;其他鍵表示否)")
if answer == '1':
return True
else:
return False
class ApproxGame(object):
"""
    Game for mental arithmetic with approximate numbers
"""
def __init__(self):
levels = {
# id: (next_level, precision, round_count, recipe, *args)
1: (2, 0, 1, 'guess', range(0, 100), 0, 100),
2: (3, 0, 10, 'add', range(1, 10), range(0, 10)),
3: (4, 0, 5, 'subf', 9, range(1, 10)),
4: (5, 0, 5, 'add', 10, range(1, 10)),
6: (7, 0, 10, 'add', range(10, 100, 10), range(1, 10)),
5: (6, 0, 10, 'sub', range(1, 10), range(0, 10)),
7: (8, 5, 10, 'subf', 99, range(11, 100)),
8: (9, 0, 10, 'add', range(100, 1000, 100), range(10, 100, 10)),
9: (10, 0, 10, 'add', range(100, 1000, 100), range(10, 100)),
10: (11, 10, 10, 'subf', 999, range(100, 1000)),
11: (12, 10, 10, 'add', range(10, 100), range(10, 100)),
12: (13, 10, 10, 'sub', range(10, 100), range(0, 100)),
13: (14, 0, 10, 'highest', 'multiply',
range(10, 99), range(1, 10)),
14: (15, 0, 10, 'multiply', range(1, 9), range(0, 9)),
15: (16, 10, 10, 'multiply', range(10, 99), range(0, 9)),
16: (17, 50, 10, 'multiply', range(100, 999), range(1, 9)),
17: (18, 100, 5, 'multiply', range(10, 99), range(10, 99)),
18: (19, 0, 10, 'divide', range(1, 9), range(2, 9)),
19: (1, 10, 2, 'divide', range(10, 99), range(2, 9)),
}
self._levels = levels
def play_level(self, level_id, print_method, input_func):
level = self._levels[level_id]
next_level, precision, round_count = level[0:3]
recipe_args = level[3:]
print_method(new_level_greet(level_id, precision))
question_list = generate_question_list(*recipe_args)
for question, answer in sample(question_list, round_count):
            correctness = play_question(question, answer,
                                        precision, print_method, input_func)
if not correctness: # stop game
return None
return next_level
def run(self, level_id=1):
while True:
level_id = self.play_level(level_id, direct_print, input)
if level_id is None:
direct_print("=======\n遊戲結束\n=======\n")
break
if __name__ == '__main__':
game = ApproxGame()
game.run() | bsd-3-clause | 5,040,673,850,842,633,000 | 30.613861 | 73 | 0.56852 | false | 3.179781 | false | false | false |
andr3wmac/metaTower | packages/dlmanager/NZB/Decoder.py | 1 | 7033 | import re, string, os, time, mt
from zlib import crc32
yenc_found = False
try:
import _yenc
yenc_found = True
except:
pass
class ArticleDecoder(mt.threads.Thread):
def __init__(self, nextSeg, save_to, path, onFinish = None, onSuccess = None, onFail = None, onAssemblyPercent = None):
mt.threads.Thread.__init__(self)
self.daemon = True
self.decoder = SegmentDecoder()
self.nextSeg = nextSeg
self.save_to = save_to
self.onFinish = onFinish
self.onSuccess = onSuccess
self.onAssemblyPercent = onAssemblyPercent
self.onFail = onFail
self.path = path
def run(self):
while ( self.running ):
try:
seg = self.nextSeg()
if ( seg == None ):
self.sleep(0.1)
continue
if ( seg == -1 ):
# this means we're finished here.
if ( self.onAssemblyPercent ): self.onAssemblyPercent(0)
self.assembleSegments()
if ( self.onAssemblyPercent ): self.onAssemblyPercent(100)
self.running = False
break
self.decodeSegment(seg)
except Exception as inst:
mt.log.error("ArticleDecoder running error: " + str(inst.args))
self.stop()
if ( self.onFinish ): self.onFinish()
def assembleSegments(self):
if ( not self.running ): return
mt.log.debug("Assembling..")
# generate list of files.
file_index = {}
for cache_file in os.listdir(self.path):
            # strip the ".NNN" segment-number suffix to recover the file name.
            file_name = cache_file[:-4]
            if ( file_name not in file_index ):
file_index[file_name] = []
file_index[file_name].append(cache_file)
# check if the save folder exists
if ( not os.path.isdir(self.save_to) ): os.mkdir(self.save_to)
file_count = len(file_index)
files_complete = 0
for file_name in file_index:
try:
file = open(os.path.join(self.save_to, file_name), "wb")
file_index[file_name].sort()
segments = file_index[file_name]
mt.log.debug("Assembling File: " + file_name + " Total Segments: " + str(len(segments)))
for seg in segments:
seg_f = open(os.path.join(self.path, seg), "rb")
seg_data = seg_f.read()
seg_f.close()
if ( seg_data ): file.write(seg_data)
os.remove(os.path.join(self.path, seg))
file.close()
mt.log.debug("Assembled file: " + file_name + ".")
except Exception as inst:
mt.log.error("File assembly error: " + str(inst.args))
# report assembly completion status
if ( self.onAssemblyPercent ):
files_complete += 1
percent = int((float(files_complete)/float(file_count))*100.0)
self.onAssemblyPercent(percent)
def decodeSegment(self, seg):
try:
if ( self.decoder.yenc_decode(seg) ):
file_path = os.path.join(self.path, seg.decoded_filename + "." + str("%03d" % (seg.decoded_number,)))
cache_file = open(file_path, "wb")
cache_file.write(seg.decoded_data)
cache_file.close()
# memory leaks really bad without this.
del seg.data[:]
seg.decoded_data = ""
if ( self.onSuccess ): self.onSuccess(seg)
else:
if ( self.onFail ): self.onFail(seg)
except Exception as inst:
mt.log.error("ArticleDecoder decode segment(" + seg.msgid + ") error: " + str(inst.args))
if ( self.onFail ): self.onFail(seg)
finally:
del seg.data[:]
class SegmentDecoder(object):
def __init__(self):
        # yEnc maps each byte to (value + 42) % 256, so build a 256-entry
        # translation table that subtracts 42 modulo 256 to reverse it.
        self.YDEC_TRANS = ''.join([chr((i + 256 - 42) % 256) for i in range(256)])
def yenc_decode(self, seg):
ignore_errors = seg.lastTry()
buffer = []
in_body = False
end_found = False
for line in seg.data:
if (line[:7] == '=ybegin'):
args = line.split(" ")
for arg in args:
if ( arg.startswith("name=") ):
                        # split on the first "name=" so filenames containing '=' survive.
                        seg.decoded_filename = line.split("name=", 1)[-1]
if ( arg.startswith("part=") ):
seg.decoded_number = int(arg.split("=")[1])
elif (line[:6] == '=ypart'):
in_body = True
continue
            elif (line[:5] == '=yend'):
                end_found = True
                args = line.split(" ")
                for arg in args:
                    if ( arg.startswith("pcrc32=") or arg.startswith("crc32=") ):
                        c = arg.split("=")[1]
                        seg.decoded_crc = '0' * (8 - len(c)) + c
                        break
                # stop scanning so the =yend line is not appended to the body.
                break
if ( in_body ): buffer.append(line)
        # no ending found; the article must have been cut off in transit.
if ( not end_found ) and ( not ignore_errors ):
mt.log.debug("Article decode error: =yend not found.")
return False
# join the data together and decode it.
data = ''.join(buffer)
crc = ""
if ( yenc_found ):
decoded_data, _yenc_crc, something = _yenc.decode_string(data)
crc = '%08X' % ((_yenc_crc ^ -1) & 2**32L - 1)
else:
# stolen from hellanzb.
for i in (0, 9, 10, 13, 27, 32, 46, 61):
j = '=%c' % (i + 64)
data = data.replace(j, chr(i))
decoded_data = data.translate(self.YDEC_TRANS)
crc = '%08X' % (crc32(decoded_data) & 2**32L - 1)
# if the article has failed multiple times we'll ignore errors and take
# whatever we can get from it.
if ( not ignore_errors ):
# If a CRC was included, check it.
if ( seg.decoded_crc != "" ) and ( crc != "" ):
if ( seg.decoded_crc.upper() != crc ):
mt.log.debug("CRC does not match. A: " + seg.decoded_crc.upper() + " B: " + crc)
return False
# check partnum
if ( seg.decoded_number != seg.number ):
mt.log.debug("Part number does not match: " + seg.msgid)
return False
# ensure we decoded a filename.
if ( seg.decoded_filename == "" ):
mt.log.debug(seg.msgid + " does not have a filename.")
return False
else:
if ( seg.decoded_number != seg.number ): seg.decoded_number = seg.number
seg.decoded_size = len(decoded_data)
seg.decoded_data = decoded_data
return True
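# Minimal usage sketch (assumes the surrounding mt runtime; `next_segment`
# and the segment objects are hypothetical placeholders):
#
#   decoder = ArticleDecoder(next_segment, "/downloads", "/tmp/cache",
#                            onFinish=lambda: mt.log.debug("decode done"))
#   decoder.start()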
| gpl-3.0 | 2,171,819,136,183,207,700 | 35.630208 | 123 | 0.488981 | false | 4.05828 | false | false | false |
Nehoroshiy/urnn | utils/complex_expm.py | 1 | 2403 | import theano
import numpy as np
from theano import Op, Apply
from theano.tensor import as_tensor_variable
try:
import scipy.linalg
imported_scipy = True
except ImportError:
# some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
imported_scipy = False
class ComplexExpm(Op):
"""
Compute the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 3
expm = theano.tensor.tensor3(dtype=A.dtype)
return Apply(self, [A, ], [expm, ])
def perform(self, node, inputs, outputs):
(A,) = inputs
(expm,) = outputs
        # A stacks the real and imaginary parts along axis 0; rebuild the
        # complex matrix, exponentiate it, then split the result again.
        temp = scipy.linalg.expm(A[0, :, :] + 1j * A[1, :, :])
        expm[0] = np.stack([temp.real, temp.imag])
def grad(self, inputs, outputs):
(A,) = inputs
(g_out,) = outputs
return [ComplexExpmGrad()(A, g_out)]
def infer_shape(self, node, shapes):
return [shapes[0]]
def _hconj_internal(x):
x_hconj = np.transpose(x, axes=(0, 2, 1)).copy()
x_hconj[1, :, :] = -x_hconj[1, :, :]
return x_hconj
class ComplexExpmGrad(Op):
"""
Gradient of the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A, gw):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 3
out = theano.tensor.tensor3(dtype=A.dtype)
return Apply(self, [A, gw], [out, ])
def infer_shape(self, node, shapes):
return [shapes[0]]
def perform(self, node, inputs, outputs):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# this expression.
(A, gA) = inputs
(out,) = outputs
w, V = scipy.linalg.eig(A[0, :, :] + 1j * A[1, :, :], right=True)
U = scipy.linalg.inv(V)
exp_w = np.exp(w)
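        # Loewner matrix of divided differences (exp(w_i)-exp(w_j))/(w_i-w_j);
        # the diagonal evaluates 0/0, so it is overwritten with exp(w_i) below.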
X = np.subtract.outer(exp_w, exp_w) / np.subtract.outer(w, w)
np.fill_diagonal(X, exp_w)
Y = np.conj(V.dot(U.dot(gA[0, :, :].T - 1j * gA[1, :, :].T).dot(V) * X).dot(U)).T
out[0] = np.stack([Y.real, Y.imag]).astype(A.dtype)
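# Usage sketch (illustrative; assumes a configured Theano install), using the
# `complex_expm` instance created below:
#
#   A = theano.tensor.tensor3('A')   # A[0] = real part, A[1] = imaginary part
#   f = theano.function([A], complex_expm(A))
#   f(np.zeros((2, 3, 3)))           # expm(0) = identity, stacked as (2, 3, 3)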
complex_expm = ComplexExpm() | mit | -6,241,389,459,740,728,000 | 25.417582 | 89 | 0.559301 | false | 3.112694 | false | false | false |
deloittem/irma-frontend | frontend/helpers/formatters/external/virustotal/virustotal.py | 1 | 2746 | #
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from lib.plugins import PluginBase
from lib.irma.common.utils import IrmaProbeType
class VirusTotalFormatterPlugin(PluginBase):
# =================
# plugin metadata
# =================
_plugin_name_ = "VirusTotal"
_plugin_author_ = "IRMA (c) Quarkslab"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.external
_plugin_description_ = "VirusTotal results Formatter"
_plugin_dependencies_ = []
# ===========
# Formatter
# ===========
@staticmethod
def can_handle_results(raw_result):
expected_name = VirusTotalFormatterPlugin.plugin_name
expected_category = VirusTotalFormatterPlugin.plugin_category
return raw_result.get('type', None) == expected_category and \
raw_result.get('name', None) == expected_name
"""
VT AVs list
'Bkav', 'MicroWorld-eScan', 'nProtect', 'K7AntiVirus', 'NANO-Antivirus',
'F-Prot', 'Norman', 'Kaspersky', 'ByteHero', 'F-Secure', 'TrendMicro',
'McAfee-GW-Edition', 'Sophos', 'Jiangmin', 'ViRobot', 'Commtouch',
'AhnLab-V3', 'VBA32', 'Rising', 'Ikarus', 'Fortinet', 'Panda',
'CAT-QuickHeal', 'McAfee', 'Malwarebytes', 'K7GW', 'TheHacker',
'TotalDefense', 'TrendMicro-HouseCall', 'Avast', 'ClamAV', 'BitDefender',
'Agnitum', 'Comodo', 'DrWeb', 'VIPRE', 'AntiVir', 'Emsisoft', 'Antiy-AVL',
'Kingsoft', 'Microsoft', 'SUPERAntiSpyware', 'GData', 'ESET-NOD32',
'AVG', 'Baidu-International', 'Symantec', 'PCTools',
"""
@staticmethod
def format(raw_result):
status = raw_result.get('status', -1)
if status != -1:
vt_result = raw_result.pop('results', {})
av_result = vt_result.get('results', {})
if status == 1:
# get ratios from virustotal results
nb_detect = av_result.get('positives', 0)
nb_total = av_result.get('total', 0)
raw_result['results'] = "detected by {0}/{1}" \
"".format(nb_detect, nb_total)
raw_result['external_url'] = av_result.get('permalink', None)
elif status == 0:
raw_result['results'] = av_result.get('verbose_msg', None)
return raw_result
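# Example of a formatted result (illustrative values only):
#   {'status': 1, 'name': 'VirusTotal', 'type': 'external',
#    'results': 'detected by 3/57', 'external_url': 'https://...'}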
| apache-2.0 | 8,598,137,284,366,815,000 | 37.676056 | 78 | 0.612163 | false | 3.261283 | false | false | false |
brunats/IAR | Proj4 - Simulated Annealing/sa.py | 1 | 7620 | # -*- coding: utf-8 -*-
# ****************************************************************************
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ****************************************************************************
# Autores:
# Bruna Tavares Silva @brunats
# Christopher Renkavieski @ChrisRenka
# Disciplina:
# Inteligência Artificial - BCC - CCT UDESC
# Profº:
# Rafael Parpinelli
# ****************************************************************************
import time
import csv
import random
import math
from copy import deepcopy
#import numpy as np
#### function definitions
def leitura(arq):
    # initialize the clause list
entrada = []
    # read the CNF file and build the clause list
with open(arq,'r') as csvfile:
plots = csv.reader(csvfile, delimiter='\n')
for row in plots:
linha = ""
if(row != []):
linha = row.pop(0)
if(linha[0] != 'c'):
if(linha[0] == 'p'):
p, tipo, var, clau = linha.split()
var = int(var)
clau = int(clau)
elif(linha[0] != '%' and linha[0] != '0'):
a, b, c, zero = linha.split()
a = int(a)
b = int(b)
c = int(c)
clausula = []
clausula.append(a)
clausula.append(b)
clausula.append(c)
entrada.append(clausula)
#print(entrada)
return var, clau, entrada
def geraRandom(n):
lista = []
for i in range (0, n+1):
lista.append(random.choice([True, False]))
return lista
def inverte(a):
if (a == True):
return False
return True
def avalia(cnf, sol):
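    # Count unsatisfied clauses: each clause holds three signed variable
    # indices; a negative index means the variable appears negated.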
total = 0
for i in cnf:
cl = False
for j in i:
if (j>0):
cl = cl or sol[j]
else:
cl = cl or inverte(sol[-j])
if (cl == False):
total+=1
return total
def randomSearch(cnf, sol, var, clau, it, num):
arqNome = 'random{}.txt'.format(num)
f = open(arqNome, 'w')
#it = 5000
resultado = avalia(cnf, sol)
#print(resultado/clau)
s = '0 {}\n'.format(resultado/clau)
f.write(s)
lista = []
lista.append(resultado/clau)
for i in range(1,it):
sTemp = geraRandom(var)
rTemp = avalia(cnf, sTemp)
#print(rTemp/clau)
s = '{} {}\n'.format(i, rTemp/clau)
f.write(s)
lista.append(rTemp/clau)
if(rTemp < resultado):
sol = deepcopy(sTemp)
resultado = rTemp
f.close()
return sol, resultado, lista
def reduzLinear(t, ti, passos):
return t - ti/passos
def reduzExp(ti, passo, alpha):
return ti*pow(alpha, passo)
def perturba(sol, var):
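    # Neighborhood move: flip one randomly chosen variable.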
nova = deepcopy(sol)
flip = random.randint(1, var)
nova[flip] = inverte(nova[flip])
return nova
def simAne(cnf, sol, var, clau, it, num):
arqNome = 'simAne{}.txt'.format(num)
f = open(arqNome, 'w')
ti = 0.010
t = ti
resultado = avalia(cnf, sol)/clau
s = '0 {}\n'.format(resultado)
f.write(s)
lista = []
lista.append(resultado)
melhorSol = deepcopy(sol)
melhorResult = resultado
for i in range(1, it):
sTemp = perturba(sol, var)
rTemp = avalia(cnf, sTemp)/clau
#s = '{} {}\n'.format(i, rTemp)
s = '{} {}\n'.format(i, resultado)
f.write(s)
lista.append(resultado)
deltaE = rTemp - resultado
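        # Metropolis criterion: always accept improvements; accept worse
        # solutions with probability exp(-deltaE / t).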
if(deltaE<=0):
sol = deepcopy(sTemp)
resultado = rTemp
if(rTemp < melhorResult):
melhorResult = rTemp
melhorSol = deepcopy(sTemp)
elif(random.uniform(0,1) <= math.exp(-deltaE/t)):
#print(math.exp(-deltaE/t))
sol = deepcopy(sTemp)
resultado = rTemp
t = reduzLinear(t, ti, it)
#t = reduzExp(ti, i, 0.9999)
#print(t)
f.close()
return melhorSol, melhorResult, lista
def executa(cnf, var, clau, it):
melhorRand = []
melhorSimAne = []
listaRand = []
listaSimAne = []
for i in range(0, 10):
print(i)
solInicial = geraRandom(var)
        solFinal, rFinal, totalRand = randomSearch(cnf, solInicial, var, clau, it, i)
melhorRand.append(rFinal)
listaRand.append(totalRand)
        solFinal, rFinal, totalSimAne = simAne(cnf, solInicial, var, clau, it, i)
melhorSimAne.append(rFinal)
listaSimAne.append(totalSimAne)
print(melhorRand)
print(melhorSimAne)
return listaRand, listaSimAne
def media(listaRand, listaSimAne, it):
print('Calculando médias e desvios padrão')
fRand = open('mediaRand.txt', 'w')
fSimAne = open('mediaSimAne.txt', 'w')
for j in range(0, it):
mediaRand = 0.0
mediaSimAne = 0.0
for i in range(0, 10):
mediaRand += listaRand[i][j]
mediaSimAne += listaSimAne[i][j]
mediaRand = mediaRand/10
mediaSimAne = mediaSimAne/10
sdRand = 0.0
sdSimAne = 0.0
for i in range(0, 10):
sdRand += (listaRand[i][j] - mediaRand)*(listaRand[i][j] - mediaRand)
sdSimAne += (listaSimAne[i][j] - mediaSimAne)*(listaSimAne[i][j] - mediaSimAne)
sdRand = sdRand/10
sdRand = math.sqrt(sdRand)
sRand = '{} {} {} {}\n'.format(j, mediaRand, mediaRand-sdRand, mediaRand+sdRand)
fRand.write(sRand)
sdSimAne = sdSimAne/10
sdSimAne = math.sqrt(sdSimAne)
sSimAne = '{} {} {} {}\n'.format(j, mediaSimAne, mediaSimAne-sdSimAne, mediaSimAne+sdSimAne)
fSimAne.write(sSimAne)
fRand.close()
fSimAne.close()
#### end of function definitions
arq = 'uf20-01.cnf'
#arq = 'teste.txt'
n_var, n_clau, lista = leitura(arq)
it = 50000
listaRand, listaSimAne = executa(lista, n_var, n_clau, it)
media(listaRand, listaSimAne, it)
'''
solInicial = geraRandom(n_var)
print("Solução inicial:")
print(solInicial)
#solFinal, rFinal = randomSearch(lista, solInicial, n_var, n_clau, it)
#print("Solução final random:")
#print (solFinal)
#print (rFinal)
#solFinal, rFinal = simAne(lista, solInicial, n_var, n_clau, it)
#print("Solução final simulated annealing:")
#print (solFinal)
#print (rFinal*n_clau)
'''
| gpl-3.0 | -6,583,067,115,758,831,000 | 26.066421 | 100 | 0.503813 | false | 3.261578 | false | false | false |
i02sopop/Kirinki | settings.py | 1 | 4976 | # This file is part of Kirinki.
#
# Kirinki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Kirinki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with kirinki. If not, see <http://www.gnu.org/licenses/>.
#
# Django settings for kirinki project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Pablo Alvarez de Sotomayor Posadillo', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'kirinki', # Or path to database file if using sqlite3.
'USER': 'kuser', # Not used with sqlite3.
'PASSWORD': 'dbpasswd', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Madrid'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-es'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = '/var/www/kirinki/static/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '7uj1)e5k#@x%gxi0#)-08l5w%(sqbty^uct7hv1w1cy#-=%@c*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
)
ROOT_URLCONF = 'kirinki.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages"
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/var/www/kirinki/templates'
# 'templates'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'kirinki',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
CACHE_BACKEND = 'memcached://127.0.0.1:21211/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
EMAIL_HOST = 'kirinki.net'
EMAIL_PORT = '25'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
| agpl-3.0 | -3,486,017,059,594,565,000 | 35.321168 | 134 | 0.706793 | false | 3.489481 | false | false | false |
julzhk/crypto_spike | simple_blockchain.py | 1 | 1058 | import hashlib
from collections import OrderedDict
class BlockChain(object):
def __init__(self, genesis='None'):
self.data = OrderedDict()
self.prev = self.make_hash(genesis)
def make_hash(self, msg):
hasher = hashlib.md5()
hasher.update(u'%s' % msg)
return hasher.hexdigest()
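    # Each block's key is md5("payload:previous_key"), so the blocks form a
    # hash chain: tampering with any block invalidates every later key.
    # (md5 here is for demonstration only; it is not collision-resistant.)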
def add(self, data):
data = u'%s:%s' % (data, self.prev)
key = self.make_hash(data)
self.data[key] = data
self.prev = key
def output(self):
for k in self.data:
print '%s : %s' % (k, self.data[k])
def verify(self):
return all([k == self.make_hash(self.data[k]) for k in self.data])
bc = BlockChain(genesis='hi')
bc.add('hello')
bc.add('hello world')
bc.add('hello world!')
bc.output()
print bc.verify()
# 716e505b51b115aa7554596127627e50 : hello:49f68a5c8493ec2c0bf489821c21fc3b
# 2d890e63bcecb7e826ac7201aa9a055b : hello world:716e505b51b115aa7554596127627e50
# c6c09a0ecf532c2ee1f1a5dcd8455b0b : hello world!:2d890e63bcecb7e826ac7201aa9a055b
# True
| unlicense | 4,228,283,874,792,296,000 | 24.804878 | 82 | 0.651229 | false | 2.762402 | false | false | false |
sebres/fail2ban | fail2ban/server/observer.py | 2 | 17879 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Serg G. Brester (sebres)
#
# This module was written as part of ban time increment feature.
__author__ = "Serg G. Brester (sebres)"
__copyright__ = "Copyright (c) 2014 Serg G. Brester"
__license__ = "GPL"
import threading
from .jailthread import JailThread
from .failmanager import FailManagerEmpty
import os, logging, time, datetime, math, json, random
import sys
from ..helpers import getLogger
from .mytime import MyTime
from .utils import Utils
# Gets the instance of the logger.
logSys = getLogger(__name__)
class ObserverThread(JailThread):
"""Handles observing a database, managing bad ips and ban increment.
Parameters
----------
Attributes
----------
daemon
ident
name
status
active : bool
Control the state of the thread.
idle : bool
Control the idle state of the thread.
sleeptime : int
The time the thread sleeps for in the loop.
"""
	# the observer is event driven and sleeps incrementally, so sleep intervals can be short:
DEFAULT_SLEEP_INTERVAL = Utils.DEFAULT_SLEEP_INTERVAL / 10
def __init__(self):
# init thread
super(ObserverThread, self).__init__(name='f2b/observer')
# before started - idle:
self.idle = True
## Event queue
self._queue_lock = threading.RLock()
self._queue = []
## Event, be notified if anything added to event queue
self._notify = threading.Event()
		## Sleep for max 60 seconds; it is possible to specify infinite to always sleep until notified via event,
		## but this way some infrequent service "events" can later be handled directly in the observer's main loop (not using the queue)
self.sleeptime = 60
#
self._timers = {}
self._paused = False
self.__db = None
self.__db_purge_interval = 60*60
# observer is a not main thread:
self.daemon = True
def __getitem__(self, i):
try:
return self._queue[i]
except KeyError:
raise KeyError("Invalid event index : %s" % i)
def __delitem__(self, i):
try:
del self._queue[i]
except KeyError:
raise KeyError("Invalid event index: %s" % i)
def __iter__(self):
return iter(self._queue)
def __len__(self):
return len(self._queue)
def __eq__(self, other): # Required for Threading
return False
def __hash__(self): # Required for Threading
return id(self)
def add_named_timer(self, name, starttime, *event):
"""Add a named timer event to queue will start (and wake) in 'starttime' seconds
Previous timer event with same name will be canceled and trigger self into
queue after new 'starttime' value
"""
t = self._timers.get(name, None)
if t is not None:
t.cancel()
t = threading.Timer(starttime, self.add, event)
self._timers[name] = t
t.start()
def add_timer(self, starttime, *event):
"""Add a timer event to queue will start (and wake) in 'starttime' seconds
"""
# in testing we should wait (looping) for the possible time drifts:
if MyTime.myTime is not None and starttime:
# test time after short sleep:
t = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
(MyTime.time() + starttime, time.time() + starttime, event)
)
t.start()
return
# add timer event:
t = threading.Timer(starttime, self.add, event)
t.start()
def _delayedEvent(self, endMyTime, endTime, event):
if MyTime.time() >= endMyTime or time.time() >= endTime:
self.add_timer(0, *event)
return
# repeat after short sleep:
t = threading.Timer(Utils.DEFAULT_SLEEP_INTERVAL, self._delayedEvent,
(endMyTime, endTime, event)
)
t.start()
def pulse_notify(self):
"""Notify wakeup (sets /and resets/ notify event)
"""
if not self._paused:
n = self._notify
if n:
n.set()
#n.clear()
def add(self, *event):
"""Add a event to queue and notify thread to wake up.
"""
## lock and add new event to queue:
with self._queue_lock:
self._queue.append(event)
self.pulse_notify()
def add_wn(self, *event):
"""Add a event to queue withouth notifying thread to wake up.
"""
## lock and add new event to queue:
with self._queue_lock:
self._queue.append(event)
def call_lambda(self, l, *args):
l(*args)
def run(self):
"""Main loop for Threading.
This function is the main loop of the thread.
Returns
-------
bool
True when the thread exits nicely.
"""
logSys.info("Observer start...")
## first time create named timer to purge database each hour (clean old entries) ...
self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
## Mapping of all possible event types of observer:
__meth = {
# universal lambda:
'call': self.call_lambda,
# system and service events:
'db_set': self.db_set,
'db_purge': self.db_purge,
# service events of observer self:
'is_alive' : self.isAlive,
'is_active': self.isActive,
'start': self.start,
'stop': self.stop,
'nop': lambda:(),
'shutdown': lambda:()
}
try:
## check it self with sending is_alive event
self.add('is_alive')
## if we should stop - break a main loop
while self.active:
self.idle = False
## check events available and execute all events from queue
while not self._paused:
## lock, check and pop one from begin of queue:
try:
ev = None
with self._queue_lock:
if len(self._queue):
ev = self._queue.pop(0)
if ev is None:
break
## retrieve method by name
meth = ev[0]
if not callable(ev[0]): meth = __meth.get(meth) or getattr(self, meth)
## execute it with rest of event as variable arguments
meth(*ev[1:])
except Exception as e:
#logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
logSys.error('%s', e, exc_info=True)
## going sleep, wait for events (in queue)
n = self._notify
if n:
self.idle = True
n.wait(self.sleeptime)
					## wake up - reset signal now (we don't need it as long as we read from the queue)
n.clear()
if self._paused:
continue
else:
						## notify event deleted (shutdown) - just sleep a little bit (waiting for shutdown events, prevent high cpu usage)
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
## stop by shutdown and empty queue :
if not self.is_full:
break
## end of main loop - exit
logSys.info("Observer stopped, %s events remaining.", len(self._queue))
self._notify = None
#print("Observer stopped, %s events remaining." % len(self._queue))
except Exception as e:
logSys.error('Observer stopped after error: %s', e, exc_info=True)
#print("Observer stopped with error: %s" % str(e))
# clear all events - exit, for possible calls of wait_empty:
with self._queue_lock:
self._queue = []
self.idle = True
return True
def isAlive(self):
#logSys.debug("Observer alive...")
return True
def isActive(self, fromStr=None):
# logSys.info("Observer alive, %s%s",
# 'active' if self.active else 'inactive',
# '' if fromStr is None else (", called from '%s'" % fromStr))
return self.active
def start(self):
with self._queue_lock:
if not self.active:
super(ObserverThread, self).start()
def stop(self, wtime=5, forceQuit=True):
if self.active and self._notify:
logSys.info("Observer stop ... try to end queue %s seconds", wtime)
#print("Observer stop ....")
			# just add a shutdown job so we can later wait until the remaining events are processed
with self._queue_lock:
self.add_wn('shutdown')
#don't pulse - just set, because we will delete it hereafter (sometimes not wakeup)
n = self._notify
self._notify.set()
#self.pulse_notify()
self._notify = None
# wait max wtime seconds until full (events remaining)
if self.wait_empty(wtime) or forceQuit:
n.clear()
self.active = False; # leave outer (active) loop
self._paused = True; # leave inner (queue) loop
self.__db = None
else:
self._notify = n
return self.wait_idle(min(wtime, 0.5)) and not self.is_full
return True
@property
def is_full(self):
with self._queue_lock:
return True if len(self._queue) else False
def wait_empty(self, sleeptime=None):
"""Wait observer is running and returns if observer has no more events (queue is empty)
"""
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
if sleeptime is not None:
e = MyTime.time() + sleeptime
		# block the queue with a no-op to be sure all real jobs are executed once the nop leaves the queue:
if self._notify is not None:
self.add_wn('nop')
if self.is_full and self.idle:
self.pulse_notify()
while self.is_full:
if sleeptime is not None and MyTime.time() > e:
break
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
		# wait for idle to be sure the last queue element is processed (events are popped before being processed):
self.wait_idle(0.001)
return not self.is_full
def wait_idle(self, sleeptime=None):
"""Wait observer is running and returns if observer idle (observer sleeps)
"""
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
if self.idle:
return True
if sleeptime is not None:
e = MyTime.time() + sleeptime
while not self.idle:
if sleeptime is not None and MyTime.time() > e:
break
time.sleep(ObserverThread.DEFAULT_SLEEP_INTERVAL)
return self.idle
@property
def paused(self):
return self._paused;
@paused.setter
def paused(self, pause):
if self._paused == pause:
return
self._paused = pause
# wake after pause ended
self.pulse_notify()
@property
def status(self):
"""Status of observer to be implemented. [TODO]
"""
return ('', '')
## -----------------------------------------
## [Async] database service functionality ...
## -----------------------------------------
def db_set(self, db):
self.__db = db
def db_purge(self):
logSys.debug("Purge database event occurred")
if self.__db is not None:
self.__db.purge()
# trigger timer again ...
self.add_named_timer('DB_PURGE', self.__db_purge_interval, 'db_purge')
## -----------------------------------------
## [Async] ban time increment functionality ...
## -----------------------------------------
def failureFound(self, failManager, jail, ticket):
""" Notify observer a failure for ip was found
Observer will check ip was known (bad) and possibly increase an retry count
"""
# check jail active :
if not jail.isAlive() or not jail.getBanTimeExtra("increment"):
return
ip = ticket.getIP()
unixTime = ticket.getTime()
logSys.debug("[%s] Observer: failure found %s", jail.name, ip)
# increase retry count for known (bad) ip, corresponding banCount of it (one try will count than 2, 3, 5, 9 ...) :
banCount = 0
retryCount = 1
timeOfBan = None
try:
maxRetry = failManager.getMaxRetry()
db = jail.database
if db is not None:
for banCount, timeOfBan, lastBanTime in db.getBan(ip, jail):
banCount = max(banCount, ticket.getBanCount())
retryCount = ((1 << (banCount if banCount < 20 else 20))/2 + 1)
# if lastBanTime == -1 or timeOfBan + lastBanTime * 2 > MyTime.time():
# retryCount = maxRetry
break
retryCount = min(retryCount, maxRetry)
			# check whether this ticket is already known (the line was already processed, is in the database and will be restored from there):
if timeOfBan is not None and unixTime <= timeOfBan:
logSys.debug("[%s] Ignore failure %s before last ban %s < %s, restored",
jail.name, ip, unixTime, timeOfBan)
return
			# for non-increased failures the observer should not add the ticket to the fail manager, because it was already added by the filter itself
if retryCount <= 1:
return
# retry counter was increased - add it again:
logSys.info("[%s] Found %s, bad - %s, %s # -> %s%s", jail.name, ip,
MyTime.time2str(unixTime), banCount, retryCount,
(', Ban' if retryCount >= maxRetry else ''))
			# retryCount-1, because the ticket was already incremented once by the filter itself
retryCount = failManager.addFailure(ticket, retryCount - 1, True)
ticket.setBanCount(banCount)
# after observe we have increased attempt count, compare it >= maxretry ...
if retryCount >= maxRetry:
# perform the banning of the IP now (again)
# [todo]: this code part will be used multiple times - optimize it later.
try: # pragma: no branch - exception is the only way out
while True:
ticket = failManager.toBan(ip)
jail.putFailTicket(ticket)
except FailManagerEmpty:
failManager.cleanup(MyTime.time())
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
class BanTimeIncr:
def __init__(self, banTime, banCount):
self.Time = banTime
self.Count = banCount
def calcBanTime(self, jail, banTime, banCount):
be = jail.getBanTimeExtra()
return be['evformula'](self.BanTimeIncr(banTime, banCount))
def incrBanTime(self, jail, banTime, ticket):
"""Check for IP address to increment ban time (if was already banned).
Returns
-------
float
new ban time.
"""
# check jail active :
if not jail.isAlive() or not jail.database:
return banTime
be = jail.getBanTimeExtra()
ip = ticket.getIP()
orgBanTime = banTime
# check ip was already banned (increment time of ban):
try:
if banTime > 0 and be.get('increment', False):
# search IP in database and increase time if found:
for banCount, timeOfBan, lastBanTime in \
jail.database.getBan(ip, jail, overalljails=be.get('overalljails', False)) \
:
# increment count in ticket (if still not increased from banmanager, test-cases?):
if banCount >= ticket.getBanCount():
ticket.setBanCount(banCount+1)
logSys.debug('IP %s was already banned: %s #, %s', ip, banCount, timeOfBan);
# calculate new ban time
if banCount > 0:
banTime = be['evformula'](self.BanTimeIncr(banTime, banCount))
ticket.setBanTime(banTime)
					# check the current ticket time to prevent increasing twice-read tickets (restored from the log file as well as the database after restart)
if ticket.getTime() > timeOfBan:
logSys.info('[%s] IP %s is bad: %s # last %s - incr %s to %s' % (jail.name, ip, banCount,
MyTime.time2str(timeOfBan),
datetime.timedelta(seconds=int(orgBanTime)), datetime.timedelta(seconds=int(banTime))));
else:
ticket.restored = True
break
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
return banTime
def banFound(self, ticket, jail, btime):
""" Notify observer a ban occured for ip
Observer will check ip was known (bad) and possibly increase/prolong a ban time
Secondary we will actualize the bans and bips (bad ip) in database
"""
if ticket.restored: # pragma: no cover (normally not resored tickets only)
return
try:
oldbtime = btime
ip = ticket.getIP()
logSys.debug("[%s] Observer: ban found %s, %s", jail.name, ip, btime)
# if not permanent and ban time was not set - check time should be increased:
if btime != -1 and ticket.getBanTime() is None:
btime = self.incrBanTime(jail, btime, ticket)
# if we should prolong ban time:
if btime == -1 or btime > oldbtime:
ticket.setBanTime(btime)
# if not permanent
if btime != -1:
bendtime = ticket.getTime() + btime
logtime = (datetime.timedelta(seconds=int(btime)),
MyTime.time2str(bendtime))
# check ban is not too old :
if bendtime < MyTime.time():
logSys.debug('Ignore old bantime %s', logtime[1])
return False
else:
logtime = ('permanent', 'infinite')
# if ban time was prolonged - log again with new ban time:
if btime != oldbtime:
logSys.notice("[%s] Increase Ban %s (%d # %s -> %s)", jail.name,
ip, ticket.getBanCount(), *logtime)
# delayed prolonging ticket via actions that expected this (not later than 10 sec):
logSys.log(5, "[%s] Observer: prolong %s in %s", jail.name, ip, (btime, oldbtime))
self.add_timer(min(10, max(0, btime - oldbtime - 5)), self.prolongBan, ticket, jail)
# add ticket to database, but only if was not restored (not already read from database):
if jail.database is not None and not ticket.restored:
# add to database always only after ban time was calculated an not yet already banned:
jail.database.addBan(jail, ticket)
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
def prolongBan(self, ticket, jail):
""" Notify observer a ban occured for ip
Observer will check ip was known (bad) and possibly increase/prolong a ban time
Secondary we will actualize the bans and bips (bad ip) in database
"""
try:
btime = ticket.getBanTime()
ip = ticket.getIP()
logSys.debug("[%s] Observer: prolong %s, %s", jail.name, ip, btime)
# prolong ticket via actions that expected this:
jail.actions._prolongBan(ticket)
except Exception as e:
logSys.error('%s', e, exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
# Global observer initial created in server (could be later rewriten via singleton)
class _Observers:
def __init__(self):
self.Main = None
Observers = _Observers()
| gpl-2.0 | 3,966,489,470,698,092,500 | 32.356343 | 133 | 0.669668 | false | 3.223765 | false | false | false |
talcorn92/FoodOrigins | src/scale_toxicity.py | 2 | 1670 | def check_toxicity(x):
if x <= 5:
return "Very Toxic"
elif x <= 5 and x >= 50:
return "Toxic"
elif x <= 50 and x >= 300:
return "Moderately Toxic"
elif x <= 300 and x >= 2000:
return "Not Toxic"
elif x == "Varies":
return "Unknown"
else:
return "Unknown"
def get_ld50(x):
chemicals = {
'Bipiridils': 157,
'Anticoagulants': 280,
'Botanic prod&biologSdTrF': "Varies",
'Carbamates-insect-SdTr': 500,
'Chlorinated Hydrocarbons': 18,
'Urea derivates': 11000,
'Uracil': 6000,
'Mineral Oils': "Varies",
'Triazines': 672,
'Organo-Phosphates': 1300,
'Inorganics': "Varies",
'Botanic.Produc&Biologic.': "Varies",
'Carbamates Herbicides': 30000,
'Amides': 380,
'Triazoles diazoles-SdTrF': 1453,
'Disinfectants': 192,
'Phenoxy Hormone Products': 930,
'Benzimidazoles-SeedTrF': 385,
'Carbamates Insecticides': 500,
'Pyrethroids': 2000,
'Dithiocarbamates-SeedTrF': 400,
'Dinitroanilines': 10000,
'Triazoles, Diazoles': 1453,
'Diazines, Morpholines': 3900,
'Organo-phospates-SdTr In': 1300,
'Narcotics': 127,
'Plant Growth Regulators': "Varies",
'Benzimidazoles': 385,
'Pyrethroids-SeedTr Ins': 2000,
'Dithiocarbamates': 400,
'Sulfonyl Ureas': 2000
}
    # dict.get with a default replaces the explicit membership test.
    return chemicals.get(x, "NONE")
def get_tox(chemical):
lDValue = get_ld50(chemical)
toxicity = check_toxicity(lDValue)
return toxicity
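# Example (illustrative): Bipiridils has an LD50 of 157 mg/kg, so
#   get_tox('Bipiridils')  ->  "Moderately Toxic"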
| mit | -4,583,518,396,735,686,000 | 27.305085 | 45 | 0.55988 | false | 2.840136 | false | false | false |
opt9/Mobile-Security-Framework-MobSF | StaticAnalyzer/tools/enjarify/enjarify/main.py | 14 | 4551 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile, traceback, argparse, collections
from . import parsedex
from .jvm import writeclass
from .mutf8 import decode
from .jvm.optimization import options
def read(fname, mode='rb'):
with open(fname, mode) as f:
return f.read()
def translate(data, opts, classes=None, errors=None):
dex = parsedex.DexFile(data)
classes = collections.OrderedDict() if classes is None else classes
errors = collections.OrderedDict() if errors is None else errors
for cls in dex.classes:
unicode_name = decode(cls.name) + '.class'
if unicode_name in classes or unicode_name in errors:
print('Warning, duplicate class name', unicode_name)
continue
try:
class_data = writeclass.toClassFile(cls, opts)
classes[unicode_name] = class_data
except Exception:
errors[unicode_name] = traceback.format_exc()
if not (len(classes) + len(errors)) % 1000:
print(len(classes) + len(errors), 'classes processed')
return classes, errors
def writeToJar(fname, classes):
with zipfile.ZipFile(fname, 'w') as out:
for unicode_name, data in classes.items():
# Don't bother compressing small files
compress_type = zipfile.ZIP_DEFLATED if len(data) > 10000 else zipfile.ZIP_STORED
info = zipfile.ZipInfo(unicode_name)
info.external_attr = 0o775 << 16 # set Unix file permissions
out.writestr(info, data, compress_type=compress_type)
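# Library-style usage sketch (paths are illustrative; see main() for the CLI):
#
#   classes, errors = translate(read('classes.dex'), options.NONE)
#   writeToJar('classes-enjarify.jar', classes)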
def main():
parser = argparse.ArgumentParser(prog='enjarify', description='Translates Dalvik bytecode (.dex or .apk) to Java bytecode (.jar)')
parser.add_argument('inputfile')
parser.add_argument('-o', '--output', help='Output .jar file. Default is [input-filename]-enjarify.jar.')
parser.add_argument('-f', '--force', action='store_true', help='Force overwrite. If output file already exists, this option is required to overwrite.')
parser.add_argument('--fast', action='store_true', help='Speed up translation at the expense of generated bytecode being less readable.')
args = parser.parse_args()
dexs = []
if args.inputfile.lower().endswith('.apk'):
with zipfile.ZipFile(args.inputfile, 'r') as z:
for name in z.namelist():
if name.startswith('classes') and name.endswith('.dex'):
dexs.append(z.read(name))
else:
dexs.append(read(args.inputfile))
# Exclusive mode requires 3.3+, so provide helpful error in this case
if not args.force:
try:
FileExistsError
except NameError:
print('Overwrite protection requires Python 3.3+. Either pass -f or --force, or upgrade to a more recent version of Python. If you are using Pypy3 2.4, you need to switch to a nightly build or build from source. Or just pass -f.')
return
# Might as well open the output file early so we can detect existing file error
# before going to the trouble of translating everything
outname = args.output or args.inputfile.rpartition('/')[-1].rpartition('.')[0] + '-enjarify.jar'
try:
outfile = open(outname, mode=('wb' if args.force else 'xb'))
except FileExistsError:
print('Error, output file already exists and --force was not specified.')
print('To overwrite the output file, pass -f or --force.')
return
opts = options.NONE if args.fast else options.PRETTY
classes = collections.OrderedDict()
errors = collections.OrderedDict()
for data in dexs:
translate(data, opts=opts, classes=classes, errors=errors)
writeToJar(outfile, classes)
outfile.close()
print('Output written to', outname)
for name, error in sorted(errors.items()):
print(name, error)
print('{} classes translated successfully, {} classes had errors'.format(len(classes), len(errors)))
if __name__ == "__main__":
main()
| gpl-3.0 | 1,931,750,613,663,698,200 | 42.342857 | 242 | 0.670402 | false | 4.092626 | false | false | false |
seanfarley/pygit2 | test/test_tag.py | 2 | 3589 | # -*- coding: UTF-8 -*-
#
# Copyright 2010 Google, Inc.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Tag objects."""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import pygit2
from . import utils
__author__ = '[email protected] (Dave Borowitz)'
TAG_SHA = '3d2962987c695a29f1f80b6c3aa4ec046ef44369'
class TagTest(utils.BareRepoTestCase):
def test_read_tag(self):
tag = self.repo[TAG_SHA]
self.assertTrue(isinstance(tag, pygit2.Tag))
self.assertEqual(pygit2.GIT_OBJ_TAG, tag.type)
self.assertEqual(pygit2.GIT_OBJ_COMMIT, tag.target.type)
self.assertEqual('root', tag.name)
self.assertEqual(
('Dave Borowitz', '[email protected]', 1288724692, -420),
tag.tagger)
self.assertEqual('Tagged root commit.\n', tag.message)
commit = tag.target
self.assertEqual('Initial test data commit.\n', commit.message)
def test_new_tag(self):
name = 'thetag'
target = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
message = 'Tag a blob.\n'
tagger = ('John Doe', '[email protected]', 12347, 0)
target_prefix = target[:5]
too_short_prefix = target[:3]
self.assertRaises(ValueError, self.repo.create_tag, name,
too_short_prefix, pygit2.GIT_OBJ_BLOB, tagger,
message)
sha = self.repo.create_tag(name, target_prefix, pygit2.GIT_OBJ_BLOB,
tagger, message)
tag = self.repo[sha]
self.assertEqual('3ee44658fd11660e828dfc96b9b5c5f38d5b49bb', tag.hex)
self.assertEqual(name, tag.name)
self.assertEqual(target, tag.target.hex)
self.assertEqual(tagger, tag.tagger)
self.assertEqual(message, tag.message)
self.assertEqual(name, self.repo[tag.hex].name)
def test_modify_tag(self):
name = 'thetag'
target = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
message = 'Tag a blob.\n'
tagger = ('John Doe', '[email protected]', 12347)
tag = self.repo[TAG_SHA]
self.assertRaises(AttributeError, setattr, tag, 'name', name)
self.assertRaises(AttributeError, setattr, tag, 'target', target)
self.assertRaises(AttributeError, setattr, tag, 'tagger', tagger)
self.assertRaises(AttributeError, setattr, tag, 'message', message)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,870,672,821,302,102,500 | 36.778947 | 77 | 0.676512 | false | 3.603414 | true | false | false |
SunDwarf/ButterflyNet | examples/packets/echo_server.py | 1 | 1624 | import asyncio
import logging
import struct
from bfnet.packets import PacketHandler, Packet, PacketButterfly
from bfnet import util
logging.basicConfig(filename='/dev/null', level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(name)s - %(message)s')
root = logging.getLogger()
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
root.addHandler(consoleHandler)
# Create your event loop.
loop = asyncio.get_event_loop()
my_handler = PacketHandler.get_handler(loop=loop, log_level=logging.DEBUG)
# Create a new packet.
@my_handler.add_packet_type
class Packet0Echo(Packet):
id = 0
def __init__(self, pbf):
super().__init__(pbf)
# Set our attributes.
self.data_to_echo = ""
def unpack(self, data: dict):
"""
Unpack the packet.
"""
self.data_to_echo = data["echo"]
return True
def gen(self):
"""
Pack a new packet.
"""
return {"echo": self.data_to_echo}
@asyncio.coroutine
def main():
my_server = yield from my_handler.create_server(("127.0.0.1", 8001), ("keys/test.crt", "keys/test.key", None))
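    # Echo handler: read packets until the connection closes, writing each
    # one straight back to the client.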
@my_server.set_handler
@asyncio.coroutine
def handler(bf: PacketButterfly):
while True:
echopacket = yield from bf.read()
if not echopacket:
break
bf.write(echopacket)
if __name__ == '__main__':
loop.create_task(main())
try:
loop.run_forever()
except KeyboardInterrupt:
# Close the server.
my_handler.stop()
loop.close()
| lgpl-3.0 | -3,121,332,836,884,908,000 | 21.246575 | 114 | 0.618842 | false | 3.68254 | false | false | false |
anshbansal/general | Python3/project_euler/001_050/017.py | 1 | 1033 | NUMS = {
    0: 0,  # mapping 0 to 0 letters avoids a special case below
1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4,
10: 3, 11: 6, 12: 6, 13: 8, 14: 8, 15: 7, 16: 7, 17: 9,
18: 8, 19: 8
}
def numeral_to_string(num):
ans = 0
if num > 999:
ans += len("onethousand")
num %= 1000
if num > 99:
temp = num // 100
ans += NUMS[temp]
if num % 100:
ans += len("hundredand")
else:
ans += len("hundred")
num %= 100
if num > 19:
#Strings of 80 and 90 have same length
#Strings of 40, 50, 60 have same length
#Strings of 20, 30 have same length
if num > 79:
ans += 6
elif num > 69:
ans += 7
elif num > 39:
ans += 5
else:
ans += 6
num %= 10
#NUMS[num] for num < 20
return (ans + NUMS[num])
def prob_017():
return sum(numeral_to_string(i) for i in range(1, 1001))
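# Sanity check from the problem statement: the numbers 1 to 5 written out
# contain 3 + 3 + 5 + 4 + 4 = 19 letters.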
if __name__ == "__main__":
print(prob_017())
| mit | -2,665,086,860,459,994,000 | 20.520833 | 60 | 0.448209 | false | 3.074405 | false | false | false |
chunkified/kl-iostream | test/test_all.py | 1 | 2357 | import os
import sys
import glob
import subprocess
import signal
import tempfile
import time
import unittest
class KLTest(unittest.TestCase):
def __init__(self, klFilePath):
super(KLTest, self).__init__()
self.__klFilePath = klFilePath
def id(self):
return os.path.split(self.__klFilePath)[1].partition('.')[0]
def shortDescription(self):
return self.id()
def runTest(self):
stageFolder = os.path.abspath(os.path.join(os.path.split(self.__klFilePath)[0], '..', '..', 'stage'))
env = {}
env.update(os.environ)
        if 'FABRIC_EXTS_PATH' not in env:
env['FABRIC_EXTS_PATH'] = stageFolder
else:
env['FABRIC_EXTS_PATH'] += os.pathsep + stageFolder
p = None
def handler(signum, frame):
if p:
os.kill(p.pid, signal.SIGTERM)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
klArgs = ['kl'] + ['--showthread', '--loadexts', self.__klFilePath]
        # Create a uniquely named temp file, then close and reopen it by path
        # so the subprocess output can be read back after the process exits.
        logFile = tempfile.TemporaryFile()
        logFilePath = logFile.name
        logFile.file.flush()
        logFile.file.close()
        logFile = open(logFilePath, 'wb')
p = subprocess.Popen(
klArgs,
env = env,
cwd = os.path.abspath(os.path.split(__file__)[0]),
shell=True,
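            # note: an argument list combined with shell=True is platform-
            # dependent (on POSIX only the first element reaches the shell).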
universal_newlines=True,
stdout = logFile
)
while True:
time.sleep(1)
p.poll()
if not p.returncode is None:
break
logFile.close()
if not os.path.exists(logFilePath):
self.fail('logFile was not created.')
return
currContent = open(logFilePath, 'rb').read()
print '------- '+self.__klFilePath+' --------'
print currContent
print '----------------------------------'
outFilePath = self.__klFilePath.rpartition('.')[0]+'.out'
if not os.path.exists(outFilePath):
self.fail('.out file does not exist.')
prevContent = open(outFilePath, 'rb').read()
prevContent = prevContent.replace('\r', '')
self.assertEqual(currContent, prevContent)
if __name__ == '__main__':
klFolder = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'kl')
klFiles = glob.glob(os.path.join(klFolder, '*.kl'))
suite = unittest.TestSuite()
for klFile in klFiles:
test = KLTest(klFile)
suite.addTest(test)
runner = unittest.TextTestRunner()
result = runner.run(suite)
| bsd-3-clause | -966,766,316,929,872,900 | 23.552083 | 105 | 0.612219 | false | 3.523169 | true | false | false |
romanvm/romans_blog | blog/templatetags/blog_tags.py | 1 | 7862 | # coding: utf-8
# Module: blog_tags
# Created on: 25.11.2015
# Author: Roman Miroshnychenko aka Roman V.M. ([email protected])
import json
from collections import namedtuple
from urllib.parse import quote_plus
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.core.paginator import EmptyPage
from ..models import Category, Post
register = template.Library()
SideBarObjects = namedtuple('SideBarObjects', ['objects', 'more'])
MenuLink = namedtuple('MenuLink', ['caption', 'url'])
@register.simple_tag
def get_categories():
"""
Simple tag
:return: list of non-empty categories ordered by post count in desc. order
"""
return Category.objects.ordered_by_post_count()
@register.simple_tag
def get_posts_digest(featured=False, posts_count=3):
"""
Simple tag
Get the lists of the latest posts (general of featured) for the blog sidebar
:param featured: if ``True`` featured posts digest is returned
:param posts_count: the number of posts to include in a digest
:return: the digest of recent posts and "More" link
:rtype: :class:`SideBarObjects`
"""
if featured:
posts = Post.objects.featured()
more_link = reverse('blog:featured_posts')
else:
posts = Post.objects.published()
more_link = reverse('blog:home')
more = more_link if posts.count() > posts_count else None
return SideBarObjects(posts[:posts_count], more)
@register.simple_tag
def get_archive_digest(months_count=6):
"""
Simple tag
:param months_count: the number of month to include in a digest
:return: the list of the most recent months from the blog archive for the blog sidebar
:rtype: :class:`SideBarObjects`
"""
months = Post.objects.published().dates(
'date_published', 'month', order='DESC')[:months_count + 1]
more = reverse('blog:archive') if len(months) > months_count else None
return SideBarObjects(months[:months_count], more)
@register.simple_tag
def get_blog_menu_links():
"""
Simple tag
:return: blog menu links for the site main menu.
"""
featured = Post.objects.featured()
featured_link = reverse('blog:featured_posts') if featured.exists() else None
return (
MenuLink(_('Recent Posts'), reverse('blog:home')),
MenuLink(_('Featured Posts'), featured_link),
MenuLink(_('Categories'), reverse('blog:categories_list')),
MenuLink(_('Archive'), reverse('blog:archive'))
)
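# Template usage sketch (illustrative; "as" with simple_tag needs Django >= 1.9):
#
#   {% load blog_tags %}
#   {% get_posts_digest featured=True posts_count=5 as digest %}
#   {% render_paginator %}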
@register.inclusion_tag('{0}/paginator.html'.format(settings.CURRENT_SKIN), takes_context=True)
def render_paginator(context, adjacent_pages=2):
"""
Inclusion tag
Renders paginator for multi-page lists.
A skin must provide the respective paginator template.
Adds pagination context variables for use in displaying first, adjacent and
last page links in addition to those created by the object_list generic
view.
:param context: parent template context
:param adjacent_pages: the number of pages adjacent to the current
:return: rendered paginator html code
"""
start_page = max(context['page_obj'].number - adjacent_pages, 1)
if start_page <= 3:
start_page = 1
end_page = context['page_obj'].number + adjacent_pages + 1
if end_page >= context['paginator'].num_pages - 1:
end_page = context['paginator'].num_pages + 1
page_numbers = [n for n in range(start_page, end_page) if n in range(1, context['paginator'].num_pages + 1)]
page_obj = context['page_obj']
paginator = context['paginator']
try:
next_ = context['page_obj'].next_page_number()
except EmptyPage:
next_ = None
try:
previous = context['page_obj'].previous_page_number()
except EmptyPage:
previous = None
return {
'page_obj': page_obj,
'paginator': paginator,
'page': context['page_obj'].number,
'pages': context['paginator'].num_pages,
'page_numbers': page_numbers,
'next': next_,
'previous': previous,
'has_next': context['page_obj'].has_next(),
'has_previous': context['page_obj'].has_previous(),
'show_first': 1 not in page_numbers,
'show_last': context['paginator'].num_pages not in page_numbers,
'request': context['request'],
'query': quote_plus(context['query']),
}
@register.simple_tag(takes_context=True)
def check_blog_url(context):
"""
    Check whether the current URL belongs to the blog application
:param context: template context
:type context: dict
:return: check result
:rtype: bool
"""
return context['request'].path in [item.url for item in get_blog_menu_links()]
@register.inclusion_tag('common_content/json-ld.html', takes_context=True)
def blog_json_ld(context):
"""
Renders JSON-LD for the blog
:param context: parent template context
:type context: dict
:return: context for json-ld template
:rtype: dict
"""
site_url = '{}://{}'.format(
context['request'].scheme,
context['request'].get_host()
)
try:
site_logo_url = site_url + context['site_config'].site_logo.url
except AttributeError:
site_logo_url = site_url + settings.DEFAULT_LOGO
json_ld = {
'@context': 'http://schema.org',
'@type': 'Blog',
'name': context['site_config'].site_name,
'url': site_url,
'description': context['site_config'].site_tagline,
'publisher': {
'@type': 'Organization',
'name': context['site_config'].site_name,
'logo': {
'@type': 'imageObject',
'url': site_logo_url
}
}
}
return {'json_ld': json.dumps(json_ld, indent=2)}
@register.inclusion_tag('common_content/json-ld.html', takes_context=True)
def blog_post_json_ld(context):
"""
    Renders JSON-LD for a blog post
:param context: parent template context
:type context: dict
:return: context for json-ld template
:rtype: dict
"""
site_url = '{}://{}'.format(
context['request'].scheme,
context['request'].get_host()
)
try:
featured_image_url = site_url + context['post'].featured_image.url
except AttributeError:
featured_image_url = site_url + settings.DEFAULT_FEATURED_IMAGE
try:
site_logo_url = site_url + context['site_config'].site_logo.url
except AttributeError:
site_logo_url = site_url + settings.DEFAULT_LOGO
json_ld = {
'@context': 'https://schema.org',
'@type': 'BlogPosting',
'headline': context['post'].title,
'description': context['post'].meta_description,
'datePublished': (context['post'].date_published.strftime('%Y-%m-%d')
if context['post'].date_published else None),
'dateModified': (context['post'].last_updated.strftime('%Y-%m-%d')
if context['post'].last_updated else None),
'image': {
'@type': 'imageObject',
'url': featured_image_url,
},
'publisher': {
'@type': 'Organization',
'name': context['site_config'].site_name,
'logo': {
'@type': 'imageObject',
'url': site_logo_url
}
},
'author': {
'@type': 'Person',
'name': 'Roman Miroshnychenko' # todo: implement Post.author field
},
'keywords': ', '.join([category.name for category in context['post'].categories.all()]),
'mainEntityOfPage': site_url + context['request'].path,
'articleBody': context['post'].content
}
return {'json_ld': json.dumps(json_ld, indent=2)}
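# Illustrative template usage (a sketch, not from this file; the load name
# "blog_tags" and the context variables are assumptions):
#
#     {% load blog_tags %}
#     {% get_categories as categories %}
#     {% get_posts_digest featured=True posts_count=5 as digest %}
#     {% render_paginator adjacent_pages=3 %}
#     {% blog_post_json_ld %}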
| gpl-3.0 | 1,597,300,140,016,424,000 | 32.313559 | 112 | 0.619944 | false | 3.878638 | true | false | false |
yshalenyk/databridge | databridge/feed.py | 1 | 6704 | import gevent
import logging
import requests
from gevent.queue import Queue, Full
from gevent.event import Event
from .contrib.client import APICLient
from .exceptions import LBMismatchError
QUEUE_FULL_DELAY = 5
EMPTY_QUEUE_DELAY = 1
ON_EMPTY_DELAY = 10
FORWARD_WORKER_SLEEP = 5
BACKWARD_WORKER_DELAY = 1
WATCH_DELAY = 1
logger = logging.getLogger(__name__)
class APIRetreiver(object):
def __init__(self, config, **options):
if not isinstance(config, dict):
raise TypeError(
"Expected a dict as config, got {}".format(type(config))
)
self.api_host = config.get('api_host')
self.api_version = config.get('api_version')
self.api_key = config.get('api_key')
if 'api_extra_params' in options:
self._extra = options.get('api_extra_params')
self.tender_queue = Queue(maxsize=config.get('queue_max_size', 250))
self.filter_callback = options.get('filter_callback', lambda x: x)
self.forward_worker_dead = Event()
self.forward_worker_dead.set()
        self.backward_worker_dead = Event()
        self.backward_worker_dead.set()
        self.reinit_clients = Event()
self._init_clients()
def _init_clients(self):
logger.info('Sync: Init clients')
self.forward_client = APICLient(
self.api_key,
self.api_host,
self.api_version
)
self.backward_client = APICLient(
self.api_key,
self.api_host,
self.api_version
)
self.origin_cookie = self.forward_client.session.cookies
self.backward_client.session.cookies = self.origin_cookie
def _get_sync_point(self):
logger.info('Sync: initializing sync')
forward = {'feed': 'changes'}
backward = {'feed': 'changes', 'descending': '1'}
if getattr(self, '_extra', ''):
            for feed_params in (forward, backward):
                feed_params.update(self._extra)
r = self.backward_client.get_tenders(backward)
backward['offset'] = r['next_page']['offset']
forward['offset'] = r['prev_page']['offset']
        logger.debug(forward)
self.tender_queue.put(filter(self.filter_callback, r['data']))
logger.info('Sync: initial sync params forward: '
'{}, backward: {}'.format(forward, backward))
return forward, backward
def _start_sync_workers(self):
forward, backward = self._get_sync_point()
self.workers = [
gevent.spawn(self._forward_worker, forward),
gevent.spawn(self._backward_worker, backward),
]
def _forward_worker(self, params):
worker = "Forward worker:"
logger.info('{} starting'.format(worker))
r = self.forward_client.get_tenders(params)
if self.forward_client.session.cookies != self.origin_cookie:
raise LBMismatchError
try:
while True:
try:
while r['data']:
try:
self.tender_queue.put(
filter(self.filter_callback, r['data'])
)
except Full:
while self.tender_queue.full():
gevent.sleep(QUEUE_FULL_DELAY)
self.tender_queue.put(
filter(self.filter_callback, r['data'])
)
params['offset'] = r['next_page']['offset']
r = self.forward_client.get_tenders(params)
if self.forward_client.session.cookies != self.origin_cookie:
raise LBMismatchError
                    if r['data']:
                        gevent.sleep(FORWARD_WORKER_SLEEP)
                    else:
                        logger.warn('{} got empty listing. Sleep'.format(worker))
                        gevent.sleep(ON_EMPTY_DELAY)
except LBMismatchError:
logger.info('LB mismatch error on backward worker')
self.reinit_clients.set()
except Exception as e:
logger.error("{} down! Error: {}".format(worker, e))
self.forward_worker_dead.set()
else:
logger.error("{} finished.".format(worker))
def _backward_worker(self, params):
worker = "Backward worker: "
logger.info('{} staring'.format(worker))
try:
while True:
try:
r = self.backward_client.get_tenders(params)
if not r['data']:
logger.debug('{} empty listing..exiting'.format(worker))
break
                    gevent.sleep(BACKWARD_WORKER_DELAY)
if self.backward_client.session.cookies != self.origin_cookie:
raise LBMismatchError
try:
self.tender_queue.put(
filter(self.filter_callback, r['data'])
)
except Full:
logger.error('full queue')
while self.tender_queue.full():
gevent.sleep(QUEUE_FULL_DELAY)
self.tender_queue.put(
filter(self.filter_callback, r['data'])
)
params['offset'] = r['next_page']['offset']
except LBMismatchError:
logger.info('{} LB mismatch error'.format(worker))
if not self.reinit_clients.is_set():
self.reinit_clients.set()
except Exception as e:
logger.error("{} down! Error: {}".format(worker, e))
            self.backward_worker_dead.set()
else:
logger.error("{} finished.".format(worker))
def _restart_workers(self):
self._init_clients()
gevent.killall(self.workers)
self._start_sync_workers()
return self.workers
def get_tenders(self):
self._start_sync_workers()
forward, backward = self.workers
try:
while True:
if self.tender_queue.empty():
gevent.sleep(EMPTY_QUEUE_DELAY)
if (forward.dead or forward.ready()) or \
(backward.dead and not backward.successful()):
forward, backward = self._restart_workers()
while not self.tender_queue.empty():
yield self.tender_queue.get()
except Exception as e:
logger.error(e)
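# Minimal usage sketch (config values and the `handle` callable are
# assumptions, not part of this module):
#
#     retriever = APIRetreiver({
#         'api_host': 'https://public.api.example.com',
#         'api_version': '2.3',
#         'api_key': '',
#     }, filter_callback=lambda tender: tender.get('status') == 'active')
#     for batch in retriever.get_tenders():
#         handle(batch)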
| apache-2.0 | -4,660,910,926,064,261,000 | 35.835165 | 85 | 0.514171 | false | 4.463382 | true | false | false |
alphagov/notifications-api | migrations/versions/0009_created_by_for_jobs.py | 1 | 1253 | """empty message
Revision ID: 0009_created_by_for_jobs
Revises: 0008_archive_template
Create Date: 2016-04-26 14:54:56.852642
"""
# revision identifiers, used by Alembic.
revision = '0009_created_by_for_jobs'
down_revision = '0008_archive_template'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('jobs', sa.Column('created_by_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_index(op.f('ix_jobs_created_by_id'), 'jobs', ['created_by_id'], unique=False)
op.create_foreign_key(None, 'jobs', 'users', ['created_by_id'], ['id'])
op.get_bind()
op.execute('UPDATE jobs SET created_by_id = \
(SELECT user_id FROM user_to_service WHERE jobs.service_id = user_to_service.service_id LIMIT 1)')
op.alter_column('jobs', 'created_by_id', nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'jobs', type_='foreignkey')
op.drop_index(op.f('ix_jobs_created_by_id'), table_name='jobs')
op.drop_column('jobs', 'created_by_id')
### end Alembic commands ###
| mit | -4,739,229,239,736,392,000 | 35.852941 | 114 | 0.676776 | false | 3.254545 | false | false | false |
sergey-dryabzhinsky/dedupsqlfs | dedupsqlfs/db/mysql/table/link.py | 1 | 2607 | # -*- coding: utf8 -*-
__author__ = 'sergey'
from dedupsqlfs.db.mysql.table import Table
class TableLink( Table ):
_table_name = "link"
def create( self ):
c = self.getCursor()
# Create table
c.execute(
"CREATE TABLE IF NOT EXISTS `%s` (" % self.getName()+
"`inode_id` BIGINT UNSIGNED PRIMARY KEY, "+
"`target` BLOB NOT NULL"+
")"+
self._getCreationAppendString()
)
return
def insert( self, inode, target):
"""
:param target: bytes
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"INSERT INTO `%s` " % self.getName()+
" (`inode_id`, `target`) VALUES (%(inode)s, %(target)s)",
{
"inode": inode,
"target": target
}
)
item = cur.lastrowid
self.stopTimer('insert')
return item
def find_by_inode( self, inode):
"""
:param inode: int
:return: int
"""
self.startTimer()
cur = self.getCursor()
cur.execute(
"SELECT `target` FROM `%s` " % self.getName()+
" WHERE `inode_id`=%(inode)s",
{
"inode": inode
}
)
item = cur.fetchone()
if item:
item = item["target"]
self.stopTimer('find_by_inode')
return item
def get_count(self):
self.startTimer()
cur = self.getCursor()
cur.execute("SELECT COUNT(1) as `cnt` FROM `%s`" % self.getName())
item = cur.fetchone()
if item:
item = item["cnt"]
else:
item = 0
self.stopTimer('get_count')
return item
def get_inode_ids(self, start_id, end_id):
self.startTimer()
cur = self.getCursor()
cur.execute("SELECT `inode_id` FROM `%s` " % self.getName()+
" WHERE `inode_id`>=%s AND `inode_id`<%s", (start_id, end_id,))
nameIds = set(str(item["inode_id"]) for item in cur)
self.stopTimer('get_inode_ids')
return nameIds
def remove_by_ids(self, inode_ids):
self.startTimer()
count = 0
id_str = ",".join(inode_ids)
if id_str:
cur = self.getCursor()
cur.execute("DELETE FROM `%s` " % self.getName()+
" WHERE `inode_id` IN (%s)" % (id_str,))
count = cur.rowcount
self.stopTimer('remove_by_ids')
return count
pass
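# Usage sketch (a connected manager object providing getCursor() is assumed):
#
#     table = TableLink(manager)
#     table.create()
#     table.insert(42, b"/symlink/target")
#     assert table.find_by_inode(42) == b"/symlink/target"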
| mit | 919,150,897,091,248,600 | 25.876289 | 83 | 0.472957 | false | 3.891045 | false | false | false |
sassoftware/rbuild | rbuild_test/unit_test/internaltest/plugintest.py | 2 | 4098 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild_test import rbuildhelp
from rbuild import errors
from rbuild import pluginapi
class MyPlugin(pluginapi.Plugin):
foo = 'bar'
def myApiCall(self, *args, **kw):
print 'api call: %s, %s' % (args, kw)
return 'return value'
class PluginTest(rbuildhelp.RbuildHelper):
def myHook(self, *args, **kw):
args = ('foo', ) + args[1:]
return args, {'newkw' : kw['kw']}
def myHook2(self, *args, **kw):
args = ('barz ' + args[0], ) + args[1:]
return args, {'newkw2' : kw['newkw']}
def myPostHook(self, rv, *args, **kw):
return rv+' augmented'
def myPostHookError(self, rv, *args, **kw):
raise KeyError
def brokenHook(self, *args, **kw):
return 3
def testPrehooks(self):
plugin = MyPlugin('plugin', 'path', None)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
assert(rc == 'return value')
self.assertEquals(txt, "api call: ('arg1',), {'kw': 'kw1'}\n")
plugin._installPrehook('myApiCall', self.myHook)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
assert(rc == 'return value')
self.assertEquals(txt, "api call: ('foo',), {'newkw': 'kw1'}\n")
plugin._installPrehook('myApiCall', self.myHook2)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
self.assertEquals(txt, "api call: ('barz foo',), {'newkw2': 'kw1'}\n")
plugin._installPrehook('myApiCall', self.brokenHook)
err = self.discardOutput(
self.assertRaises, errors.InvalidHookReturnError,
plugin.myApiCall, 'arg1', kw='kw1')
self.assertEquals(err.hook, self.brokenHook)
# after removing the broken hook this should work.
plugin._getPrehooks('myApiCall').remove(self.brokenHook)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
def testPrehookErrors(self):
plugin = MyPlugin('plugin', 'path', None)
err = self.assertRaises(errors.InvalidAPIMethodError,
plugin._installPrehook, 'nosuchApi', self.myHook)
self.assertEquals(err.method, 'nosuchApi')
err = self.assertRaises(errors.InvalidAPIMethodError,
plugin._getPrehooks, 'nosuchApi')
self.assertEquals(err.method, 'nosuchApi')
def testPosthooks(self):
plugin = MyPlugin('plugin', 'path', None)
plugin._installPosthook('myApiCall', self.myPostHook)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
assert(rc == 'return value augmented')
def testPosthookErrors(self):
plugin = MyPlugin('plugin', 'path', None)
plugin._installPosthook('myApiCall', self.myPostHookError)
err = self.discardOutput(
self.assertRaises, KeyError,
plugin.myApiCall, 'arg1', kw='kw1')
# after removing the broken hook this should work.
plugin._getPosthooks('myApiCall').remove(self.myPostHookError)
rc, txt = self.captureOutput(plugin.myApiCall, 'arg1', kw='kw1')
assert(rc == 'return value')
err = self.assertRaises(errors.InvalidAPIMethodError,
plugin._installPosthook, 'nosuchApi', self.myPostHook)
self.assertEquals(err.method, 'nosuchApi')
err = self.assertRaises(errors.InvalidAPIMethodError,
plugin._getPosthooks, 'nosuchApi')
self.assertEquals(err.method, 'nosuchApi')
| apache-2.0 | 4,570,665,599,803,566,000 | 37.299065 | 78 | 0.63592 | false | 3.597893 | true | false | false |
yulia-baturina/python_training | fixture/group.py | 1 | 4587 | __author__ = 'IEUser'
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def is_groups_page_opened(self):
wd = self.app.wd
return wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0
def return_to_groups_page(self):
wd = self.app.wd
if not self.is_groups_page_opened():
wd.find_element_by_link_text("group page").click()
def fill_in_fields(self, group):
wd = self.app.wd
if group.name:
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
if group.header:
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
if group.footer:
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_name("new").click()
# fill in group fields
self.fill_in_fields(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache=None
def open_groups_page(self):
wd = self.app.wd
if not self.is_groups_page_opened():
wd.find_element_by_link_text("groups").click()
def select_first_group(self):
self.select_group_by_index(0)
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_groups_page()
# select group by index
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache=None
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_groups_page()
# select group by index
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache=None
def modify_group_by_index(self, group, index):
wd = self.app.wd
self.open_groups_page()
# select first group
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
# fill in group fields
self.fill_in_fields(group)
# submit update
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache=None
def modify_group_by_id(self, group, id):
wd = self.app.wd
self.open_groups_page()
# select first group
self.select_group_by_id(id)
wd.find_element_by_name("edit").click()
# fill in group fields
self.fill_in_fields(group)
# submit update
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache=None
def edit_first_group(self, group):
self.modify_group_by_index(group,0)
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache=None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_groups_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
def remove_contact_by_id_from_group(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@value='%s']" % id).click()
wd.find_element_by_xpath("//*[@name='remove']").click()
self.app.navigation.return_to_home_page()
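# Usage sketch (an initialized `app` fixture wrapping a webdriver, and the
# Group model imported above, are assumed):
#
#     helper = GroupHelper(app)
#     helper.create(Group(name="qa", header="h", footer="f"))
#     groups = helper.get_group_list()
#     helper.delete_group_by_id(groups[0].id)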
| apache-2.0 | 8,509,506,878,258,116,000 | 32.727941 | 97 | 0.583824 | false | 3.459276 | false | false | false |
kartvep/Combaine | combaine/common/loggers.py | 1 | 3113 | import logging
import logging.handlers
import combaine.common.configloader.config
__all__ = ["ParsingLogger", "AggregateLogger", "CommonLogger"]
def _initLogger(name):
try:
config = combaine.common.configloader.config.parse_common_cfg("combaine")['cloud_config']
except Exception as err:
        print err
else:
_format = logging.Formatter("%(levelname)-5s %(asctime)s %(id)s %(message)s", "%Y-%m-%d %H:%M:%S")
parsing_log = logging.getLogger('combaine.%s' % name)
log_level = eval('logging.' + config['log_level'])
fh = logging.handlers.TimedRotatingFileHandler('/var/log/combaine/%s.log' % name, when="midnight", backupCount=3)
fh.setFormatter(_format)
fh.setLevel(log_level)
sh = logging.StreamHandler()
sh.setFormatter(_format)
sh.setLevel(log_level)
parsing_log.addHandler(fh)
parsing_log.addHandler(sh)
parsing_log.setLevel(log_level)
class GlobalLogId(object):
    def __new__(cls, _id):
        if not hasattr(cls, "_instanse"):
            print "INIT GLOBAL LOGGER ID"
            cls._instanse = super(GlobalLogId, cls).__new__(cls)
        cls._id = _id
        return cls._instanse
@classmethod
def get_id(cls):
if hasattr(cls, "_id"):
return cls._id
else:
return "DUMMY_ID"
class ParsingLogger(object):
def __new__(cls, _id):
if not hasattr(cls, "_instanse"):
cls._instanse = super(ParsingLogger, cls).__new__(cls)
_initLogger("parsing")
GlobalLogId(_id)
return logging.LoggerAdapter(logging.getLogger("combaine.parsing"), {"id" : _id})
class AggregateLogger(object):
def __new__(cls, _id):
if not hasattr(cls, "_instanse"):
cls._instanse = super(AggregateLogger, cls).__new__(cls)
_initLogger("aggregate")
GlobalLogId(_id)
return logging.LoggerAdapter(logging.getLogger("combaine.aggregate"), {"id" : _id})
class DataFetcherLogger(object):
def __new__(cls):
if not hasattr(cls, "_instanse"):
cls._instanse = super(DataFetcherLogger, cls).__new__(cls)
_initLogger("datafetcher")
return logging.LoggerAdapter(logging.getLogger("combaine.datafetcher"), {"id" : GlobalLogId.get_id()})
class DataGridLogger(object):
def __new__(cls):
if not hasattr(cls, "_instanse"):
cls._instanse = super(DataGridLogger, cls).__new__(cls)
_initLogger("datagrid")
return logging.LoggerAdapter(logging.getLogger("combaine.datagrid"), {"id" : GlobalLogId.get_id()})
class CommonLogger(object):
def __new__(cls):
if hasattr(ParsingLogger, "_instanse"):
return logging.LoggerAdapter(logging.getLogger("combaine.parsing"), {"id" : GlobalLogId.get_id()})
elif hasattr(AggregateLogger, "_instanse"):
return logging.LoggerAdapter(logging.getLogger("combaine.aggregate"), {"id" : GlobalLogId.get_id()})
else:
return logging.LoggerAdapter(logging.getLogger("combaine"), {"id" : GlobalLogId.get_id()})
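# Usage sketch: the first logger created fixes the shared log id, which
# CommonLogger then reuses ("task-42" is an assumed id):
#
#     log = ParsingLogger("task-42")
#     log.info("parsing started")
#     CommonLogger().debug("same id: %s", GlobalLogId.get_id())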
| lgpl-3.0 | -6,377,385,242,612,563,000 | 33.977528 | 121 | 0.612592 | false | 3.688389 | true | false | false |
liqd/adhocracy3.mercator | src/adhocracy_core/adhocracy_core/auditing/__init__.py | 2 | 2629 | """Log which user modifies resources in additional 'audit' database."""
import substanced.util
import transaction
from pyramid.i18n import TranslationStringFactory
from pyramid.traversal import resource_path
from pyramid.request import Request
from BTrees.OOBTree import OOBTree
from logging import getLogger
from adhocracy_core.interfaces import IResource
from adhocracy_core.interfaces import SerializedActivity
from adhocracy_core.interfaces import Activity
logger = getLogger(__name__)
_ = TranslationStringFactory('adhocracy')
class AuditLog(OOBTree):
"""An Auditlog composed of audit entries.
This is a dictionary (:class:`collections.abc.Mapping`) with key
:class:`datetime.datetime` and value
:class:`adhocracy_core.interfaces.SerializedActivity`.
The methods `items`, `keys`, and `values` have the additional kwargs
`max_key` and `min_key` to allow range queries::
january = datetime(2015, 1, 1)
february = datetime(2015, 2, 1)
audit = get_auditlog(context)
audit.items(min=january, max=february)
...
"""
def add(self, activity: Activity) -> None:
"""Serialize `activity` and store in audit log."""
kwargs = {'object_path': resource_path(activity.object),
'type': activity.type,
}
if activity.subject:
kwargs['subject_path'] = resource_path(activity.subject)
if activity.target:
kwargs['target_path'] = resource_path(activity.target)
if activity.sheet_data:
kwargs['sheet_data'] = activity.sheet_data
entry = SerializedActivity()._replace(**kwargs)
self[activity.published] = entry
def get_auditlog(context: IResource) -> AuditLog:
"""Return the auditlog."""
return substanced.util.get_auditlog(context)
def set_auditlog(context: IResource) -> None:
"""Set an auditlog for the context."""
conn = context._p_jar
try:
connection = conn.get_connection('audit')
except KeyError:
return
root = connection.root()
if 'auditlog' in root:
return
auditlog = AuditLog()
root['auditlog'] = auditlog
def add_to_auditlog(activities: [Activity],
request: Request) -> None:
"""Add activities to the audit database.
The audit database is created if missing. If the `zodbconn.uri.audit`
value is not specified in the config, auditing does not happen.
"""
auditlog = get_auditlog(request.root)
if auditlog is None:
return
for activity in activities:
auditlog.add(activity)
transaction.commit()
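# Usage sketch (a configured 'audit' ZODB connection, an Activity instance and
# a request are assumptions; see set_auditlog above):
#
#     set_auditlog(request.root)
#     add_to_auditlog([activity], request)
#     auditlog = get_auditlog(request.root)
#     entries = list(auditlog.items())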
| agpl-3.0 | -6,708,229,324,273,740,000 | 30.297619 | 73 | 0.669456 | false | 3.983333 | false | false | false |
QISKit/qiskit-sdk-py | qiskit/extensions/standard/t.py | 1 | 2534 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
T=sqrt(S) phase gate or its inverse.
"""
import numpy
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
from qiskit.qasm import pi
from qiskit.extensions.standard.u1 import U1Gate
class TGate(Gate):
"""T Gate: pi/4 rotation around Z axis."""
def __init__(self, label=None):
"""Create new T gate."""
super().__init__("t", 1, [], label=label)
def _define(self):
"""
gate t a { u1(pi/4) a; }
"""
definition = []
q = QuantumRegister(1, "q")
rule = [
(U1Gate(pi/4), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return TdgGate()
def to_matrix(self):
"""Return a Numpy.array for the S gate."""
return numpy.array([[1, 0],
[0, (1+1j) / numpy.sqrt(2)]], dtype=complex)
class TdgGate(Gate):
"""T Gate: -pi/4 rotation around Z axis."""
def __init__(self, label=None):
"""Create new Tdg gate."""
super().__init__("tdg", 1, [], label=label)
def _define(self):
"""
gate t a { u1(pi/4) a; }
"""
definition = []
q = QuantumRegister(1, "q")
rule = [
(U1Gate(-pi/4), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return TGate()
def to_matrix(self):
"""Return a Numpy.array for the S gate."""
return numpy.array([[1, 0],
[0, (1-1j) / numpy.sqrt(2)]], dtype=complex)
def t(self, q):
"""Apply T to q."""
return self.append(TGate(), [q], [])
def tdg(self, q):
"""Apply Tdg to q."""
return self.append(TdgGate(), [q], [])
QuantumCircuit.t = t
QuantumCircuit.tdg = tdg
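# Example: T followed by its inverse composes to the identity on one qubit.
#
#     from qiskit import QuantumCircuit
#     qc = QuantumCircuit(1)
#     qc.t(0)
#     qc.tdg(0)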
| apache-2.0 | 4,434,833,675,723,367,400 | 24.59596 | 77 | 0.561957 | false | 3.480769 | false | false | false |
franga2000/django-machina | machina/apps/forum_tracking/abstract_models.py | 1 | 2058 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from machina.core.loading import get_class
ForumReadTrackManager = get_class('forum_tracking.managers', 'ForumReadTrackManager')
@python_2_unicode_compatible
class AbstractForumReadTrack(models.Model):
"""
Represents a track which records which forums have been read by a given user.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='forum_tracks', on_delete=models.CASCADE,
verbose_name=_('User'))
forum = models.ForeignKey(
'forum.Forum', related_name='tracks', on_delete=models.CASCADE, verbose_name=_('Forum'))
mark_time = models.DateTimeField(auto_now=True, db_index=True)
objects = ForumReadTrackManager()
class Meta:
abstract = True
app_label = 'forum_tracking'
unique_together = ['user', 'forum', ]
verbose_name = _('Forum track')
verbose_name_plural = _('Forum tracks')
def __str__(self):
return '{} - {}'.format(self.user, self.forum)
@python_2_unicode_compatible
class AbstractTopicReadTrack(models.Model):
"""
Represents a track which records which topics have been read by a given user.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='topic_tracks', on_delete=models.CASCADE,
verbose_name=_('User'))
topic = models.ForeignKey(
'forum_conversation.Topic', related_name='tracks', on_delete=models.CASCADE,
verbose_name=_('Topic'))
mark_time = models.DateTimeField(auto_now=True, db_index=True)
class Meta:
abstract = True
app_label = 'forum_tracking'
unique_together = ['user', 'topic', ]
verbose_name = _('Topic track')
verbose_name_plural = _('Topic tracks')
def __str__(self):
return '{} - {}'.format(self.user, self.topic)
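# Concrete models are expected to subclass these abstract bases, e.g.
# (a sketch; the class names are assumptions):
#
#     class ForumReadTrack(AbstractForumReadTrack):
#         pass
#
#     class TopicReadTrack(AbstractTopicReadTrack):
#         pass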
| bsd-3-clause | -2,361,927,502,164,073,000 | 32.193548 | 96 | 0.661322 | false | 3.875706 | false | false | false |
Vauxoo/stock-logistics-workflow | stock_lot_scrap/models/stock_scrap_lot.py | 2 | 3602 | # Copyright 2016 Carlos Dauden <[email protected]>
# Copyright 2016 Pedro M. Baeza <[email protected]>
# Copyright 2017 David Vidal <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, models
from odoo.exceptions import ValidationError
from lxml import etree
class StockScrap(models.Model):
_inherit = 'stock.scrap'
@api.multi
def action_validate(self):
self.ensure_one()
self.lot_id.message_post(
body=_("Lot was scrapped by <b>%s</b>.") % self.env.user.name)
return super(StockScrap, self).action_validate()
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False,
submenu=False): # pragma: no cover
"""Inject the button here to avoid conflicts with other modules
that add a header element in the main view.
"""
res = super(StockProductionLot, self).fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar,
submenu=submenu)
eview = etree.fromstring(res['arch'])
xml_header = eview.xpath("//header")
if not xml_header:
# Create a header
header_element = etree.Element('header')
# Append it to the view
forms = eview.xpath("//form")
if forms:
forms[0].insert(0, header_element)
else:
header_element = xml_header[0]
button_element = etree.Element(
'button', {'type': 'object',
'name': 'action_scrap_lot',
'confirm': _('This will scrap the whole lot. Are you'
' sure you want to continue?'),
'string': _('Scrap')})
header_element.append(button_element)
res['arch'] = etree.tostring(eview)
return res
def _prepare_scrap_vals(self, quant, scrap_location_id):
self.ensure_one()
return {
'origin': quant.lot_id.name,
'product_id': quant.product_id.id,
'product_uom_id': quant.product_id.uom_id.id,
'scrap_qty': quant.quantity,
'location_id': quant.location_id.id,
'scrap_location_id': scrap_location_id,
'lot_id': self.id,
'package_id': quant.package_id.id,
}
@api.multi
def action_scrap_lot(self):
self.ensure_one()
quants = self.quant_ids.filtered(
lambda x: x.location_id.usage == 'internal',
)
if not quants:
raise ValidationError(
_("This lot doesn't contain any quant in internal location."),
)
scrap_obj = self.env['stock.scrap']
scraps = scrap_obj.browse()
scrap_location_id = self.env.ref('stock.stock_location_scrapped').id
for quant in quants:
scrap = scrap_obj.create(
self._prepare_scrap_vals(quant, scrap_location_id),
)
scraps |= scrap
result = self.env.ref('stock.action_stock_scrap').read()[0]
result['context'] = self.env.context
if len(scraps) != 1:
result['domain'] = "[('id', 'in', %s)]" % scraps.ids
else: # pragma: no cover
res = self.env.ref('stock.stock_scrap_form_view', False)
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = scraps.id
return result
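# Server-side usage sketch (`lot` is an assumed stock.production.lot record
# with quants in internal locations):
#
#     action = lot.action_scrap_lot()
#     # -> action window listing the stock.scrap records created above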
| agpl-3.0 | -6,001,856,978,350,550,000 | 37.319149 | 78 | 0.561077 | false | 3.683027 | false | false | false |
enen92/script.sportscenter | resources/lib/tweetbuild.py | 1 | 1318 | import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import urllib,re,datetime
import thesportsdb,feedparser
from random import randint
from centerutils.common_variables import *
from centerutils.tweet import *
def tweets(tweeter_user):
window = dialog_tweet('DialogTweeter.xml',addonpath,'Default',str(tweeter_user))
window.doModal()
class dialog_tweet(xbmcgui.WindowXMLDialog):
def __init__( self, *args, **kwargs ):
xbmcgui.WindowXML.__init__(self)
self.mode = eval(args[3])[0]
if self.mode == 'user':
self.twitter_var = eval(args[3])[1]
self.twitter_list = get_tweets(self.twitter_var)
else:
self.twitter_var = eval(args[3])[1]
self.twitter_list = get_hashtag_tweets(self.twitter_var)
def onInit(self):
#set twitter logo
self.getControl(3).setImage(os.path.join(addonpath,'resources','img','twitter.png'))
#set twitter user name
if self.mode == 'user':
self.getControl(1).setLabel('@'+self.twitter_var)
else:
self.getControl(1).setLabel('#'+self.twitter_var.replace('#',''))
for tweet_item,tweet_item_date in self.twitter_list:
tweet = xbmcgui.ListItem(tweet_item)
match = re.compile('(.+?) \+').findall(tweet_item_date)
if match:
tweet.setProperty('tweet_date',match[0])
self.getControl(6).addItem(tweet)
self.setFocusId(6)
self.getControl(6).selectItem(0)
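# Usage sketch: tweets() expects a (mode, value) pair, stringified for the
# window args (the values are assumptions):
#
#     tweets(('user', 'nasa'))          # timeline of @nasa
#     tweets(('hashtag', '#formula1'))  # hashtag search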
| gpl-2.0 | 6,564,910,876,334,208,000 | 31.146341 | 86 | 0.709408 | false | 2.948546 | false | false | false |
NiceCircuits/pcbLibraryManager | src/pcbLibraryManager/libraries/libraryTest.py | 1 | 4474 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 06:41:54 2016
@author: piotr at nicecircuits.com
"""
from libraryManager.library import libraryClass
from libraryManager.part import part
from footprints.footprintSmdQuad import *
from footprints.footprintSmdDualRow import *
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import *
from symbols.symbolsIC import symbolIC
from libraryManager.symbolPrimitive import *
from parts.icGenerator import icGenerator
from libraryManager.footprint import footprint
import os.path
from libraryManager.generateLibraries import generateLibraries
from libraries.libraryOpamps import symbolOpamp
class libraryTest(libraryClass):
"""
"""
def __init__(self):
super().__init__("Test")
# ============== R7F7010343AFP ==============
footprints = [footprintSmdQuad("R7F7010343AFP","niceSemiconductors",\
176,0.5,[25.4,25.4],[1.3,0.3],[24.1,24.1,1.7],defaults.court["N"],\
[1.0,0.25,1.7/2])]
path=os.path.join(os.path.dirname(__file__),"R7F7010343AFP.ods")
#generate quad pin-by-pin symbols
self.parts.extend(icGenerator.generate(path,pinNames=None,\
footprints=footprints,symbolType="quad",namePosfix="",size=5600))
# ============== AXK5S60047YG ==============
# added as footprints only
self.parts[0].footprints.append(footprintAXK5S60047YG())
self.parts[0].footprints.append(footprintAXK6S60447YG())
# ============== Dummy semiconductor for package generation ==============
self.parts.append(partDummySemiconductor())
class partDummySemiconductor(part):
"""
Dummy part
"""
def __init__(self, name="Dummy", refDes=defaults.icRefDes):
super().__init__(name, refDes)
self.symbols.append(symbolOpamp())
for density in ["N", "L", "M"]:
for pinCount in [8, 14, 16]:
self.footprints.append(footprintSoic(pinCount=pinCount,density=density))
for pinCount in [3,5,6,8]:
self.footprints.append(footprintSot23(pinCount=pinCount,density=density))
self.footprints.append(footprintSc70(pinCount=pinCount,density=density))
class footprintAXK5S60047YG(footprintSmdDualRow):
"""
Panasonic Narrow Pitch Connector P5KS Socket 60 pin
For mated height 4.0 mm, 5.0 mm and 6.0 mm
Without positioning boss/with direction for protection from reverse mating
"""
def __init__(self, name="AXK5S60047YG", alternativeLibName="niceConectorsOther", density="N", wide=False):
super().__init__(name, alternativeLibName,\
pinCount=60, pitch=0.5,\
padSpan=4.6,padDimensions=[0.25,2.2],\
bodyDimensions=[18.40,4.8,3.05],\
leadDimensions=[0.4,0.2,0.18],\
court = defaults.court[density],\
leadStyle="cube_metal")
pads=["%d"%(2*i+1) for i in range(30)]+["%d"%(60-2*i) for i in range(30)]
self.renamePads(pads)
class footprintAXK6S60447YG(footprintSmdDualRow):
"""
Panasonic Narrow Pitch Connector P5KS Header 60 pin
For mated height 4.0 mm, 4.5 mm and 7.0 mm
Without positioning boss/with direction for protection from reverse mating
"""
def __init__(self, name="AXK6S60447YG", alternativeLibName="niceConectorsOther", density="N", wide=False):
super().__init__(name, alternativeLibName,\
pinCount=60, pitch=0.5,\
padSpan=4.2,padDimensions=[0.25,2.2],\
bodyDimensions=[18.40,4.4,0.95],\
leadDimensions=[0.4,0.2,0.23],\
court = defaults.court[density],\
leadStyle="cube_metal")
pads=["%d"%(2*i+1) for i in range(30)]+["%d"%(60-2*i) for i in range(30)]
self.renamePads(pads)
self.addSimple3Dbody([0,0],[16.5,1.72,3.3])
class pogoPin(footprint):
"""
"""
def __init__(self, name="AXK5S60047YG", alternativeLibName="niceConectorsOther", density="N", wide=False):
super().__init__(name, alternativeLibName,\
pinCount=60, pitch=0.5,\
padSpan=4.6,padDimensions=[0.25,2.2],\
bodyDimensions=[18.40,4.8,3.05],\
leadDimensions=[0.4,0.2,0.18],\
court = defaults.court[density],\
leadStyle="cube_metal")
pads=["%d"%(2*i+1) for i in range(30)]+["%d"%(60-2*i) for i in range(30)]
self.renamePads(pads)
if __name__ == "__main__":
generateLibraries([libraryTest()])
| cc0-1.0 | 3,821,752,789,901,822,000 | 40.045872 | 110 | 0.630085 | false | 3.249092 | false | false | false |
UdK-VPT/Open_eQuarter | mole/extensions/eval3/oeq_QTP_Wall.py | 1 | 1369 | # -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import contemporary_base_uvalue_by_building_age_lookup
def calculation(self=None, parameters={}, feature=None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
wl_qtp = NULL
if not oeq_global.isnull([parameters['WL_AR'],parameters['WL_UP'],parameters['HHRS']]):
wl_qtp=float(parameters['WL_AR']) * float(parameters['WL_UP'])*float(parameters['HHRS'])/1000
return {'WL_QTP': {'type': QVariant.Double, 'value': wl_qtp}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='Present Transm. Heat Loss',
extension_name='Wall Quality (QT, Present)',
layer_name= 'QT Wall Present',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='WL_QTP',
source_type='none',
par_in=['WL_AR','WL_UP','HHRS'],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['WL_QTP'],
description=u"Calculate the present Transmission Heat Loss of the Building's Walls",
evaluation_method=calculation)
extension.registerExtension(default=True)
| gpl-2.0 | -3,066,049,638,907,766,000 | 34.102564 | 101 | 0.693207 | false | 3.244076 | false | false | false |
jgao54/airflow | airflow/contrib/hooks/ssh_hook.py | 3 | 10449 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import getpass
import os
import warnings
import paramiko
from paramiko.config import SSH_PORT
from sshtunnel import SSHTunnelForwarder
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class SSHHook(BaseHook, LoggingMixin):
"""
Hook for ssh remote execution using Paramiko.
ref: https://github.com/paramiko/paramiko
    This hook also lets you create an ssh tunnel and serves as a basis for SFTP file transfer
:param ssh_conn_id: connection id from airflow Connections from where all the required
parameters can be fetched like username, password or key_file.
Thought the priority is given to the param passed during init
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param username: username to connect to the remote_host
:type username: str
:param password: password of the username to connect to the remote_host
:type password: str
:param key_file: key file to use to connect to the remote_host.
:type key_file: str
:param port: port of remote host to connect (Default is paramiko SSH_PORT)
:type port: int
:param timeout: timeout for the attempt to connect to the remote_host.
:type timeout: int
:param keepalive_interval: send a keepalive packet to remote host every
keepalive_interval seconds
:type keepalive_interval: int
"""
def __init__(self,
ssh_conn_id=None,
remote_host=None,
username=None,
password=None,
key_file=None,
port=None,
timeout=10,
keepalive_interval=30
):
super(SSHHook, self).__init__(ssh_conn_id)
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.username = username
self.password = password
self.key_file = key_file
self.port = port
self.timeout = timeout
self.keepalive_interval = keepalive_interval
# Default values, overridable from Connection
self.compress = True
self.no_host_key_check = True
self.allow_host_key_change = False
self.host_proxy = None
# Placeholder for deprecated __enter__
self.client = None
# Use connection to override defaults
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if self.username is None:
self.username = conn.login
if self.password is None:
self.password = conn.password
if self.remote_host is None:
self.remote_host = conn.host
if self.port is None:
self.port = conn.port
if conn.extra is not None:
extra_options = conn.extra_dejson
self.key_file = extra_options.get("key_file")
if "timeout" in extra_options:
self.timeout = int(extra_options["timeout"], 10)
if "compress" in extra_options\
and str(extra_options["compress"]).lower() == 'false':
self.compress = False
if "no_host_key_check" in extra_options\
and\
str(extra_options["no_host_key_check"]).lower() == 'false':
self.no_host_key_check = False
if "allow_host_key_change" in extra_options\
and\
str(extra_options["allow_host_key_change"]).lower() == 'true':
self.allow_host_key_change = True
if not self.remote_host:
raise AirflowException("Missing required param: remote_host")
# Auto detecting username values from system
if not self.username:
self.log.debug(
"username to ssh to host: %s is not specified for connection id"
" %s. Using system's default provided by getpass.getuser()",
self.remote_host, self.ssh_conn_id
)
self.username = getpass.getuser()
user_ssh_config_filename = os.path.expanduser('~/.ssh/config')
if os.path.isfile(user_ssh_config_filename):
ssh_conf = paramiko.SSHConfig()
ssh_conf.parse(open(user_ssh_config_filename))
host_info = ssh_conf.lookup(self.remote_host)
if host_info and host_info.get('proxycommand'):
self.host_proxy = paramiko.ProxyCommand(host_info.get('proxycommand'))
if not (self.password or self.key_file):
if host_info and host_info.get('identityfile'):
self.key_file = host_info.get('identityfile')[0]
self.port = self.port or SSH_PORT
def get_conn(self):
"""
Opens a ssh connection to the remote host.
:return paramiko.SSHClient object
"""
self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
client = paramiko.SSHClient()
if not self.allow_host_key_change:
            self.log.warning('Remote Identification Change is not verified. '
                             'This won\'t protect against Man-In-The-Middle attacks')
client.load_system_host_keys()
if self.no_host_key_check:
            self.log.warning('No Host Key Verification. This won\'t protect '
                             'against Man-In-The-Middle attacks')
# Default is RejectPolicy
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password and self.password.strip():
client.connect(hostname=self.remote_host,
username=self.username,
password=self.password,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
else:
client.connect(hostname=self.remote_host,
username=self.username,
key_filename=self.key_file,
timeout=self.timeout,
compress=self.compress,
port=self.port,
sock=self.host_proxy)
if self.keepalive_interval:
client.get_transport().set_keepalive(self.keepalive_interval)
self.client = client
return client
def __enter__(self):
warnings.warn('The contextmanager of SSHHook is deprecated.'
'Please use get_conn() as a contextmanager instead.'
'This method will be removed in Airflow 2.0',
category=DeprecationWarning)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.client is not None:
self.client.close()
self.client = None
def get_tunnel(self, remote_port, remote_host="localhost", local_port=None):
"""
Creates a tunnel between two hosts. Like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>.
:param remote_port: The remote port to create a tunnel to
:type remote_port: int
:param remote_host: The remote host to create a tunnel to (default localhost)
:type remote_host: str
:param local_port: The local port to attach the tunnel to
:type local_port: int
:return: sshtunnel.SSHTunnelForwarder object
"""
if local_port:
local_bind_address = ('localhost', local_port)
else:
local_bind_address = ('localhost',)
if self.password and self.password.strip():
client = SSHTunnelForwarder(self.remote_host,
ssh_port=self.port,
ssh_username=self.username,
ssh_password=self.password,
ssh_pkey=self.key_file,
ssh_proxy=self.host_proxy,
local_bind_address=local_bind_address,
remote_bind_address=(remote_host, remote_port),
logger=self.log)
else:
client = SSHTunnelForwarder(self.remote_host,
ssh_port=self.port,
ssh_username=self.username,
ssh_pkey=self.key_file,
ssh_proxy=self.host_proxy,
local_bind_address=local_bind_address,
remote_bind_address=(remote_host, remote_port),
host_pkey_directories=[],
logger=self.log)
return client
def create_tunnel(self, local_port, remote_port=None, remote_host="localhost"):
warnings.warn('SSHHook.create_tunnel is deprecated, Please'
'use get_tunnel() instead. But please note that the'
'order of the parameters have changed'
'This method will be removed in Airflow 2.0',
category=DeprecationWarning)
return self.get_tunnel(remote_port, remote_host, local_port)
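# Usage sketch (the connection id and hosts are assumptions):
#
#     hook = SSHHook(ssh_conn_id='ssh_default')
#     client = hook.get_conn()
#     stdin, stdout, stderr = client.exec_command('uname -a')
#
#     tunnel = hook.get_tunnel(5432, remote_host='db.internal', local_port=15432)
#     tunnel.start()
#     # ... talk to localhost:15432 ...
#     tunnel.stop()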
| apache-2.0 | 459,037,617,762,114,000 | 40.963855 | 90 | 0.56503 | false | 4.564875 | true | false | false |
jbagd/twitter-sentiment | twitterstream-copy.py | 1 | 1814 | import oauth2 as oauth
import urllib2 as urllib
# See https://dev.twitter.com/oauth/overview/application-owner-access-tokens for how to get these credentials
access_token_key = "type in your credentials"
access_token_secret = "type in your credentials"
consumer_key = "type in your credentials"
consumer_secret = "type in your credentials"
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
req = oauth.Request.from_consumer_and_token(oauth_consumer,
token=oauth_token,
http_method=http_method,
http_url=url,
parameters=parameters)
req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
opener = urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
response = opener.open(url, encoded_post_data)
return response
def fetchsamples():
url = "https://stream.twitter.com/1/statuses/sample.json"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
print line.strip()
if __name__ == '__main__':
fetchsamples()
| mit | -2,520,181,808,444,195,300 | 28.737705 | 109 | 0.662624 | false | 3.763485 | false | false | false |
niosus/EasyClangComplete | tests/test_flags_source.py | 1 | 2063 | """Tests for cmake database generation."""
import imp
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import flags_source
from EasyClangComplete.plugin.utils import flag
imp.reload(flags_source)
imp.reload(flag)
FlagsSource = flags_source.FlagsSource
Flag = flag.Flag
class TestFlagsSource(TestCase):
"""Test getting flags from a list of chunks."""
def test_init(self):
"""Initialization test."""
include_prefixes = ["-I", "-isystem"]
flags_source = FlagsSource(include_prefixes)
self.assertEqual(flags_source._include_prefixes, include_prefixes)
def test_parse_flags(self):
"""Test that the flags are parsed correctly."""
from os import listdir
current_folder = path.dirname(__file__)
folder_to_expand = path.join(current_folder, '*')
initial_str_flags = ["-I", current_folder, "-I" + current_folder,
"-isystem", current_folder, "-std=c++11",
"#simulate a comment",
"-Iblah\n", "-I", "blah", "-I" + folder_to_expand]
flags = Flag.tokenize_list(initial_str_flags, current_folder)
expected_blah_path = path.join(current_folder, "blah")
self.assertIn(Flag("-I", current_folder, " "), flags)
self.assertIn(Flag("-I", current_folder), flags)
self.assertIn(Flag("-isystem", current_folder, " "), flags)
self.assertIn(Flag("-I", expected_blah_path, " "), flags)
self.assertIn(Flag("-I", expected_blah_path), flags)
self.assertIn(Flag("", "-std=c++11"), flags)
# Check star expansion for a flags source
for child in listdir(current_folder):
child = path.join(current_folder, child)
if path.isdir(child):
self.assertIn(Flag("-I", child), flags)
self.assertNotIn(Flag("", "-Iblah"), flags)
self.assertNotIn(Flag("-I", "blah", " "), flags)
self.assertNotIn(Flag("", "-isystem" + current_folder), flags)
| mit | -3,226,830,194,560,409,600 | 39.45098 | 79 | 0.610276 | false | 3.982625 | true | false | false |
gautam1858/tensorflow | tensorflow/python/kernel_tests/bias_op_test.py | 2 | 11215 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class BiasAddTest(test.TestCase):
def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
assert inputs.shape[-1] == bias.shape[0]
return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
[bias.shape[0]])
def testNpBias(self):
self.assertAllClose(
np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))
def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
with self.cached_session(use_gpu=use_gpu):
tf_val = nn_ops.bias_add(np_inputs, np_bias).eval()
self.assertAllCloseAccordingToType(np_val, tf_val)
def _AtLeast3d(self, np_value):
# fill the input value to at least 3-dimension
if np_value.ndim < 3:
return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
return np_value
def _NHWCToNCHW(self, np_value):
# fill the input value to at least 3-dimension
np_value = self._AtLeast3d(np_value)
# move the last dimension to second
np_dim = list(range(np_value.ndim))
np_dim_new = list(np_dim[0:1]) + list(np_dim[-1:]) + list(np_dim[1:-1])
return np.transpose(np_value, np_dim_new)
def _NCHWToNHWC(self, np_value):
assert len(np_value.shape) >= 3
np_dim = list(range(np_value.ndim))
# move the second dimension to the last
np_dim_new = list(np_dim[0:1]) + list(np_dim[2:]) + list(np_dim[1:2])
return np.transpose(np_value, np_dim_new)
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
with self.cached_session(use_gpu=use_gpu):
tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
def _testAll(self, np_inputs, np_bias):
self._testBias(np_inputs, np_bias, use_gpu=False)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
@test_util.run_deprecated_v1
def testInputDims(self):
with self.assertRaises(ValueError):
nn_ops.bias_add([1, 2], [1])
@test_util.run_deprecated_v1
def testBiasVec(self):
with self.assertRaises(ValueError):
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
@test_util.run_deprecated_v1
def testBiasInputsMatch(self):
with self.assertRaises(ValueError):
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1], shape=[1]))
@test_util.run_deprecated_v1
def testIntTypes(self):
for t in [np.int8, np.int16, np.int32, np.int64]:
self._testAll(
np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
np.array([1, 2, 3]).astype(t))
@test_util.run_deprecated_v1
def testFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))
@test_util.run_deprecated_v1
def test4DFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3).astype(t),
np.random.rand(3).astype(t))
@test_util.run_deprecated_v1
def test5DFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(
np.random.rand(4, 3, 2, 3, 4).astype(t),
np.random.rand(4).astype(t))
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=dtype)
bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
input_tensor, np_input.shape, output_tensor, np_input.shape)
bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape)
# Test gradient of BiasAddGrad
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape)
if dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=np.float32)
bias_tensor = constant_op.constant(
bias, shape=bias.shape, dtype=np.float32)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
_, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
np_input.shape,
output_tensor,
np_input.shape)
_, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
bias.shape,
output_tensor,
np_input.shape)
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
_, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
np_input.shape,
bias_add_grad,
bias.shape)
threshold = 2e-3
if dtype == dtypes.float64:
threshold = 1e-10
self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)
@test_util.run_deprecated_v1
def testGradientTensor2D(self):
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor3D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(1, 3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor4D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor5D(self):
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[1, 2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testEmpty(self):
np.random.seed(7)
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))
@test_util.run_deprecated_v1
def testEmptyGradient(self):
for (data_format, use_gpu) in ("NHWC", False), ("NHWC", True):
for shape in (0, 0), (2, 0), (0, 2):
self._testGradient(
np.random.randn(*shape), np.random.randn(shape[-1]), dtypes.float64,
data_format, use_gpu)
for (data_format, use_gpu) in [("NHWC", False), ("NHWC", True),
("NCHW", False), ("NCHW", True)]:
for shape in (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testGradient(
np.random.randn(*shape),
np.random.randn(shape[-1]), dtypes.float64, data_format, use_gpu)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,695,578,192,542,703,000 | 41.642586 | 80 | 0.594383 | false | 3.33482 | true | false | false |
simon816/Command-Block-Assembly | cbl/array_support.py | 1 | 7466 | from collections import namedtuple
from .native_type import NativeType
from .containers import DelegatedWrite
import cmd_ir.instructions as i
Pair = namedtuple('Pair', 'left right min max')
class ArrayType(NativeType):
def __init__(self, elem_type, size):
super().__init__()
assert size > 0, "Size must be > 0, is %d" % size
assert elem_type.typename == 'int', "TODO"
self.elem_type = elem_type
self.typename = elem_type.typename + '[]'
self.nbt_type = i.NBTType.int # TODO
self.size = size
def __repr__(self):
return 'ArrayType(%s[%d])' % (self.elem_type.typename, self.size)
@property
def ir_type(self):
return i.VarType.nbt
def allocate(self, compiler, namehint):
return compiler.create_var(namehint, self.ir_type)
def as_variable(self, instance):
return instance
def run_constructor(self, compiler, instance, arguments):
assert len(arguments) <= self.size
compiler.array_support.allocate(self.size)
var = instance.value
array = compiler.insn_def(i.CreateNBTList(self.nbt_type))
init_val = self._init_val(compiler)
with compiler.compiletime():
for _ in range(self.size):
compiler.add_insn(i.NBTListAppend(array, init_val))
compiler.add_insn(i.NBTAssign(var, array))
for n, arg in enumerate(arguments):
compiler.array_support.set(var, n, arg.type.as_variable(arg.value))
def _init_val(self, compiler):
# TODO non-int defaults
return compiler.insn_def(i.CreateNBTValue(self.nbt_type, 0))
def dispatch_operator(self, compiler, op, left, right=None):
if op == '[]':
return ArrayElementHolder(compiler, self, left, right)
return super().dispatch_operator(compiler, op, left, right)
class ArrayElementHolder(DelegatedWrite):
def __init__(self, compiler, arrtype, array, index):
self._compiler = compiler
self.type = arrtype.elem_type
self.array = array.type.as_variable(array.value)
self.index = index.type.as_variable(index.value)
self.__got_val = None
@property
def value(self):
if self.__got_val is None:
self.__got_val = self.read(self._compiler)
return self.__got_val
def read(self, compiler):
return compiler.array_support.get(self.array, self.index)
def write(self, compiler, other):
var = other.type.as_variable(other.value)
compiler.array_support.set(self.array, self.index, var)
return other
class ArraySupport:
index_type = i.VarType.i32
def __init__(self, compiler):
self.compiler = compiler
self.max_size = 0
self.getter = None
self.setter = None
def allocate(self, size):
self.max_size = max(self.max_size, size)
def finish(self):
if self.getter:
self.compiler.pragma('array_support_getter', self.max_size)
if self.setter:
self.compiler.pragma('array_support_setter', self.max_size)
def get(self, array, index):
self.lazy_load_get()
args = (array, index)
# TODO type
val = self.compiler.create_var('arrval', i.VarType.i32)
ret_args = (val,)
self.compiler.add_insn(i.Invoke(self.getter, args, ret_args))
return val
def set(self, array, index, value):
self.lazy_load_set()
args = (array, index, value)
self.compiler.add_insn(i.Invoke(self.setter, args, None))
def lazy_load_get(self):
if self.getter is None:
# TODO customize return type
self.getter = self.compiler.extern_function('_internal/array_get', (
(i.VarType.nbt, 'byval'), (self.index_type, 'byval')), (i.VarType.i32,))
def lazy_load_set(self):
if self.setter is None:
# TODO customise value type
self.setter = self.compiler.extern_function('_internal/array_set', (
(i.VarType.nbt, 'byref'), (self.index_type, 'byval'),
(i.VarType.i32, 'byval')), None)
@classmethod
def gen_getter(cls, top, size):
func = top.define_function('_internal/array_get')
arrparam = func.preamble.define(i.ParameterInsn(i.VarType.nbt, 'byval'))
indexparam = func.preamble.define(i.ParameterInsn(cls.index_type, 'byval'))
retvar = func.preamble.define(i.ReturnVarInsn(i.VarType.i32))
cls._gen_for(size, func, 'get', indexparam, cls._gen_getter, arrparam, retvar)
@classmethod
def gen_setter(cls, top, size):
func = top.define_function('_internal/array_set')
arrparam = func.preamble.define(i.ParameterInsn(i.VarType.nbt, 'byref'))
indexparam = func.preamble.define(i.ParameterInsn(cls.index_type, 'byval'))
valparam = func.preamble.define(i.ParameterInsn(i.VarType.i32, 'byval'))
cls._gen_for(size, func, 'set', indexparam, cls._gen_setter, arrparam, valparam)
@staticmethod
def _gen_getter(block, indexvar, indexval, arr, retvar):
path = i.VirtualString('[%d]' % indexval)
path_var = block._func.preamble.define(i.NBTSubPath(arr, path, retvar.type))
block.add(i.SetScore(retvar, path_var))
@staticmethod
def _gen_setter(block, indexvar, indexval, arr, value):
path = i.VirtualString('[%d]' % indexval)
path_var = block._func.preamble.define(i.NBTSubPath(arr, path, value.type))
block.add(i.SetScore(path_var, value))
@staticmethod
def _gen_for(size, func, prefix, indexparam, gen_callback, *cb_args):
entry = func.create_block('entry')
# Copy to local variable due to register allocation speedup
index = func.preamble.define(i.DefineVariable(indexparam.type))
entry.add(i.SetScore(index, indexparam))
def pair_name(pair):
return '%s_%d_%d' % (prefix, pair.min, pair.max)
def branch(func, index, pair):
return i.RangeBr(index, pair.min, pair.max,
func.get_or_create_block(pair_name(pair)), None)
def callback(pair):
block = func.get_or_create_block(pair_name(pair))
block.defined = True
if pair.left:
block.add(branch(func, index, pair.left))
if pair.right:
block.add(branch(func, index, pair.right))
if pair.min == pair.max:
gen_callback(block, index, pair.min, *cb_args)
root = generate_bin_tree(size, callback)
entry.add(i.Call(func.get_or_create_block(pair_name(root))))
entry.add(i.Return())
func.end()
def generate_bin_tree(size, callback):
assert size > 0
old_pairs = []
for n in range(size):
pair = Pair(None, None, n, n)
old_pairs.append(pair)
callback(pair)
while len(old_pairs) > 1:
pairs = []
waiting = None
for pair in old_pairs:
if waiting is None:
waiting = pair
else:
new_pair = Pair(waiting, pair, waiting.min, pair.max)
pairs.append(new_pair)
callback(new_pair)
waiting = None
if waiting is not None:
# Dangling node, occurs if size is not a power of 2
pairs.append(waiting)
callback(waiting)
old_pairs = pairs
return old_pairs[0]
| mit | -2,577,474,538,730,189,300 | 35.067633 | 88 | 0.602465 | false | 3.567129 | false | false | false |
wf4ever/ro-manager | src/checklist/minim_graph.py | 1 | 5519 | # Minim_graph.py
"""
Module to create RDF Minim graph throughn simple set of API calls
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import rdflib
from rocommand.ro_namespaces import RDF
from iaeval.ro_minim import MINIM
class Minim_graph(object):
"""
Class to create abstraction for constructing a Minim graph.
The actual format of the resulting graph is implementation-dependent.
This implementation builds an RDG graph, and serializes it in one of
a number of formats. The default format is Turtle/N3.
"""
def __init__(self, base=None):
self._base = base
self._minimgr = rdflib.Graph()
return
def prefix(self, prefix, nsuri):
self._minimgr.bind(prefix, rdflib.Namespace(nsuri))
return
def checklist(self, purpose=None, model=None, target="{+targetro}"):
cls = rdflib.URIRef("#ChecklistConstraints", base=self._base)
cln = rdflib.BNode()
clt = rdflib.Literal(target)
clp = rdflib.Literal(purpose)
clm = rdflib.URIRef(model, base=self._base)
self._minimgr.add( (cls, MINIM.hasChecklist, cln) )
self._minimgr.add( (cln, RDF.type, MINIM.Checklist) )
self._minimgr.add( (cln, MINIM.forTargetTemplate, clt) )
self._minimgr.add( (cln, MINIM.forPurpose, clp) )
self._minimgr.add( (cln, MINIM.toModel, clm) )
return cln
def model(self, modelid, itemlist):
model = rdflib.URIRef(modelid, base=self._base)
self._minimgr.add( (model, RDF.type, MINIM.Model) )
for (level, item) in itemlist:
self._minimgr.add( (model, level, item) )
return model
def item(self, seq=None, level="MUST", ruleid=None):
item = rdflib.BNode()
rule = rdflib.URIRef(ruleid, base=self._base)
self._minimgr.add( (item, RDF.type, MINIM.Requirement) )
self._minimgr.add( (item, MINIM.isDerivedBy, rule) )
if seq:
self._minimgr.add( (item, MINIM.seq, rdflib.Literal(seq)) )
levelmap = (
{ "MUST": MINIM.hasMustRequirement
, "SHOULD": MINIM.hasShouldRequirement
, "MAY": MINIM.hasMayRequirement
})
return (levelmap[level], item)
def rule(self,
ruleid, ForEach=None, ResultMod=None, Exists=None, Min=0, Max=None,
Aggregates=None, IsLive=None,
Command=None, Response=None,
Show=None, Pass="None", Fail="None", NoMatch="None"):
rule = rdflib.URIRef(ruleid, base=self._base)
if ForEach:
ruletype = MINIM.QueryTestRule
querynode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.query, querynode) )
self._minimgr.add( (querynode, MINIM.sparql_query, rdflib.Literal(ForEach)) )
if ResultMod:
self._minimgr.add( (querynode, MINIM.result_mod, rdflib.Literal(ResultMod)) )
if Exists:
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
self._minimgr.add( (existsnode, MINIM.sparql_query, rdflib.Literal(Exists)) )
if Min:
self._minimgr.add( (rule, MINIM.min, rdflib.Literal(Min)) )
if Max:
self._minimgr.add( (rule, MINIM.max, rdflib.Literal(Max)) )
if Aggregates:
self._minimgr.add( (rule, MINIM.aggregatesTemplate, rdflib.Literal(Aggregates)) )
if IsLive:
self._minimgr.add( (rule, MINIM.isLiveTemplate, rdflib.Literal(IsLive)) )
elif Exists:
ruletype = MINIM.QueryTestRule
existsnode = rdflib.BNode()
self._minimgr.add( (rule, MINIM.exists, existsnode) )
self._minimgr.add( (existsnode, MINIM.sparql_query, rdflib.Literal(Exists)) )
elif Command:
ruletype = MINIM.SoftwareEnvironmentRule
self._minimgr.add( (rule, MINIM.command, rdflib.Literal(Command)) )
self._minimgr.add( (rule, MINIM.response, rdflib.Literal(Response)) )
else:
raise ValueError("Unrecognized requirement rule pattern")
self._minimgr.add( (rule, RDF.type, ruletype) )
if Show:
self._minimgr.add( (rule, MINIM.show, rdflib.Literal(Show)) )
if Pass:
self._minimgr.add( (rule, MINIM.showpass, rdflib.Literal(Pass)) )
if Fail:
self._minimgr.add( (rule, MINIM.showfail, rdflib.Literal(Fail)) )
if NoMatch:
self._minimgr.add( (rule, MINIM.showmiss, rdflib.Literal(NoMatch)) )
return rule
def collectlist(self, rule, listprop, listvars):
for c in listvars:
listnode = rdflib.BNode()
self._minimgr.add( (rule, listprop, listnode) )
self._minimgr.add( (listnode, RDF.type, MINIM.ValueCollector) )
# Note: strips off leading '?' from variable names
self._minimgr.add( (listnode, MINIM.collectVar, rdflib.Literal(c["collectvar"][1:])) )
self._minimgr.add( (listnode, MINIM.collectList, rdflib.Literal(c["collectlist"][1:])) )
return
def serialize(self, outstr, format="turtle"):
self._minimgr.serialize(destination=outstr, format=format)
return
def graph(self):
return self._minimgr
# End.
| mit | 5,834,525,901,752,327,000 | 40.496241 | 100 | 0.596666 | false | 3.400493 | false | false | false |
j12y/predixpy | predix/data/weather.py | 1 | 3570 |
import os
import urllib
import predix.config
import predix.service
class WeatherForecast(object):
"""
Weather Forecast Service
.. important::
Deprecated
"""
def __init__(self, *args, **kwargs):
super(WeatherForecast, self).__init__(*args, **kwargs)
key = predix.config.get_env_key(self, 'uri')
self.uri = os.environ.get(key)
if not self.uri:
raise ValueError("%s environment unset" % key)
key = predix.config.get_env_key(self, 'zone_id')
self.zone_id = os.environ.get(key)
if not self.zone_id:
raise ValueError("%s environment unset" % key)
self.service = predix.service.Service(self.zone_id)
def authenticate_as_client(self, client_id, client_secret):
self.service.uaa.authenticate(client_id, client_secret)
def get_weather_forecast_days(self, latitude, longitude,
days=1, frequency=1, reading_type=None):
"""
Return the weather forecast for a given location.
::
results = ws.get_weather_forecast_days(lat, long)
for w in results['hits']:
print w['start_datetime_local']
print w['reading_type'], w['reading_value']
For description of reading types:
https://graphical.weather.gov/xml/docs/elementInputNames.php
"""
params = {}
# Can get data from NWS1 or NWS3 representing 1-hr and 3-hr
# intervals.
if frequency not in [1, 3]:
raise ValueError("Reading frequency must be 1 or 3")
params['days'] = days
params['source'] = 'NWS' + str(frequency)
params['latitude'] = latitude
params['longitude'] = longitude
if reading_type:
# url encoding will make spaces a + instead of %20, which service
# interprets as an "and" search which is undesirable
reading_type = reading_type.replace(' ', '%20')
params['reading_type'] = urllib.quote_plus(reading_type)
url = self.uri + '/v1/weather-forecast-days/'
return self.service._get(url, params=params)
def get_weather_forecast(self, latitude, longitude, start, end,
frequency=1, reading_type=None):
"""
Return the weather forecast for a given location for specific
datetime specified in UTC format.
::
results = ws.get_weather_forecast(lat, long, start, end)
for w in results['hits']:
print w['start_datetime_local']
print w['reading_type'], '=', w['reading_value']
For description of reading types:
https://graphical.weather.gov/xml/docs/elementInputNames.php
"""
params = {}
# Can get data from NWS1 or NWS3 representing 1-hr and 3-hr
# intervals.
if frequency not in [1, 3]:
raise ValueError("Reading frequency must be 1 or 3")
params['source'] = 'NWS' + str(frequency)
params['latitude'] = latitude
params['longitude'] = longitude
params['start_datetime_utc'] = start
params['end_datetime_utc'] = end
if reading_type:
# Not using urllib.quote_plus() because its using a + which is
# being interpreted by service as an and instead of a space.
reading_type = reading_type.replace(' ', '%20')
params['reading_type'] = reading_type
url = self.uri + '/v1/weather-forecast-datetime/'
return self.service._get(url, params=params)
| bsd-3-clause | 3,104,528,244,405,958,700 | 32.055556 | 77 | 0.589356 | false | 4.075342 | false | false | false |
koalakoker/knote_gcrypt | GCryptNote/__init__.py | 1 | 2654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 24/mag/2015
@author: koala
'''
from keepnote.gui import extension
import sys
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
class debugView:
"""debug view"""
def __init__(self,path):
self.gladefile = path + "/debugViewXML.glade"
self.wTree = gtk.glade.XML(self.gladefile)
dic = {"on_CloseBtn_clicked" : self.on_CloseBtn_clicked}
self.wTree.signal_autoconnect(dic)
def debugTxt(self, iStr):
#obj = self.builder.get_object("debugDialog")
obj = self.wTree.get_widget("debugDialog")
obj.show()
#txtEdit = self.builder.get_object("debugTxt")
txtEdit = self.wTree.get_widget("debugTxt")
txtEdit.get_buffer().set_text(iStr)
def on_CloseBtn_clicked(self,sender):
obj = self.wTree.get_widget("debugDialog")
obj.destroy()
class Extension (extension.Extension):
lPath = ""
def __init__(self, app):
"""Initialize extension"""
extension.Extension.__init__(self, app)
self.app = app
def get_depends(self):
return [("keepnote", ">=", (0, 7, 1))]
def on_add_ui(self, window):
"""Initialize extension for a particular window"""
# add menu options
self.add_action(
window, "Set password", _("Set password"),
lambda w: self.on_setPassword(
window, window.get_notebook()),
tooltip=_("Set password for the notebook"))
# TODO: Fix up the ordering on the affected menus.
self.add_ui(window,
"""
<ui>
<menubar name="main_menu_bar">
<menu action="File">
<menuitem action="Set password"/>
</menu>
</menubar>
</ui>
""")
self.lPath = self.get_base_dir(False)
self.hwg = debugView(self.lPath)
#Gigi
window.get_notebook()
def on_setPassword(self, window, notebook):
"""Callback from gui for importing a plain text file"""
# self.hwg.debugTxt(
# """
# Questa è una fidestra di debug in cui è possibile scrivere più o meno quello che si vuole
# Proviamo a scrivere qualcosa su più righe
# Solo per vedere come funziona!!!!
# """)
sys.path.append(self.lPath)
# Caca
self.hwg.debugTxt(m_testo + "\n" + toHex(m_cript)) | gpl-2.0 | 7,136,069,054,866,454,000 | 24.737864 | 99 | 0.537736 | false | 3.665284 | false | false | false |
ehfeng/pipet | pipet/sources/zendesk/views.py | 2 | 3440 | from datetime import datetime
from flask import Blueprint, redirect, request, Response, render_template, url_for
from flask_login import current_user, login_required
import requests
from sqlalchemy.exc import ProgrammingError
from pipet.models import db
from pipet.sources.zendesk import ZendeskAccount
from pipet.sources.zendesk.forms import CreateAccountForm, DestroyAccountForm
from pipet.sources.zendesk.models import Base, SCHEMANAME
from pipet.sources.zendesk.tasks import sync
blueprint = Blueprint(SCHEMANAME, __name__, template_folder='templates')
@blueprint.route('/')
@login_required
def index():
return render_template('zendesk/index.html')
@blueprint.route('/activate', methods=['GET', 'POST'])
@login_required
def activate():
session = current_user.organization.create_session()
form = CreateAccountForm(obj=current_user.organization.zendesk_account)
account = current_user.organization.zendesk_account
if form.validate_on_submit():
if not account:
account = ZendeskAccount()
account.subdomain = form.subdomain.data
account.admin_email = form.admin_email.data
account.api_key = form.api_key.data
account.organization_id = current_user.organization.id
db.session.add(account)
db.session.commit()
return redirect(url_for('zendesk.index'))
if account:
form.subdomain.data = account.subdomain
form.admin_email.data = account.admin_email
form.api_key.data = account.api_key
return render_template('zendesk/activate.html', form=form)
@blueprint.route('/deactivate')
@login_required
def deactivate():
account = current_user.organization.zendesk_account
form = DestroyAccountForm()
if form.validate_on_submit() and form.drop.data:
account.destroy_target()
account.destroy_trigger()
account.drop_all()
db.session.add(account)
db.session.commit()
return redirect(url_for('zendesk.index'))
return render_template('zendesk/deactivate.html')
@blueprint.route('/reset')
@login_required
def reset():
session = current_user.organization.create_session()
current_user.organization.zendesk_account.drop_all(session)
current_user.organization.zendesk_account.create_all(session)
return redirect(url_for('zendesk.index'))
@blueprint.route("/hook", methods=['POST'])
def hook():
if not request.authorization:
return ('', 401)
account = Account.query.filter((Account.subdomain == request.authorization.username) &
(Account.api_key == request.authorization.password)).first()
scoped_session = account.organization.create_scoped_session()
session = scoped_session()
if not account:
return ('', 401)
ticket_id = request.get_json()['id']
resp = requests.get(account.api_base_url +
'/tickets/{id}.json?include=users,groups'.format(
id=ticket_id),
auth=account.auth)
ticket, _ = Ticket.create_or_update(resp.json()['ticket'], account)
session.add_all(ticket.update(resp.json()))
session.add(ticket)
resp = requests.get(account.api_base_url +
'/tickets/{id}/comments.json'.format(id=ticket.id), auth=account.auth)
session.add_all(ticket.update_comments(resp.json()['comments']))
session.commit()
return ('', 204)
| mit | -7,385,483,556,247,478,000 | 31.45283 | 95 | 0.679942 | false | 3.926941 | false | false | false |
anryko/ansible | lib/ansible/modules/cloud/google/gcp_pubsub_topic_info.py | 13 | 6629 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_pubsub_topic_info
description:
- Gather info for GCP Topic
short_description: Gather info for GCP Topic
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a topic
gcp_pubsub_topic_info:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- Name of the topic.
returned: success
type: str
kmsKeyName:
description:
- The resource name of the Cloud KMS CryptoKey to be used to protect access
to messages published on this topic. Your project's PubSub service account
(`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must
have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
- The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` .
returned: success
type: str
labels:
description:
- A set of key/value label pairs to assign to this Topic.
returned: success
type: dict
messageStoragePolicy:
description:
- Policy constraining the set of Google Cloud Platform regions where messages
published to the topic may be stored. If not present, then no constraints
are in effect.
returned: success
type: complex
contains:
allowedPersistenceRegions:
description:
- A list of IDs of GCP regions where messages that are published to the
topic may be persisted in storage. Messages published by publishers running
in non-allowed GCP regions (or running outside of GCP altogether) will
be routed for storage in one of the allowed regions. An empty list means
that no regions are allowed, and is not a valid configuration.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'pubsub')
return auth.list(link, return_if_object, array_name='topics')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | 4,018,752,534,457,117,700 | 32.649746 | 100 | 0.610801 | false | 4.401726 | false | false | false |
sebdelsol/pyload | module/plugins/container/RSDF.py | 3 | 1608 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import binascii
import re
from module.plugins.Container import Container
from module.utils import fs_encode
class RSDF(Container):
__name__ = "RSDF"
__version__ = "0.24"
__pattern__ = r'.+\.rsdf'
__description__ = """RSDF container decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]"),
("spoob", "[email protected]")]
def decrypt(self, pyfile):
from Crypto.Cipher import AES
infile = fs_encode(pyfile.url.replace("\n", ""))
Key = binascii.unhexlify('8C35192D964DC3182C6F84F3252239EB4A320D2500000000')
IV = binascii.unhexlify('FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF')
IV_Cipher = AES.new(Key, AES.MODE_ECB)
IV = IV_Cipher.encrypt(IV)
obj = AES.new(Key, AES.MODE_CFB, IV)
try:
with open(infile, 'r') as rsdf:
data = rsdf.read()
except IOError, e:
self.fail(str(e))
if re.search(r"<title>404 - Not Found</title>", data) is None:
data = binascii.unhexlify(''.join(data.split()))
data = data.splitlines()
for link in data:
if not link:
continue
link = base64.b64decode(link)
link = obj.decrypt(link)
decryptedUrl = link.replace('CCF: ', '')
self.urls.append(decryptedUrl)
self.logDebug("Adding package %s with %d links" % (pyfile.package().name, len(self.urls)))
| gpl-3.0 | 1,757,640,070,915,786,800 | 27.714286 | 102 | 0.556592 | false | 3.613483 | false | false | false |
skosukhin/spack | var/spack/repos/builtin/packages/xfs/package.py | 1 | 1790 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xfs(AutotoolsPackage):
"""X Font Server."""
homepage = "http://cgit.freedesktop.org/xorg/app/xfs"
url = "https://www.x.org/archive/individual/app/xfs-1.1.4.tar.gz"
version('1.1.4', '0818a2e0317e0f0a1e8a15ca811827e2')
depends_on('[email protected]:')
depends_on('font-util')
depends_on('[email protected]:', type='build')
depends_on('fontsproto', type='build')
depends_on('xtrans', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
| lgpl-2.1 | -3,601,966,879,886,061,000 | 40.627907 | 78 | 0.660894 | false | 3.653061 | false | false | false |
scott-maddox/simplepl | src/simplepl/instruments/drivers/thorlabs_fw102c.py | 1 | 3238 | #
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# std lib imports
import logging
log = logging.getLogger(__name__)
import string
# third party imports
import serial
# local imports
#TODO: make sure the asked for nm is available on the given grating?
class FW102C(object):
'''
Device driver for ThorLabs FW102C Motorized Filter Wheel.
'''
def __init__(self, port, timeout=5.):
self._inst = serial.Serial(port,
baudrate=115200,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=timeout)
while self.__read(): # clear the filter's output buffer
pass
while True:
id = self.get_id()
if id != "Command error":
break
while self.__read(): # clear the filter's output buffer
pass
if id != "THORLABS FW102C/FW212C Filter Wheel version 1.01":
raise RuntimeError('Wrong instrument id: %s'%id)
def __read(self):
r = self._inst.readline()
log.debug('__read: return "%s"', r)
return r
def _read(self):
r = self.__read()
r = string.join(r.split()[1:-1]) # strip command echo and "ok"
log.debug('_read: return "%s"', r)
return r
def __write(self, s):
log.debug('__write: _inst.write("%s")', s)
self._inst.write(s+"\r")
def _write(self, s):
self.__write(s)
self._read() # command echo
def _ask(self, s):
self.__write(s)
return self._read()
def get_id(self):
return self._ask('*idn?')
#TODO: check how it confirms pos=1, if at all, and compensate
def set_filter(self, i):
'''
Sets the filter wheel position to the given index
'''
if not isinstance(i, int) or i < 1 or i > 6:
raise ValueError('i must be an integer in the range [1, 6]')
self._write('pos=%d'%i)
def get_filter(self):
return int(self._ask('pos?'))
if __name__ == "__main__":
# enable DEBUG output
logging.basicConfig(level=logging.DEBUG)
# Test
fw = FW102C(port=3)
print fw.get_id()
print fw.get_filter()
fw.set_filter(1)
print fw.get_filter()
fw.set_filter(2)
print fw.get_filter()
| agpl-3.0 | 6,502,911,783,775,601,000 | 29.54717 | 72 | 0.559296 | false | 3.873206 | false | false | false |
mafagafogigante/spellscream | spellscream.py | 1 | 6260 | import argparse
import collections
import os
class InspectionReport:
"""
The result of an inspection.
"""
def __init__(self, filename):
self.filename = filename
self.word_count = 0
self.warnings = []
self.issues = []
def __len__(self):
return len(self.issues)
def __str__(self):
lines = [self.get_report_heading()]
for warning in self.warnings:
lines.append(warning)
if len(self.issues) > 0:
maximum_line = self.issues[-1].line
for issue in self.issues:
lines.append(issue.format(max_line=maximum_line))
return "\n".join(lines)
def increment_word_count(self):
self.word_count += 1
def add_issue(self, issue):
self.issues.append(issue)
def analyze_issues(self):
"""
Analyzes the issues of this Report, possibly generating warnings and removing issues.
"""
typo_counter = collections.defaultdict(lambda: 0)
for issue in self.issues:
typo_counter[issue.text] += 1
# Typos that appear more than max(words / 10000, 10) times are assumed to be names
name_threshold = max(self.word_count / 10000, 10)
ignored_typos = []
for key, count in typo_counter.items():
if count > name_threshold:
ignored_typos.append((count, key))
ignored_typos.sort()
ignored_typos.reverse()
for typo in ignored_typos:
self.warnings.append("considering '" + typo[1] + "' a name as it was detected " + str(typo[0]) + " times")
self.remove_issues_based_on_text(set(typo[1] for typo in ignored_typos))
def remove_issues_based_on_text(self, typos):
new_issue_list = []
for issue in self.issues:
if issue.text not in typos:
new_issue_list.append(issue)
self.issues = new_issue_list
def get_report_heading(self):
"""
Creates a proper heading for this report.
"""
issue_count = len(self.issues)
issue_count_string = "(1 issue)" if issue_count == 1 else "(" + str(issue_count) + " issues)"
return self.filename + " " + issue_count_string + ":"
class Issue:
"""
A simple issue in a file.
"""
def __init__(self, line, text, issue_type='typo'):
self.line = line
self.text = text
self.type = issue_type
def __str__(self):
return str(self.line) + ": " + self.text
def format(self, max_line):
"""
Formats this the string representation of this issue by padding the line number for all issues to be aligned.
:param max_line: the biggest line number of the report this issue belongs to
:return: a human-readable and properly padded string representation of this issue
"""
length_delta = len(str(max_line)) - len(str(self.line))
return ' ' * length_delta + str(self)
# The shared word set used as a dictionary
dictionary = None
def init_dictionary():
global dictionary
dictionary = set()
with open("dictionary/english.txt") as dictionary_file:
for line in dictionary_file.readlines():
dictionary.add(line.strip())
def get_dictionary():
if dictionary is None:
init_dictionary()
return dictionary
def is_valid_word(word):
"""
Simply checks if the word is in the global dictionary or not.
:param word: the input word
"""
return word in get_dictionary()
def clean_word(word):
"""
Sanitizes the input word as to maximize the fairness of the dictionary check.
:param word: the input word
"""
word = word.strip("*_,:;.!?(){}[]'\"") # Stripping periods this way is problematic because "U.S." becomes "U.S"
word = word.lower() # May bring up issues with names, but is necessary for now for words that come after a period.
if word.endswith("'s"):
return word[:-2]
return word
def clean_file_words(file):
"""
Produces a generator of clean file words.
:param file: a file
"""
line_number = 0
for line in file.readlines():
line_number += 1
words = line.replace("--", " ").translate(str.maketrans("‘’“”", "''\"\"")).split()
for word in words:
yield line_number, word
def is_number(word):
"""
Detects if the word is a number. This function also detects monetary values and negative numbers.
:param word: a text word
:return: True if the word is considered to be a number, False otherwise
"""
# The first check is only needed for formal correctness. If performance requirements demand, it may be removed.
return len(word) > 0 and len(word.strip('0123456789,.-$')) == 0
def inspect_word(line, word, report):
"""
Inspects a single word from a text file.
:param line: the line of the file on which the word was found
:param word: the word to be inspected
:param report: the InspectionReport object relevant to the inspection
"""
word = clean_word(word)
if len(word) > 0:
report.increment_word_count()
if not is_number(word) and not is_valid_word(word):
report.add_issue(Issue(line, word))
def inspect_file(filename):
"""
Inspects a text file for grammar issues.
:param filename: the name of the file
:return: a InspectionResult object with all the issues found
"""
with open(filename) as open_file:
report = InspectionReport(filename)
for line, word in clean_file_words(open_file):
inspect_word(line, word, report)
report.analyze_issues()
return report
def list_files(root):
for root, directories, files in os.walk(root):
for file in files:
yield os.path.join(root, file)
def get_arguments():
parser = argparse.ArgumentParser(description="Inspects a file tree for grammar issues")
parser.add_argument("root", help="the root of the tree SpellScream will walk")
return parser.parse_args()
def main():
arguments = get_arguments()
files = list_files(arguments.root)
for file in files:
print(inspect_file(file))
if __name__ == "__main__":
main()
| isc | 4,623,976,332,109,623,000 | 29.79803 | 119 | 0.613884 | false | 3.88323 | false | false | false |
grow/grow | grow/documents/document_test.py | 1 | 33384 | """Tests for documents."""
import textwrap
import unittest
from grow.common import utils
from grow.testing import testing
from grow.documents import document
from grow.translations import locales
from grow.pods import pods
from grow import storage
class DocumentsTestCase(unittest.TestCase):
def setUp(self):
dir_path = testing.create_test_pod_dir()
self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
def test_eq(self):
"""Test equal comparison."""
doc1 = self.pod.get_doc('/content/pages/contact.yaml')
doc2 = self.pod.get_doc('/content/pages/contact.yaml')
self.assertEqual(doc1, doc2)
col = self.pod.get_collection('pages')
for doc in col:
if doc.pod_path == '/content/pages/contact.yaml':
self.assertEqual(doc1, doc)
self.assertEqual(doc2, doc)
doc1 = self.pod.get_doc('/content/pages/about.yaml')
doc2 = self.pod.get_doc('/content/pages/[email protected]')
self.assertEqual(doc1, doc2)
def test_ge(self):
"""Test greater-than-equal comparison."""
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/charlie.yaml')
self.assertTrue(doc1 >= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
self.assertTrue(doc1 >= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
self.assertTrue(doc1 >= doc2)
def test_gt(self):
"""Test greater-than comparison."""
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/charlie.yaml')
self.assertTrue(doc1 > doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
self.assertTrue(doc1 > doc2)
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertFalse(doc1 > doc2)
def test_le(self):
"""Test less-than-equal comparison."""
doc1 = self.pod.get_doc('/content/pages/charlie.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertTrue(doc1 <= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='ru')
self.assertTrue(doc1 <= doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='fr')
self.assertTrue(doc1 <= doc2)
def test_lt(self):
"""Test less-than comparison."""
doc1 = self.pod.get_doc('/content/pages/charlie.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertTrue(doc1 < doc2)
doc1 = self.pod.get_doc('/content/pages/bravo.yaml', locale='de')
doc2 = self.pod.get_doc('/content/pages/bravo.yaml', locale='en')
self.assertTrue(doc1 < doc2)
doc1 = self.pod.get_doc('/content/pages/delta.yaml')
doc2 = self.pod.get_doc('/content/pages/delta.yaml')
self.assertFalse(doc1 < doc2)
def test_doc_storage(self):
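        """Test document body, html, fields, and tagged reference values."""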
# Because this test involves translation priority, ensure that we have
# compiled the MO files before running the test.
self.pod.catalogs.compile()
doc = self.pod.get_doc('/content/pages/intro.md')
self.assertEqual('About page.', doc.body)
self.assertEqual('<p>About page.</p>', doc.html)
keys = sorted(['$title', '$order', '$titles', 'key', 'root_key'])
self.assertEqual(keys, sorted(list(doc.fields.keys())))
doc = self.pod.get_doc('/content/pages/home.yaml')
keys = sorted([
'$localization',
'$order',
'$path',
'$title',
'$view',
'csv_data',
'csv_data@',
'doc_data',
'doc_url_data',
'foo',
'json_data',
'static_data',
'static_url_data',
'tagged_fields',
'yaml_data',
'yaml_data@',
])
self.assertEqual(keys, sorted(list(doc.fields.keys())))
self.assertIsNone(doc.html)
about = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual(doc.doc_data, about)
self.assertEqual(doc.doc_url_data, about.url)
static = self.pod.get_static('/static/test.txt', locale='en')
self.assertEqual(doc.static_data, static)
self.assertEqual(doc.static_url_data, static.url)
default_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('bar', default_doc.foo)
de_doc = self.pod.get_doc('/content/pages/about.yaml', locale='de')
self.assertEqual('baz', de_doc.foo)
self.assertEqual('qux', de_doc.qaz)
def test_clean_localized_path(self):
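        """Test cleaning locale suffixes from localized pod paths."""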
input = '/content/pages/about.yaml'
expected = '/content/pages/about.yaml'
self.assertEqual(expected, document.Document.clean_localized_path(
input, None))
input = '/content/pages/[email protected]'
expected = '/content/pages/[email protected]'
self.assertEqual(expected, document.Document.clean_localized_path(
input, 'de'))
input = '/content/pages/[email protected]'
expected = '/content/pages/about.yaml'
self.assertEqual(expected, document.Document.clean_localized_path(
input, 'en'))
def test_collection_base_path(self):
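        """Test the base directory path of a document within its collection."""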
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/', about_doc.collection_base_path)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/', about_doc.collection_base_path)
self.pod.write_file('/content/pages/sub/foo/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/foo/about.yaml')
self.assertEqual('/sub/foo/', about_doc.collection_base_path)
def test_collection_sub_path(self):
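        """Test the sub path of a document within its collection."""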
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about.yaml', about_doc.collection_sub_path)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path)
self.pod.write_file('/content/pages/sub/foo/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/foo/about.yaml')
self.assertEqual('/sub/foo/about.yaml', about_doc.collection_sub_path)
def test_collection_sub_path_clean(self):
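        """Test the collection sub path with the locale suffix removed."""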
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about.yaml', about_doc.collection_sub_path_clean)
self.pod.write_file('/content/pages/sub/about.yaml', '')
about_doc = self.pod.get_doc('/content/pages/sub/about.yaml')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path_clean)
self.pod.write_file('/content/pages/sub/[email protected]', '')
about_doc = self.pod.get_doc('/content/pages/sub/[email protected]')
self.assertEqual('/sub/about.yaml', about_doc.collection_sub_path_clean)
def test_get_serving_path(self):
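        """Test serving paths for default and localized documents."""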
about_doc = self.pod.get_doc('/content/pages/about.yaml')
self.assertEqual('/about/', about_doc.get_serving_path())
fi_doc = self.pod.get_doc('/content/pages/about.yaml', locale='fi')
self.assertEqual('/fi_ALL/about/', fi_doc.get_serving_path())
def test_locales(self):
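        """Test the locale and list of locales for a document."""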
doc = self.pod.get_doc('/content/pages/contact.yaml')
self.assertEqual(locales.Locale('de'), doc.locale)
expected = locales.Locale.parse_codes([
'de',
'fr',
'it',
])
self.assertEqual(expected, doc.locales)
        # Currently, requesting a document with a locale that is not in its
        # list of locales still returns a serving path, even though no route
        # will match it. TBD whether we want to change this in a future
        # version.
ko_doc = self.pod.get_doc('/content/pages/contact.yaml', locale='ko')
expected = '/ko/contact-us/'
self.assertEqual(expected, ko_doc.url.path)
self.assertTrue(ko_doc.exists)
def test_parse_localized_path(self):
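        """Test splitting a localized pod path into a root path and locale."""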
path = '/content/pages/file@en_us.ext'
expected = ('/content/pages/file.ext', 'en_us')
self.assertEqual(
expected, document.Document.parse_localized_path(path))
path = '/content/pages/[email protected]'
expected = ('/content/pages/file.ext', 'en')
self.assertEqual(
expected, document.Document.parse_localized_path(path))
path = '/content/pages/file.ext'
expected = ('/content/pages/file.ext', None)
self.assertEqual(
expected, document.Document.parse_localized_path(path))
def test_localize_path(self):
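        """Test adding or replacing a locale suffix in a pod path."""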
path = '/content/pages/file.ext'
locale = 'locale'
expected = '/content/pages/[email protected]'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
        # No locale: the path is returned unchanged.
path = '/content/pages/file.ext'
locale = None
expected = '/content/pages/file.ext'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
        # An existing locale suffix is replaced with the new locale.
path = '/content/pages/[email protected]'
locale = 'elacol'
expected = '/content/pages/[email protected]'
self.assertEqual(
expected, document.Document.localize_path(path, locale=locale))
def test_next_prev(self):
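        """Test next and prev navigation between documents in a collection."""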
collection = self.pod.get_collection('pages')
docs = collection.list_docs()
doc = self.pod.get_doc('/content/pages/contact.yaml')
doc.next(docs)
self.assertRaises(ValueError, doc.next, [1, 2, 3])
doc.prev(docs)
self.assertRaises(ValueError, doc.prev, [1, 2, 3])
def test_default_locale(self):
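        """Test field, view, and path fallback to the default locale."""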
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='de')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('de'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('ja'), doc.locale)
self.assertEqual(locales.Locale('ja'), doc.default_locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='ja')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('ja'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='fr')
self.assertEqual('/views/ja-specific-view.html', doc.view)
self.assertEqual(locales.Locale('fr'), doc.locale)
self.assertEqual('base_ja', doc.foo)
self.assertEqual('baz', doc.bar)
self.assertEqual('/intl/fr/localized/', doc.url.path)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='en')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale('en'), doc.locale)
self.assertEqual('base', doc.foo)
self.assertEqual('baz', doc.bar)
self.assertEqual('/intl/en/localized/', doc.url.path)
def test_view_override(self):
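        """Test overriding the view per locale."""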
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale.parse('en_PK'), doc.locale)
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml',
locale='en_PK')
self.assertEqual('/views/localized.html', doc.view)
self.assertEqual(locales.Locale.parse('en_PK'), doc.locale)
doc = self.pod.get_doc('/content/localized/localized-view-override.yaml',
locale='tr_TR')
self.assertEqual('/views/tr-specific-view.html', doc.view)
self.assertEqual(locales.Locale.parse('tr_TR'), doc.locale)
def test_exists(self):
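        """Test existence checks for localized documents."""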
doc = self.pod.get_doc('/content/localized/localized.yaml')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='ja')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/localized.yaml', locale='de')
self.assertTrue(doc.exists)
doc = self.pod.get_doc('/content/localized/does-not-exist.yaml')
self.assertFalse(doc.exists)
def test_multi_file_localization(self):
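        """Test localization using separate per-locale files."""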
fr_doc = self.pod.get_doc('/content/pages/intro.md', locale='fr')
self.assertEqual(locales.Locale('fr'), fr_doc.locale)
self.assertEqual('/content/pages/[email protected]', fr_doc.pod_path)
self.assertEqual('/content/pages/intro.md', fr_doc.root_pod_path)
self.assertIn('French About page.', fr_doc.html)
de_doc = self.pod.get_doc('/content/pages/intro.md', locale='de')
de_doc_from_fr_doc = fr_doc.localize('de')
self.assertEqual(de_doc, de_doc_from_fr_doc)
self.assertEqual('root_value', de_doc.key)
self.assertEqual('fr_value', fr_doc.key)
self.assertEqual('root_key_value', de_doc.root_key)
self.assertEqual('root_key_value', fr_doc.root_key)
keys = sorted(['$title', '$order', '$titles', 'key', 'root_key'])
self.assertEqual(keys, sorted(list(fr_doc.fields.keys())))
def test_default_locale_override(self):
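        """Test overriding the default locale at the document level."""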
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'en',
'de',
'it',
]
}
})
pod.write_file('/views/base.html', '{{doc.foo}}')
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'default_locale': 'de',
},
'foo': 'foo-base',
'foo@de': 'foo-de',
})
pod.write_yaml('/content/pages/page2.yaml', {
'foo': 'foo-base',
'foo@de': 'foo-de',
})
pod.router.add_all(use_cache=False)
# Verify ability to override using the default locale.
content = testing.render_path(pod, '/page/')
self.assertEqual('foo-de', content)
content = testing.render_path(pod, '/en/page/')
self.assertEqual('foo-base', content)
# Verify default behavior otherwise.
content = testing.render_path(pod, '/page2/')
self.assertEqual('foo-base', content)
content = testing.render_path(pod, '/de/page2/')
self.assertEqual('foo-de', content)
def test_locale_override(self):
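        """Test per-locale field overrides with locale-tagged fields."""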
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'fr',
'it',
]
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/a.yaml', {
'$view': '/views/base.html',
'$view@fr': '/views/base-fr.html',
'qaz': 'qux',
'qaz@fr': 'qux-fr',
'qaz@de': 'qux-de',
'foo': 'bar-base',
'foo@en': 'bar-en',
'foo@de': 'bar-de',
'foo@fr': 'bar-fr',
'nested': {
'nested': 'nested-base',
'nested@fr': 'nested-fr',
},
})
doc = pod.get_doc('/content/pages/a.yaml')
self.assertEqual('en', doc.locale)
self.assertEqual('bar-en', doc.foo)
self.assertEqual('qux', doc.qaz)
de_doc = doc.localize('de')
self.assertEqual('bar-de', de_doc.foo)
self.assertEqual('/views/base.html', de_doc.view)
self.assertEqual('nested-base', de_doc.nested['nested'])
self.assertEqual('qux-de', de_doc.qaz)
fr_doc = doc.localize('fr')
self.assertEqual('bar-fr', fr_doc.foo)
self.assertEqual('/views/base-fr.html', fr_doc.view)
self.assertEqual('nested-fr', fr_doc.nested['nested'])
self.assertEqual('qux-fr', fr_doc.qaz)
it_doc = doc.localize('it')
self.assertEqual('bar-base', it_doc.foo)
self.assertEqual('qux', it_doc.qaz)
def test_localization(self):
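        """Test localization configured at the document, collection, and podspec levels."""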
# Localized document.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {})
pod.write_yaml('/content/pages/page.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
'locales': [
'de',
]
}
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertIsNone(doc.default_locale)
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
self.assertEqual('/page/', doc.url.path)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized collection.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertIsNone(doc.default_locale)
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized podspec (no $localization in blueprint or doc).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
})
pod.write_yaml('/content/pages/page.yaml', {})
collection = pod.get_collection('/content/pages/')
self.assertEqual(['de'], collection.locales)
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
# Localized podspec ($localization in blueprint).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'path': '/{locale}/{base}/',
},
})
pod.write_yaml('/content/pages/page.yaml', {})
collection = pod.get_collection('/content/pages/')
self.assertEqual(['de'], collection.locales)
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{locale}/{base}/', de_doc.path_format)
self.assertEqual('/de/page/', de_doc.url.path)
# Localized podspec ($localization in blueprint, no localized path).
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de'], doc.locales)
self.assertEqual('/{base}/', doc.path_format)
de_doc = doc.localize('de')
self.assertEqual('/{base}/', de_doc.path_format)
self.assertEqual('/page/', de_doc.url.path)
# Override collection with "$localization:locales:[]" in doc.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'locales': [],
},
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual([], doc.locales)
# Override collection with "$localization:~" in doc.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': None,
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual([], doc.locales)
# Override podspec with "$localization:locales:[]" in blueprint.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'locales': [
'de',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': {
'locales': [],
},
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
collection = pod.get_collection('/content/pages/')
self.assertEqual([], collection.locales)
self.assertEqual([], doc.locales)
# Override locales with "$localization:~" in blueprint.
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$localization': None,
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
collection = pod.get_collection('/content/pages/')
self.assertEqual([], collection.locales)
self.assertEqual([], doc.locales)
# Override the overridden podspec.
pod.write_yaml('/content/pages/page.yaml', {
'$localization': {
'locales': [
'de',
'ja',
],
},
})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual(['de', 'ja'], doc.locales)
def test_localization_fallback(self):
# Verify locales aren't clobbered when no localized path is specified.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
})
pod.write_yaml('/content/pages/page.yaml', {})
pod.write_file('/views/base.html', '{{doc.locale}}')
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': None,
})
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en', content)
# Verify paths aren't clobbered by the default locale.
pod.write_yaml('/content/pages/page.yaml', {
'$path': '/{locale}/{base}/',
'$view': '/views/base.html',
'$localization': {
'default_locale': 'de',
'path': '/{locale}/{base}/',
'locales': [
'en',
'de',
],
},
})
pod.podcache.reset()
pod.router.routes.reset()
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de', content)
paths = list(pod.router.routes.paths)
expected = ['/de/page/', '/en/page/']
self.assertEqual(expected, paths)
def test_view_format(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
self.assertEqual('/views/page.html', doc.view)
def test_recursive_yaml(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
'$localization': {
'default_locale': 'en',
'locales': ['de', 'en'],
}
})
pod.write_file('/content/pages/foo.yaml', textwrap.dedent(
"""\
bar: !g.doc /content/pages/bar.yaml
"""))
pod.write_file('/content/pages/bar.yaml', textwrap.dedent(
"""\
foo: !g.doc /content/pages/foo.yaml
"""))
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='de')
bar_doc = pod.get_doc('/content/pages/bar.yaml', locale='de')
self.assertEqual(bar_doc, foo_doc.bar)
self.assertEqual(bar_doc, foo_doc.bar.foo.bar)
self.assertEqual('de', foo_doc.bar.locale)
self.assertEqual(foo_doc, bar_doc.foo)
self.assertEqual(foo_doc, bar_doc.foo.bar.foo)
self.assertEqual('de', bar_doc.foo.locale)
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='en')
bar_doc = pod.get_doc('/content/pages/bar.yaml', locale='en')
self.assertEqual(bar_doc, foo_doc.bar)
self.assertEqual(bar_doc, foo_doc.bar.foo.bar)
self.assertEqual('en', foo_doc.bar.locale)
self.assertEqual(foo_doc, bar_doc.foo)
self.assertEqual(foo_doc, bar_doc.foo.bar.foo)
self.assertEqual('en', bar_doc.foo.locale)
def test_hreflang(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/{base}.html',
'$localization': {
'default_locale': 'en',
'locales': ['de', 'en', 'fr_ca'],
}
})
pod.write_file('/content/pages/foo.yaml', '')
foo_doc = pod.get_doc('/content/pages/foo.yaml', locale='en')
bar_doc = pod.get_doc('/content/pages/foo.yaml', locale='de')
baz_doc = pod.get_doc('/content/pages/foo.yaml', locale='fr_ca')
self.assertEqual('x-default', foo_doc.hreflang)
self.assertEqual('fr-ca', baz_doc.hreflang)
def test_locale_paths(self):
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_file('/content/pages/foo@en_us.yaml', '')
pod.write_file('/content/pages/[email protected]', '')
pod.write_file('/content/pages/foo.yaml', '')
doc = pod.get_doc('/content/pages/foo@en_us.yaml')
self.assertEqual([
'/content/pages/foo@en_us.yaml',
'/content/pages/[email protected]',
'/content/pages/foo.yaml',
], doc.locale_paths)
doc = pod.get_doc('/content/pages/[email protected]')
self.assertEqual([
'/content/pages/[email protected]',
'/content/pages/foo.yaml',
], doc.locale_paths)
doc = pod.get_doc('/content/pages/foo.yaml')
self.assertEqual([
'/content/pages/foo.yaml',
], doc.locale_paths)
def test_dependency_nesting_jinja(self):
# Verify that dependencies work for nested documents.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/'
},
})
pod.write_file('/content/pages/page.yaml', 'partial: !g.doc /content/partials/partial.yaml')
pod.write_yaml('/content/partials/_blueprint.yaml', {})
pod.write_yaml('/content/partials/partial.yaml', {})
pod.write_yaml('/content/partials/[email protected]', {})
pod.write_file(
'/views/base.html',
'{}{} {}'.format(
'{{doc.locale}}',
'{% for partial in g.docs(\'partials\') %} {{partial.locale}}{% endfor %}',
'{{g.doc(\'/content/partials/partial.yaml\').locale}}',
),
)
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en en en', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/partial.yaml')
self.assertEqual(set([
'/content/partials/partial.yaml',
'/content/pages/page.yaml',
]), dependents)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de de de', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/[email protected]')
self.assertEqual(set([
'/content/partials/[email protected]',
'/content/pages/page.yaml',
]), dependents)
def test_dependency_nesting_yaml(self):
# Verify that dependencies work for nested documents.
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {
'localization': {
'default_locale': 'en',
'locales': [
'de',
'en',
],
}
})
pod.write_yaml('/content/pages/_blueprint.yaml', {
'$path': '/{base}/',
'$view': '/views/base.html',
'$localization': {
'path': '/{locale}/{base}/'
},
})
pod.write_file('/content/pages/page.yaml', 'partial: !g.doc /content/partials/partial.yaml')
pod.write_yaml('/content/partials/_blueprint.yaml', {})
pod.write_yaml('/content/partials/partial.yaml', {})
pod.write_yaml('/content/partials/[email protected]', {})
pod.write_file('/views/base.html', '{{doc.locale}} {{doc.partial.locale}}')
pod.router.add_all(use_cache=False)
content = testing.render_path(pod, '/page/')
self.assertEqual('en en', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/partial.yaml')
self.assertEqual(set([
'/content/partials/partial.yaml',
'/content/pages/page.yaml',
]), dependents)
content = testing.render_path(pod, '/de/page/')
self.assertEqual('de de', content)
dependents = pod.podcache.dependency_graph.get_dependents(
'/content/partials/[email protected]')
self.assertEqual(set([
'/content/partials/[email protected]',
'/content/pages/page.yaml',
]), dependents)
def test_yaml_dump(self):
"""Test if the yaml representer is working correctly."""
pod = testing.create_pod()
pod.write_yaml('/podspec.yaml', {})
pod.write_yaml('/content/pages/page.yaml', {})
doc = pod.get_doc('/content/pages/page.yaml')
input_obj = {
'doc': doc
}
expected = textwrap.dedent(
"""\
doc: !g.doc '/content/pages/page.yaml'
""")
self.assertEqual(expected, utils.dump_yaml(input_obj))
if __name__ == '__main__':
unittest.main()
| mit | -1,329,246,219,627,932,700 | 37.638889 | 100 | 0.549305 | false | 3.729639 | true | false | false |
laufercenter/meld | meld/system/openmm_runner/cmap.py | 1 | 6968 | #
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
from collections import OrderedDict, namedtuple
import os
import math
from simtk import openmm
import numpy as np
from meld.system.system import ParmTopReader
CMAPResidue = namedtuple('CMAPResidue', 'res_num res_name index_N index_CA index_C')
#Terminal residues that act as a cap and have no amap term
capped = ['ACE','NHE','OHE', 'NME', 'GLP','DUM','NAG','DIF','BER','GUM','KNI','PU5','AMP','0E9']
class CMAPAdder(object):
_map_index = {
'GLY': 0,
'PRO': 1,
'ALA': 2,
'CYS': 3,
'CYX': 3,
'ASP': 3,
'ASH': 3,
'GLU': 3,
'GLH': 3,
'PHE': 3,
'HIS': 3,
'HIE': 3,
'HID': 3,
'HIP': 3,
'ILE': 3,
'LYS': 3,
'LYN': 3,
'MET': 3,
'ASN': 3,
'GLN': 3,
'SER': 3,
'THR': 3,
'VAL': 3,
'TRP': 3,
'TYR': 3,
'LEU': 3,
'ARG': 3
}
def __init__(self, top_string, alpha_bias=1.0, beta_bias=1.0, ccap=False, ncap=False):
"""
Initialize a new CMAPAdder object
:param top_string: an Amber new-style topology in string form
:param alpha_bias: strength of alpha correction, default=1.0
:param beta_bias: strength of beta correction, default=1.0
"""
self._top_string = top_string
self._alpha_bias = alpha_bias
self._beta_bias = beta_bias
self._ccap = ccap
self._ncap = ncap
reader = ParmTopReader(self._top_string)
self._bonds = reader.get_bonds()
self._residue_numbers = reader.get_residue_numbers()
self._residue_names = reader.get_residue_names()
self._atom_map = reader.get_atom_map()
self._ala_map = None
self._gly_map = None
self._pro_map = None
self._gen_map = None
self._load_maps()
def add_to_openmm(self, openmm_system):
"""
Add CMAPTorsionForce to openmm system.
:param openmm_system: System object to receive the force
"""
cmap_force = openmm.CMAPTorsionForce()
cmap_force.addMap(self._gly_map.shape[0], self._gly_map.flatten())
cmap_force.addMap(self._pro_map.shape[0], self._pro_map.flatten())
cmap_force.addMap(self._ala_map.shape[0], self._ala_map.flatten())
cmap_force.addMap(self._gen_map.shape[0], self._gen_map.flatten())
# loop over all of the contiguous chains of amino acids
for chain in self._iterate_cmap_chains():
# loop over the interior residues
n_res = len(chain)
for i in range(1, n_res-1):
map_index = self._map_index[chain[i].res_name]
# subtract one from all of these to get zero-based indexing, as in openmm
c_prev = chain[i - 1].index_C - 1
n = chain[i].index_N - 1
ca = chain[i].index_CA - 1
c = chain[i].index_C - 1
n_next = chain[i+1].index_N - 1
print "CMAP term:",i,map_index
cmap_force.addTorsion(map_index, c_prev, n, ca, c, n, ca, c, n_next)
openmm_system.addForce(cmap_force)
def _iterate_cmap_chains(self):
"""
Yield a series of chains of amino acid residues that are bonded together.
:return: a generator that will yield lists of CMAPResidue
"""
# use an ordered dict to remember num, name pairs in order, while removing duplicates
residues = OrderedDict((num, name) for (num, name) in zip(self._residue_numbers, self._residue_names))
print residues
new_res = []
for r in residues.items():
num,name = r
if name not in capped:
new_res.append(r)
residues = OrderedDict(new_res)
print residues
# now turn the ordered dict into a list of CMAPResidues
residues = [self._to_cmap_residue(num, name) for (num, name) in residues.items()]
print residues
    # is each residue i connected to its predecessor, i-1?
connected = self._compute_connected(residues)
# now we iterate until we've handled all residues
while connected:
chain = [residues.pop(0)] # we always take the first residue
connected.pop(0)
# if there are other residues connected, take them too
while connected and connected[0]:
chain.append(residues.pop(0))
connected.pop(0)
# we've taken a single connected chain, so yield it
# then loop back to the beginning
print 'CHAIN:',chain
yield chain
def _compute_connected(self, residues):
"""
Return a list of boolean values indicating if each residue is connected to its predecessor.
:param residues: a list of CMAPResidue objects
:return: a list of boolean values indicating if residue i is bonded to i-1
"""
def has_c_n_bond(res_i, res_j):
"""Return True if there is a bond between C of res_i and N of res_j, otherwise False."""
if (res_i.index_C, res_j.index_N) in self._bonds:
return True
else:
return False
# zip to together consecutive residues and see if they are bonded
connected = [has_c_n_bond(i, j) for (i, j) in zip(residues[0:], residues[1:])]
# the first element has no element to the left, so it's not connected
connected = [False] + connected
return connected
def _to_cmap_residue(self, num, name):
"""
Turn a residue number and name into a CMAPResidue object
:param num: residue number
:param name: residue name
:return: CMAPResidue
"""
n = self._atom_map[(num, 'N')]
ca = self._atom_map[(num, 'CA')]
c = self._atom_map[(num, 'C')]
res = CMAPResidue(res_num=num, res_name=name, index_N=n, index_CA=ca, index_C=c)
return res
def _load_map(self, stem):
basedir = os.path.join(os.path.dirname(__file__), 'maps')
alpha = np.loadtxt(os.path.join(basedir, '{}_alpha.txt'.format(stem))) * self._alpha_bias
beta = np.loadtxt(os.path.join(basedir, '{}_beta.txt'.format(stem))) * self._beta_bias
total = alpha + beta
assert total.shape[0] == total.shape[1]
n = int(math.ceil(total.shape[0] / 2.0))
total = np.roll(total, -n, axis=0)
total = np.roll(total, -n, axis=1)
total = np.flipud(total)
return total
def _load_maps(self):
"""Load the maps from disk and apply the alpha and beta biases."""
self._gly_map = self._load_map('gly')
self._pro_map = self._load_map('pro')
self._ala_map = self._load_map('ala')
self._gen_map = self._load_map('gen')
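# Minimal usage sketch (hypothetical names: `top_string` holds an AMBER
# new-style topology read elsewhere, `system` is an existing
# simtk.openmm.System; neither is defined in this module):
#
#     adder = CMAPAdder(top_string, alpha_bias=1.0, beta_bias=1.0)
#     adder.add_to_openmm(system)  # appends a single CMAPTorsionForce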
| mit | 4,541,306,422,953,295,000 | 34.917526 | 110 | 0.562141 | false | 3.482259 | false | false | false |
traceguide/api-python | examples/trivial/main.py | 1 | 3315 | """Simple example showing several generations of spans in a trace.
"""
import argparse
import contextlib
import sys
import time
import opentracing
import lightstep.tracer
def sleep_dot():
"""Short sleep and writes a dot to the STDOUT.
"""
time.sleep(0.05)
sys.stdout.write('.')
sys.stdout.flush()
def add_spans():
"""Calls the opentracing API, doesn't use any LightStep-specific code.
"""
with opentracing.tracer.start_trace(operation_name='trivial/initial_request') as parent_span:
parent_span.set_tag('url', 'localhost')
sleep_dot()
parent_span.info('All good here! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
parent_span.set_tag('span_type', 'parent')
sleep_dot()
# This is how you would represent starting work locally.
with parent_span.start_child(operation_name='trivial/child_request') as child_span:
child_span.error('Uh Oh! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
child_span.set_tag('span_type', 'child')
sleep_dot()
# To connect remote calls, pass a trace context down the wire.
trace_context = child_span.trace_context
with opentracing.tracer.join_trace(operation_name='trivial/remote_span',
parent_trace_context=trace_context) as remote_span:
remote_span.info('Remote! N=%d, flt=%f, string=%s', 42, 3.14, 'xyz')
remote_span.set_tag('span_type', 'remote')
sleep_dot()
def lightstep_tracer_from_args():
"""Initializes lightstep from the commandline args.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--token', help='Your LightStep access token.')
parser.add_argument('--host', help='The LightStep reporting service host to contact.',
default='localhost')
parser.add_argument('--port', help='The LightStep reporting service port.',
type=int, default=9997)
parser.add_argument('--use_tls', help='Whether to use TLS for reporting',
type=bool, default=False)
parser.add_argument('--group-name', help='The LightStep runtime group',
default='Python-Opentracing-Remote')
args = parser.parse_args()
if args.use_tls:
return lightstep.tracer.init_tracer(
group_name=args.group_name,
access_token=args.token,
service_host=args.host,
service_port=args.port)
else:
return lightstep.tracer.init_tracer(
group_name=args.group_name,
access_token=args.token,
service_host=args.host,
service_port=args.port,
secure=False)
if __name__ == '__main__':
print 'Hello '
# Use opentracing's default no-op implementation
with contextlib.closing(opentracing.Tracer()) as impl:
opentracing.tracer = impl
add_spans()
#Use LightStep's debug tracer, which logs to the console instead of reporting to LightStep.
with contextlib.closing(lightstep.tracer.init_debug_tracer()) as impl:
opentracing.tracer = impl
add_spans()
# Use LightStep's opentracing implementation
with contextlib.closing(lightstep_tracer_from_args()) as impl:
opentracing.tracer = impl
add_spans()
print 'World!'
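# Example invocation (the token value is a placeholder, not a real credential):
#
#     python main.py --token YOUR_ACCESS_TOKEN --host localhost --port 9997
#
# Without --use_tls the tracer reports in plaintext, matching the defaults
# declared in lightstep_tracer_from_args() above.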
| mit | 6,759,870,083,484,675,000 | 34.645161 | 98 | 0.629261 | false | 3.779932 | false | false | false |
hoover/snoop | snoop/management/commands/exportpgp.py | 1 | 2884 | from django.core.management.base import BaseCommand
from pathlib import Path
import base64
from ... import models
from ... import emails
from ... import pgp
from ... import utils
from ...content_types import guess_content_type
class Command(BaseCommand):
help = "Export digested pgp .eml files to a zip archive"
def add_arguments(self, parser):
parser.add_argument('destination',
help='path to the folder where the files will be dumped')
parser.add_argument('--where', default="(flags->>'pgp')::bool",
help='SQL "WHERE" clause on the snoop_document table')
def handle(self, destination, where, **options):
query = utils.build_raw_query('snoop_document', where)
root = Path(destination)
done = 0
for doc in models.Document.objects.raw(query):
if emails.is_email(doc):
email = emails.open_email(doc)
if not email.pgp:
print("id:", doc.id, "is not a pgp-encrypted email")
continue
try:
output = decrypt_email_file(email)
dump_eml(root, doc.md5, output)
except Exception as e:
print("id:", doc.id, "failed: " + type(e).__name__)
else:
print("id:", doc.id, "is done")
done += 1
else:
print("id:", doc.id, "is not an email file")
print(done, "documents dumped.")
def decrypt_email_file(email):
message = email._message()
for part in message.walk():
if part.is_multipart():
continue
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
if content_type == 'text/plain' or \
content_type == "application/octet-stream":
content_type = guess_content_type(filename)
part.set_type(content_type)
if filename == "message.html.pgp":
del part['Content-Disposition']
part.add_header('Content-Disposition',
'attachment',
filename='pgp.message.html')
part.replace_header('Content-Type', 'text/html')
data = part.get_payload(decode=True)
if not data:
continue
if pgp.contains_pgp_block(data):
data = pgp.decrypt_pgp_block(data)
b64 = base64.encodebytes(data)
part.set_payload(b64)
del part['Content-Transfer-Encoding']
part['Content-Transfer-Encoding'] = 'base64'
return message.as_bytes()
def dump_eml(root_path, md5, data):
folder = root_path / md5[0:2] / md5[2:4]
folder.mkdir(parents=True, exist_ok=True)
file = folder / (md5 + '.eml')
with file.open('wb') as f:
f.write(data)
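# Typical invocation as a Django management command (paths are examples):
#
#     ./manage.py exportpgp /tmp/pgp-dump --where "(flags->>'pgp')::bool"
#
# Decrypted messages land in <destination>/<md5[0:2]>/<md5[2:4]>/<md5>.eml,
# mirroring dump_eml() above.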
| mit | -19,401,088,531,284,572 | 35.05 | 72 | 0.552358 | false | 4.016713 | false | false | false |
pfre00/a3c | shared_rmsprop.py | 1 | 1231 | import torch
import torch.optim as optim
class SharedRMSprop(optim.RMSprop):
def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
super(SharedRMSprop, self).__init__(params, lr, alpha, eps, weight_decay, momentum, centered)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['square_avg'] = p.data.new().resize_as_(p.data).zero_()
if group['momentum'] > 0:
state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()
if group['centered']:
state['grad_avg'] = p.data.new().resize_as_(p.data).zero_()
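    # Usage sketch for A3C-style training (assumes `shared_model` is an
    # nn.Module whose parameters already live in shared memory):
    #
    #     optimizer = SharedRMSprop(shared_model.parameters(), lr=1e-4)
    #     optimizer.share_memory()  # expose optimizer state to all workers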
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['square_avg'].share_memory_()
if group['momentum'] > 0:
state['momentum_buffer'].share_memory_()
if group['centered']:
state['grad_avg'].share_memory_() | mit | 581,964,390,641,405,000 | 41.482759 | 106 | 0.515841 | false | 3.945513 | false | false | false |
thehub/hubspace | monkeypatch.py | 1 | 1050 | import sys
sys.path.extend(['develop-eggs/PIL-1.1.6-py2.7-linux-i686.egg', 'develop-eggs/pycairo-1.8.10-py2.7-linux-i686.egg'])
reload(sys)
sys.setdefaultencoding('utf-8')
del sys.setdefaultencoding
print "default encoding: utf-8"
#monkeypatch cherrypy, see
#http://trac.turbogears.org/turbogears/ticket/1022
import cherrypy
def our_decode(self, enc):
def decodeit(value, enc):
if hasattr(value,'file'):
return value
elif isinstance(value,list):
return [decodeit(v,enc) for v in value]
elif isinstance(value,dict):
for k,v in value.items():
value[k] = decodeit(v,enc)
return value
#return value here?
else:
return value.decode(enc)
decodedParams = {}
for key, value in cherrypy.request.params.items():
decodedParams[key] = decodeit(value,enc)
cherrypy.request.params = decodedParams
from cherrypy.filters.decodingfilter import DecodingFilter
DecodingFilter.decode = our_decode
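# Usage sketch: import this module once, before CherryPy starts serving,
# so the patched DecodingFilter.decode handles uploads, lists and dicts
# (the module name is taken from this file's path):
#
#     import monkeypatch  # noqa: F401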
| gpl-2.0 | 4,549,012,412,736,883,700 | 27.378378 | 116 | 0.646667 | false | 3.52349 | false | false | false |
LaPingvino/pasportaservo | tools/migration_family_members.py | 1 | 4145 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import pprint
import pytz
import pymysql as mdb
from datetime import datetime
from getpass import getpass
import django
from django.utils.timezone import make_aware
from django.utils.translation import ugettext_lazy as _
from django.db import transaction
from countries import COUNTRIES, PHONE_CODES
import getters as g
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, PROJECT_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pasportaservo.settings'
def u2dt(timestamp):
"""A convenience function to easily convert unix timestamps to datetime TZ aware."""
return make_aware(datetime.fromtimestamp(timestamp), pytz.timezone('UTC'))
@transaction.atomic
def migrate():
# Connect to an existing database
passwd = getpass("MySQL password for 'root': ")
dr = mdb.connect('localhost', 'root', passwd, 'pasportaservo', charset='utf8')
users = dr.cursor(mdb.cursors.DictCursor)
users.execute("""
SELECT u.uid id,
field_nomo_kaj_familia_nomo_1_value name1,
field_naskijaro_persono_1_value year1,
field_sekso_persono_1_value sex1,
field_nomo_kaj_familia_nomo_2_value name2,
field_naskijaro_persono_2_value year2,
               field_sekso_persono_2_value sex2,
field_nomo_kaj_familia_nomo_3_value name3,
field_naskijaro_persono_3_value year3
-- There is no field_sekso_persono_3_value in the source database
FROM users u
INNER JOIN node n ON n.uid=u.uid AND n.type='profilo'
INNER JOIN content_type_profilo p ON p.nid=n.nid
WHERE u.uid > 1
AND u.name <> 'testuser'
AND ( (field_lando_value = 'Albanio' AND field_urbo_value is not NULL)
OR (field_lando_value <> 'Albanio'))
AND field_familia_nomo_value <> 12
GROUP BY u.uid
""")
user = users.fetchone()
from django.contrib.auth.models import User
from hosting.utils import title_with_particule
from hosting.models import Profile, Place
django.setup()
# Starting...
print('Ignoring:')
while user is not None:
data1 = {'first_name': title_with_particule(user['name1']), 'birth_date': g.get_birth_date(user['year1']), 'title': g.get_title(user['sex1'])}
data2 = {'first_name': title_with_particule(user['name2']), 'birth_date': g.get_birth_date(user['year2']), 'title': g.get_title(user['sex2'])}
data3 = {'first_name': title_with_particule(user['name3']), 'birth_date': g.get_birth_date(user['year3'])}
try:
place = Place.objects.get(pk=user['id'])
except Place.DoesNotExist:
place = None
print(user['id'], data1['first_name'], data3['birth_date'])
if place and (data1['birth_date'] or data1['first_name']):
profile1 = Profile(**data1)
profile1.save()
place.family_members.add(profile1)
if place and (data2['birth_date'] or data2['first_name']):
profile2 = Profile(**data2)
profile2.save()
place.family_members.add(profile2)
if place and (data3['birth_date'] or data3['first_name']):
profile3 = Profile(**data3)
profile3.save()
place.family_members.add(profile3)
user = users.fetchone()
users.close()
dr.close()
print('\n Success! \\o/\n')
if __name__ == '__main__':
migrate()
# Open a cursor to perform database operations
#cur = dj.cursor()
# Execute a command: this creates a new table
#cur.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);")
# Pass data to fill a query placeholders and let Psycopg perform
# the correct conversion (no more SQL injections!)
#cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", (100, "abc'def"))
# Query the database and obtain data as Python objects
#cur.execute("SELECT * FROM test;")
#cur.fetchone()
# Make the changes to the database persistent
#conn.commit()
# Close communication with the database
#cur.close()
#conn.close()
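# Run directly from a checkout (prompts for the MySQL root password and
# writes family members into the Django database in a single transaction):
#
#     python tools/migration_family_members.py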
| agpl-3.0 | -6,203,545,569,085,463,000 | 31.131783 | 150 | 0.641978 | false | 3.451291 | false | false | false |
svalenti/agnkey | trunk/bin/agnscheduler.py | 1 | 15953 | #!/dark/usr/anaconda/bin/python
import agnkey
import numpy as np
import os,string,re
import datetime,time,ephem
import requests
def getstatus_all(token,status,date,user='stefano_valenti1',proposal='KEY2018B-001'):
req = 'https://observe.lco.global/api/userrequests/' + '?limit=1000&'+\
'proposal=' + proposal + '&'+\
'created_after=' + date + '&'+\
'user=' + user
if status:
req = req + '&state=' + status
print req
ss = requests.get(req, headers={'Authorization': 'Token ' + token})
return ss.json()
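# Example (response shape inferred from how the JSON is consumed below):
#
#     resp = getstatus_all(token, 'PENDING', '2018-01-01')
#     for line in resp['results']:
#         state, reqid = line['state'], line['requests'][-1]['id']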
def updatetriggerslog(ll0):
username, passwd = agnkey.util.readpass['odinuser'], agnkey.util.readpass['odinpasswd']
token = agnkey.util.readpass['token']
track = ll0['tracknumber']
_status = ''
if track!=0:
_dict = agnkey.util.getstatus_new(token,str(track).zfill(10))
else:
print ll0
print 'warning track number 0'
_dict={}
################# update status
if 'state' in _dict.keys():
_status=_dict['state']
if ll0['status']!=_status:
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
else:
_status = 'NULL'
################# update reqnumber
if 'requests' in _dict.keys():
_reqnumber = _dict['requests'][0]['id']
if str(ll0['reqnumber']).zfill(10)!= _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
else:
_reqnumber = ''
return _status
############################################################################
# LOAD ALL TRIGGERS OF THE LAST 15 DAYS
datenow = datetime.datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday)
date = (datenow+datetime.timedelta(-15)).strftime('%Y-%m-%d')
status = ''
token = agnkey.util.readpass['token']
all = getstatus_all(token,status,date)
result = {}
for line in all['results']:
if line['requests']:
result[line['id']] = [line['state'],line['requests'][-1]['id']]
else:
result[line['id']] = [line['state'], 0 ]
# UPDATE STATUS
command1 = ['select t.*,d.filters,d.mode from triggerslog as t join triggers as d where t.status is NULL and d.id = t.triggerid']
data11 = agnkey.agnsqldef.query(command1)
if len(data11):
try:
for data2 in data11:
track = data2['tracknumber']
if track!=0:
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
else:
#
                    # replace querying one by one with a single query
#
print 'warning problems here '+str(track)
_status = 'COMPLETED'
#_status = updatetriggerslog(data2)
print _status
else:
print 'warning track number = 0 '
except:
pass
_lunar = 20
_jd = ephem.julian_date()
command2 = ['select t.*,d.filters,d.mode,d.instrument from triggerslog as t join triggers as d where t.status = "PENDING" and d.id = t.triggerid']
data3 = agnkey.agnsqldef.query(command2)
if len(data3):
print 'ID JD_NOW STATUS END_WINDOW'
for data2 in data3:
track = data2['tracknumber']
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey',namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey',namefile0='tracknumber')
else:
# replace old method
_status = 'COMPLETED'
#_status = updatetriggerslog(data2)
print data2['id'], _jd , _status,
print _jd - data2['windowend']
if _status == 'PENDING' and _jd - data2['windowend'] > 0.1:
print 'warning this observation is still PENDING but window is over'
#raw_input('stop here')
agnkey.agnsqldef.updatevalue('triggerslog', 'status', 'UNSCHEDULABLE', data2['id'],
connection='agnkey',namefile0='id')
command3 = ['select t.*,l.name,l.ra_sn,l.dec_sn from triggers as t join lsc_sn_pos as l where active = 1 and l.id = t.targid']
data = agnkey.agnsqldef.query(command3)
#raw_input('here')
Warningdictionary={}
if len(data):
ll = {}
for jj in data[0].keys():
ll[jj] = []
for i in range(0,len(data)):
for jj in data[0].keys():
ll[jj].append(data[i][jj])
for jj,activeid in enumerate(ll['id']):
# if activeid in [265]:# 93:# [61,66,67]:
# if activeid in [243]:# 93:# [61,66,67]:
_jd = ephem.julian_date()
print '\n'
print '### id = ' + str(ll['id'][jj])
print '### name = ' + ll['name'][jj]
print '### filters = '+str(ll['filters'][jj])
print '### cadence = '+str(ll['cadence'][jj])
print '### mode = ' + str(ll['mode'][jj])
print '### instrument = ' + str(ll['instrument'][jj])
print '### trigger = '+ str(activeid)
print '\n'
command1 = ['select t.*,d.filters from triggerslog as t join triggers as d where t.triggerid = '+str(activeid)+' and d.id = t.triggerid order by windowend desc limit 3'] # and t.status="PENDING"']
data1 = agnkey.agnsqldef.query(command1)
trigger = False
if len(data1):
jd0 = 0
for data2 in data1:
track = data2['tracknumber']
if track in result:
_status = result[track][0]
_reqnumber = result[track][1]
if _status!='UNSCHEDULABLE':
agnkey.agnsqldef.updatevalue('triggerslog', 'status', _status, track,
connection='agnkey', namefile0='tracknumber')
if _reqnumber:
agnkey.agnsqldef.updatevalue('triggerslog', 'reqnumber', _reqnumber, track,
connection='agnkey', namefile0='tracknumber')
# raw_input('hehe')
else:
print 'Warning: trigger not found'
#_status = updatetriggerslog(data2)
_status = 'COMPLETED'
if _status == 'PENDING':
jd0 = _jd
elif _status == 'COMPLETED':
jd0 = max(jd0,data2['windowend'])
elif _status in ['UNSCHEDULABLE','CANCELED','WINDOW_EXPIRED']:
pass
else:
print 'status not recognized '+str(_status)
if jd0==0:
print 'no observation completed'
trigger=True
else:
print 'last observation '+str(float(_jd)-float(jd0))+' days ago'
            print 'cadence '+str(ll['cadence'][jj])  # the margin below accounts for the trigger lead time
if float(ll['cadence'][jj]) <= 2:
if float(_jd)-float(jd0) > .001:
print 'cadence less or equal to one day'
print 'last window ended, trigger'
trigger=True
# elif 1 < float(ll['cadence'][jj]) <= 2:
# print 'cadence between 1 and 2 days'
# print 'trigger if it is cadence-.3 days from end of the window'
# if float(ll['cadence'][jj])-.3 <= float(_jd)-float(jd0):
# trigger=True
else:
print 'trigger if it is cadence-.3 days from end of the window'
if float(ll['cadence'][jj])-.3 <= float(_jd)-float(jd0):
print 'trigger new observation'
trigger=True
else:
print 'last observation less than '+str(ll['cadence'][jj])+' days ago, do not trigger'
else:
print 'no trigger for this '+str(activeid)
trigger = True
if trigger:
SN_RA = ll['ra_sn'][jj]
SN_DEC = ll['dec_sn'][jj]
NAME = ll['name'][jj]
_airmass = ll['airmass'][jj]
_proposal = ll['proposal'][jj]
_site = ll['site'][jj]
_targid = ll['targid'][jj]
_mode = ll['mode'][jj]
proposals = agnkey.util.readpass['proposal']
users = agnkey.util.readpass['users']
token = agnkey.util.readpass['token']
if not _proposal:
_proposal=proposals[0]
_user0=users[0]
else:
_user0=users[proposals.index(_proposal)]
passwd=agnkey.util.readpass['odinpasswd']
datenow = datetime.datetime(time.gmtime().tm_year, time.gmtime().tm_mon, time.gmtime().tm_mday,\
time.gmtime().tm_hour, time.gmtime().tm_min, time.gmtime().tm_sec)
datenow = datenow + datetime.timedelta(2./1440.)
if float(ll['cadence'][jj])<1:
print 'cadence less than 24h'
dd1 = datenow + datetime.timedelta(float(ll['cadence'][jj]))
dd2 = datenow + datetime.timedelta(float(ll['cadence'][jj]))*2
dd3 = datenow + datetime.timedelta(float(ll['cadence'][jj]))*3
utstart = [datenow,dd1,dd2]
utend = [dd1,dd2,dd3]
else:
utstart = [datenow]
utend = [datenow + datetime.timedelta(1)]
################# loop on triggers
for mm,nn in enumerate(utstart):
if ll['filters'][jj] == 'floyds':
expvec = ll['exptime'][jj]
nexpvec = ll['numexp'][jj]
_slit = str(ll['slit'][jj])
_acmode = str(ll['acqmode'][jj])
if _acmode != 'brightest':
_acmode ='wcs'
print 'trigger floyds observations'
print str(NAME),expvec,str(SN_RA),str(SN_DEC),str(utstart[mm]),str(utend[mm]),_user0,token,\
_proposal,str(_airmass),_site,_slit,'after', nexpvec
logfile,pp = agnkey.util.sendfloydstrigger_new(str(NAME),expvec,str(SN_RA),str(SN_DEC),\
str(utstart[mm]),str(utend[mm]),_user0, token,
_proposal,_lunar,str(_airmass),_site,_slit,'after',\
nexpvec, _acmode, mode= _mode )
print logfile
try:
input_datesub, input_str_smjd, input_str_emjd, _site2, _instrument2, _nexp2, _exp2, _airmass2,\
_prop2, _user2, _seeing2, _sky2, _priority2, tracknum, reqnum = string.split(logfile)
dictionary={'targid':int(_targid), 'triggerjd':float(input_datesub),'windowstart':float(input_str_smjd),\
'windowend':float(input_str_emjd), 'reqnumber':int(reqnum),'tracknumber':int(tracknum),\
'triggerid':activeid}
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,'triggerslog',dictionary)
except:
Warningdictionary[str(ll['id'][jj])]=''+\
'\n### id = ' + str(ll['id'][jj])+\
'\n### name = ' + ll['name'][jj]+\
'\n### filters = '+str(ll['filters'][jj])+\
'\n### instrument = '+str(ll['instrument'][jj])+\
'\n### cadence = '+str(ll['cadence'][jj])+\
'\n### trigger = '+ str(activeid)+'\n\n'+str(pp)
else:
filtvec = string.split(ll['filters'][jj],',')
nexpvec = string.split(ll['numexp'][jj],',')
expvec = string.split(ll['exptime'][jj],',')
_instrument = ll['instrument'][jj]
print 'trigger photometric observations'
print str(NAME),str(SN_RA),str(SN_DEC),expvec,nexpvec,filtvec,str(utstart[mm]),str(utend[mm]),\
_user0,token,_proposal,_instrument,_airmass,_site,_mode
logfile,python_dict = agnkey.util.sendtrigger2_new(str(NAME),str(SN_RA),str(SN_DEC),\
expvec,nexpvec, filtvec,str(utstart[mm]),\
str(utend[mm]),_user0, token, _proposal,\
_instrument,_airmass, _lunar, _site, mode= _mode )
print logfile
print python_dict
good = False
if logfile:
input_datesub, input_str_smjd, input_str_emjd, _site2, _filters2, _nexp2, _exp2, _airmass2,\
_prop2, _user2, _seeing2, _sky2, _instrument2, _priority2, tracknum, reqnum = string.split(logfile)
if int(tracknum) !=0:
print logfile
good =True
if good:
dictionary={'targid':int(_targid),'triggerjd':float(input_datesub),'windowstart':float(input_str_smjd),\
'windowend':float(input_str_emjd),'reqnumber':int(reqnum),'tracknumber':int(tracknum),\
'triggerid':activeid}
agnkey.agnsqldef.insert_values(agnkey.agnsqldef.conn,'triggerslog', dictionary)
else:
Warningdictionary[str(ll['id'][jj])]=''+\
'\n### id = ' + str(ll['id'][jj])+\
'\n### name = ' + ll['name'][jj]+\
'\n### filters = '+str(ll['filters'][jj])+\
'\n### instrument = '+str(ll['instrument'][jj])+\
'\n### cadence = '+str(ll['cadence'][jj])+\
'\n### mode = '+str(ll['mode'][jj])+\
'\n### trigger = '+ str(activeid)+\
'\n### log = '+str(logfile)+\
'\n### python_dict = '+str(python_dict)+\
'\n\n'
else:
print 'no active objects'
print Warningdictionary
if len(Warningdictionary):
_from = '[email protected]'
_to1 = '[email protected]'
_subject = 'agnkey warning '
text = ''
for jj in Warningdictionary:
text = text + Warningdictionary[jj]
agnkey.util.sendemail(_from,_to1,_subject,text)
#raw_input('stop here')
| mit | -5,159,076,196,331,648,000 | 46.620896 | 204 | 0.474519 | false | 3.977312 | false | false | false |
jemofthewest/GalaxyMage | src/Main.py | 1 | 5206 | ## Automatically adapted for numpy.oldnumeric Jul 22, 2012 by
# Copyright (C) 2005 Colin McMillen <[email protected]>
#
# This file is part of GalaxyMage.
#
# GalaxyMage is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GalaxyMage is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalaxyMage; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import os
import sys
import logging
import platform
import optparse
"""Version number is MAJOR.MINOR.REVISION, optionally followed by a
hyphen and some free-form text, like 'alpha' or 'prerelease'."""
__version__ = "0.3.0"
def dependencyCheck():
"""Check to make sure that external dependencies can be loaded
properly."""
logging.debug('Platform: ' + platform.platform())
logging.debug('Python version ' + sys.version)
try:
import numpy.oldnumeric as Numeric
logging.debug('Numeric version ' + Numeric.__version__)
except ImportError, err:
logging.error('Loading dependency "Numeric" failed: ' + str(err))
sys.exit(1)
try:
import pygame
logging.debug('pygame version ' + pygame.version.ver)
except ImportError, err:
logging.error('Loading dependency "pygame" failed: ' + str(err))
sys.exit(1)
try:
import OpenGL.GL
logging.debug('PyOpenGL version ' + OpenGL.__version__)
except ImportError, err:
logging.error('Loading dependency "OpenGL.GL" failed: ' + str(err))
sys.exit(1)
try:
import OpenGL.GLU
except ImportError, err:
logging.error('Loading dependency "OpenGL.GLU" failed: ' + str(err))
sys.exit(1)
try:
import twisted
logging.debug('Twisted version ' + twisted.__version__)
except ImportError, err:
logging.error('Loading dependency "twisted" failed: ' + str(err))
sys.exit(1)
def main():
"""Parse options and run the program accordingly."""
print 'GalaxyMage', __version__
import Translate
# init translate
translateConfig = Translate.Translate()
# Parse command-line options
parser = optparse.OptionParser(description="Cross-platform, open-source tactical RPG.")
parser.add_option("--fullscreen", "-f",
action="store_true", default=False,
help="start in fullscreen mode")
parser.add_option("--quiet", "-q", action="store_true", default=False,
help="disable sounds and music")
parser.add_option("--disable-jit", "-j",
dest="useJIT", action="store_false", default=True,
help='disable "psyco" just-in-time compiler')
parser.add_option("--verbose", "-v", action="count", default=0,
help='increase logging verbosity')
parser.add_option("-w", dest="width", type="int",
default=800, metavar="WIDTH",
help='initial window width [default: %default]')
parser.add_option("--edit-map", "-e", action="store", default=None,
metavar="MAPNAME",
help='start the map editor')
parser.add_option("--port", "-P", type='int', default=22222,
help='game server port [default: %default]')
parser.add_option("--lang", "-l", default="en",
help="set language")
parser.add_option("--user", default=os.environ.get('USER', 'Player'),
help="set username for multiplayer")
(options, args) = parser.parse_args()
# Enable logging
import Log
logLevel = logging.INFO - options.verbose * 10
logLevel = max(logLevel, 1)
Log.setUpLogging(logLevel)
#translateConfig.setLanguage(options.lang)
# Check to make sure we can load dependencies
dependencyCheck()
# Import Psyco if available
if False and options.useJIT: # FIXME: re-enable psyco
try:
import psyco
logging.debug('Enabled "psyco" just-in-time Python compiler')
psyco.full()
except ImportError:
logging.debug('"psyco" just-in-time Python compiler not found')
# Set up PyGame
import pygame
pygame.display.init()
pygame.font.init()
pygame.joystick.init()
try:
pygame.mixer.init(48000, -16, True, 4096)
except pygame.error, e:
options.quiet = True
logging.warn("Couldn't initialize sound: " + str(e))
# Import our own modules
import Resources
import Sound
import twistedmain
# Set texture size
Resources.texture.setTextureSize(64)
# Initialize Sound
Sound.setQuiet(options.quiet)
twistedmain.run(options)
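# Example launches (flags correspond to the optparse options defined in main()):
#
#     python Main.py --fullscreen -w 1024 --lang en
#     python Main.py --edit-map mymapname   # start the map editor instead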
| gpl-2.0 | -9,194,749,660,764,425,000 | 33.939597 | 91 | 0.629466 | false | 4.092767 | false | false | false |
hamogu/marxs | marxs/optics/baffle.py | 2 | 2038 | # Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
from .base import FlatOpticalElement
from ..math.utils import h2e
from ..visualization.utils import plane_with_hole
class Baffle(FlatOpticalElement):
    '''Plate with a rectangular hole that allows photons through.
    Photons that miss the hole have their probability set to 0; see
    :meth:`process_photons` for the per-photon bookkeeping.
    '''
display = {'color': (1., 0.5, 0.4),
'outer_factor': 3,
'shape': 'plane with hole'}
def process_photons(self, photons, intersect, intercoos, interpoos):
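        '''Propagate photons through the hole and absorb the rest.
        Parameters
        ----------
        photons : astropy Table
            table that includes information on all of the photons
        intersect : np.array of bool
            ``True`` for photons that pass through the hole
        intercoos : np.array
            intersection coordinates, written to ``photons['pos']``
        interpoos : np.array
            intersection positions in element coordinates (unused here)
        '''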
photons['pos'][intersect] = intercoos[intersect]
photons['probability'][~intersect] = 0
return photons
def triangulate_inner_outer(self):
'''Return a triangulation of the baffle hole embedded in a square.
The size of the outer square is determined by the ``'outer_factor'`` element
in ``self.display``.
Returns
-------
xyz : np.array
            Numpy array of vertex positions in Euclidean space
triangles : np.array
Array of index numbers that define triangles
'''
r_out = self.display.get('outer_factor', 3)
g = self.geometry
outer = h2e(g['center']) + r_out * np.vstack([h2e( g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) - h2e(g['v_y']),
h2e( g['v_x']) - h2e(g['v_y'])
])
inner = h2e(g['center']) + np.vstack([h2e( g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) + h2e(g['v_y']),
h2e(-g['v_x']) - h2e(g['v_y']),
h2e( g['v_x']) - h2e(g['v_y'])
])
return plane_with_hole(outer, inner)
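# Usage sketch (keyword arguments follow the usual FlatOpticalElement
# interface; the numbers are arbitrary examples):
#
#     baffle = Baffle(zoom=[1, 100, 100], position=[500, 0, 0])
#     photons = baffle(photons)  # sets probability of blocked photons to 0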
| gpl-3.0 | 2,371,856,094,042,839,000 | 36.054545 | 85 | 0.490677 | false | 3.685353 | false | false | false |
sourlows/rating-cruncher | src/app/api/leaderboard.py | 1 | 2094 | from app.api.base import BaseAuthResource, StringArgument, IntegerArgument, DatastoreCursorArgument
from app.participant.models import ParticipantModel
from flask_restful import fields, marshal
LEADERBOARD_TEMPLATE = {
'name': fields.String,
'rating': fields.Float,
'games_played': fields.Integer,
'wins': fields.Integer,
'losses': fields.Integer,
'ties': fields.Integer,
'k_factor': fields.Integer,
}
SORT_ASCENDING = 'ascending'
SORT_DESCENDING = 'descending'
SORT_OPTIONS = ['name', 'rating', 'games_played', 'k_factor', 'wins', 'losses', 'ties']
class LeaderboardAPI(BaseAuthResource):
ARGUMENTS = frozenset([
StringArgument('sort_direction'),
StringArgument('sort_by'),
IntegerArgument('page_size'),
DatastoreCursorArgument('cursor'),
])
def get(self, league_id):
""" Return a sorted leaderboard representing participants in a league """
sort_by = self.args.get('sort_by')
sort_direction = self.args.get('sort_direction')
cursor = self.args.get('cursor')
page_size = self.args.get('page_size')
if sort_by and hasattr(ParticipantModel, sort_by) and sort_by in SORT_OPTIONS:
sort_by = getattr(ParticipantModel, sort_by)
elif sort_by is None:
sort_by = getattr(ParticipantModel, 'rating')
else:
return 'Invalid sort_by option %s' % sort_by, 400
if sort_direction == SORT_DESCENDING or not sort_direction:
leaderboard = ParticipantModel.query(getattr(ParticipantModel, 'league_id') == league_id).order(-sort_by)
elif sort_direction == SORT_ASCENDING:
leaderboard = ParticipantModel.query(getattr(ParticipantModel, 'league_id') == league_id).order(sort_by)
else:
return 'Invalid sort direction %s' % sort_direction, 400
results, cursor, more = leaderboard.fetch_page(page_size=page_size, start_cursor=cursor)
return {
'leaderboard': [marshal(l, LEADERBOARD_TEMPLATE) for l in results],
'cursor': cursor
}
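# Example request this resource serves (the query parameters map onto the
# ARGUMENTS declared above; the URL prefix is an assumption, since route
# registration lives outside this module):
#
#     GET /league/<league_id>/leaderboard?sort_by=rating&sort_direction=descending&page_size=20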
| apache-2.0 | -6,152,879,241,195,459,000 | 37.777778 | 117 | 0.653295 | false | 3.725979 | false | false | false |
imallett/MOSS | source/x86/scripts/_build.py | 1 | 6833 | import hashlib
import os
try: import cPickle as pickle #Only present in Python 2.*; Python 3 automatically imports the
except: import pickle as pickle #new equivalent of cPickle, if it's available.
from subprocess import call
import _paths
#Directories
def get_abs_path(rel_path):
return os.path.abspath(rel_path).replace("\\","/") + "/"
def get_abs_path_from(directory,rel_path):
return os.path.normpath(directory+rel_path).replace("\\","/")
_scripts_dir = os.path.dirname(__file__)
root = get_abs_path(os.path.join(_scripts_dir,"../"))
root_build = root+".build/"
root_source = root+"source_moss/"
#Commands and Arguments
# TODO: do we need -nostartfiles?
args_compile = "-ffreestanding -O0 -Wall -Wextra -Wno-packed-bitfield-compat -fstack-protector-all -fno-exceptions -fno-rtti -std=c++11"
args_link = "-ffreestanding -O0 -nostdlib"
command_gcc = os.path.join(_paths.cross_dir,"i686-elf-gcc")
command_gpp = os.path.join(_paths.cross_dir,"i686-elf-g++")
command_nasm = "nasm"
#Types
class FileBase(object):
TYPE_HEADER = 0
TYPE_SOURCE = 1
def __init__(self, directory,name, type):
self.directory = directory
self.name = name
self.path = directory + name
self.type = type
self.changed = None
file=open(self.path,"rb"); data_str=file.read(); file.close()
self.hash = hashlib.sha224(data_str).hexdigest() #http://docs.python.org/2/library/hashlib.html
self.user_includes = []
def add_includes_to(self, file):
if self not in file.user_includes:
file.user_includes.append(self)
for include in self.user_includes:
include.add_includes_to(file)
class FileHeader(FileBase):
def __init__(self, directory,name):
FileBase.__init__(self, directory,name, FileBase.TYPE_HEADER)
class FileSource(FileBase): #and ASM
def __init__(self, directory,name):
FileBase.__init__(self, directory,name, FileBase.TYPE_SOURCE)
self.needs_compile = None
#Enumerate files to build system recursively
files = []
def add_files_directory(directory):
for name in os.listdir(directory):
path = directory + name #os.path.join(directory,name)
if os.path.isfile(path):
if path.endswith(".cpp") or path.endswith(".asm"): files.append(FileSource(directory,name))
else: files.append(FileHeader(directory,name)) #Headers (with or without extension)
elif os.path.isdir(path):
add_files_directory(path+"/")
add_files_directory(root_source)
#Figure out which have changed
for file in files:
file.changed = True #Assume the worst
if os.path.exists(root_build+"_cache.txt"):
file=open(root_build+"_cache.txt","rb"); data_str=file.read(); file.close()
file_hashes = pickle.loads(data_str) #map of path onto hash
for file in files:
if file.path in file_hashes.keys():
if file.hash == file_hashes[file.path]:
file.changed = False
#Figure out which need to be recompiled. This is every changed source file, plus every source file
# that includes a changed header.
changed_some_headers = False
for file in files:
if file.changed and file.type==FileBase.TYPE_HEADER:
changed_some_headers = True
break
if changed_some_headers: #optimization
for file in files:
fobj=open(file.path,"r"); file_data=fobj.read(); fobj.close()
for line in file_data.split("\n"):
line2 = line.strip().split("//")[0]
if "#include" in line2:
included_rel = line2.split("#include")[1]
i=0; j=len(included_rel)-1
while not (included_rel[i]=="\"" or included_rel[i]=="<"): i+=1
while not (included_rel[j]=="\"" or included_rel[j]==">"): j-=1
included_rel = included_rel[i:j+1]
if included_rel[0] == "<": continue #Not a user include; assume it can't change
included_abs = get_abs_path_from(file.directory,included_rel[1:-1])
found = False
for file2 in files:
if file2.path == included_abs:
found = True
break
assert found, "Could not find \"#include\"d file \"%s\"!" % included_abs
file.user_includes.append(file2)
for file in files:
for include in file.user_includes:
include.add_includes_to(file)
for file in files:
file.needs_compile = False
if file.type == FileBase.TYPE_SOURCE:
if file.changed:
file.needs_compile = True
else:
for include in file.user_includes:
if include.changed:
file.needs_compile = True
break
#Compile everything that needs compiling
link_files = []
def run(command):
#print(command)
call(command)
def get_arg_list(arg_str):
l = arg_str.split(" ")
l2 = []
for a in l:
if a=="": continue
l2.append(a)
return l2
def get_create_out_path(file):
out_path = root_build + file.path[len(root_source):]+"_obj"
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_path
def compile_cpp(file,out_path):
print(" Compiling: \""+file.path+"\"")
command = [command_gpp,"-c",file.path,"-o",out_path]
command += get_arg_list(args_compile)
run(command)
def assemble_asm(file,out_path):
print(" Assembling: \""+file.path+"\"")
command = [command_nasm,"-felf",file.path,"-o",out_path]
run(command)
def link():
command = [command_gcc,"-T",root_source+"linker.ld","-o",root_build+"MOSS.bin"]
command += get_arg_list(args_link)
for file in link_files: command.append(file)
command.append("-lgcc")
run(command)
try:
print(" Compiling:")
skipped = 0
for file in files:
out_path = get_create_out_path(file)
if file.needs_compile:
if file.name.endswith(".cpp"): compile_cpp(file,out_path)
elif file.name.endswith(".asm"): assemble_asm(file,out_path)
else: assert False
file.needs_compile = False
else:
skipped += 1
if file.type == FileBase.TYPE_SOURCE:
link_files.append(out_path)
if skipped > 0:
print(" Skipped %d files" % (skipped))
print(" Linking")
link()
except KeyboardInterrupt:
print(" Aborting")
#Save compiled cache
file_hashes = {}
for file in files:
if not file.needs_compile:
file_hashes[file.path] = file.hash
data_str = pickle.dumps(file_hashes)
file=open(root_build+"_cache.txt","wb"); file.write(data_str); file.close()
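# Invoked from the scripts directory of a checkout; the cross-compiler
# location comes from _paths.py, which is not shown here:
#
#     python _build.py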
| mit | -1,722,869,990,907,578,400 | 34.774869 | 144 | 0.605444 | false | 3.5077 | false | false | false |
wolfthefallen/king-phisher | king_phisher/templates.py | 4 | 11551 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/templates.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import codecs
import datetime
import hashlib
import html
import json
import logging
import os
import random
import re
from king_phisher import find
from king_phisher import its
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher import version
import boltons.strutils
import jinja2
import requests
import requests.exceptions
import requests_file
__all__ = ('FindFileSystemLoader', 'TemplateEnvironmentBase', 'MessageTemplateEnvironment')
class FindFileSystemLoader(jinja2.BaseLoader):
"""
A :py:class:`~jinja2.BaseLoader` which loads templates by name from the file
system. Templates are searched for using the
:py:func:`~king_phisher.find.data_file` function.
"""
def get_source(self, environment, template):
template_path = find.data_file(template, os.R_OK)
if template_path is None:
raise jinja2.TemplateNotFound(template)
mtime = os.path.getmtime(template_path)
with codecs.open(template_path, 'r', encoding='utf-8') as file_h:
source = file_h.read()
return source, template_path, lambda: mtime == os.path.getmtime(template_path)
class TemplateEnvironmentBase(jinja2.Environment):
"""
A configured Jinja2 :py:class:`~jinja2.Environment` with additional filters
and default settings.
"""
def __init__(self, loader=None, global_vars=None):
"""
:param loader: The loader to supply to the environment.
:type loader: :py:class:`jinja2.BaseLoader`
:param dict global_vars: Additional global variables for the environment.
"""
self.logger = logging.getLogger('KingPhisher.TemplateEnvironment')
autoescape = jinja2.select_autoescape(['html', 'htm', 'xml'], default_for_string=False)
extensions = ['jinja2.ext.autoescape', 'jinja2.ext.do']
super(TemplateEnvironmentBase, self).__init__(autoescape=autoescape, extensions=extensions, loader=loader, trim_blocks=True)
# misc. string filters
self.filters['cardinalize'] = boltons.strutils.cardinalize
self.filters['ordinalize'] = boltons.strutils.ordinalize
self.filters['pluralize'] = boltons.strutils.pluralize
self.filters['singularize'] = boltons.strutils.singularize
self.filters['possessive'] = lambda word: word + ('\'' if word.endswith('s') else '\'s')
self.filters['encode'] = self._filter_encode
self.filters['decode'] = self._filter_decode
self.filters['hash'] = self._filter_hash
		# counterpart to https://jinja.readthedocs.io/en/stable/templates.html#tojson
self.filters['fromjson'] = self._filter_json
# time filters
self.filters['strftime'] = self._filter_strftime
self.filters['timedelta'] = self._filter_timedelta
self.filters['tomorrow'] = lambda dt: dt + datetime.timedelta(days=1)
self.filters['next_week'] = lambda dt: dt + datetime.timedelta(weeks=1)
self.filters['next_month'] = lambda dt: dt + datetime.timedelta(days=30)
self.filters['next_year'] = lambda dt: dt + datetime.timedelta(days=365)
self.filters['yesterday'] = lambda dt: dt + datetime.timedelta(days=-1)
self.filters['last_week'] = lambda dt: dt + datetime.timedelta(weeks=-1)
self.filters['last_month'] = lambda dt: dt + datetime.timedelta(days=-30)
self.filters['last_year'] = lambda dt: dt + datetime.timedelta(days=-365)
# global variables
self.globals['version'] = version.version
# global functions
self.globals['fetch'] = self._func_fetch
self.globals['parse_user_agent'] = ua_parser.parse_user_agent
self.globals['password_is_complex'] = utilities.password_is_complex
self.globals['random_integer'] = random.randint
# additional globals
self.globals.update(global_vars or {})
def from_file(self, path, **kwargs):
"""
A convenience method to load template data from a specified file,
passing it to :py:meth:`~jinja2.Environment.from_string`.
.. warning::
Because this method ultimately passes the template data to the
:py:meth:`~jinja2.Environment.from_string` method, the data will not
be automatically escaped based on the file extension as it would be
when using :py:meth:`~jinja2.Environment.get_template`.
:param str path: The path from which to load the template data.
:param kwargs: Additional keyword arguments to pass to :py:meth:`~jinja2.Environment.from_string`.
"""
with codecs.open(path, 'r', encoding='utf-8') as file_h:
source = file_h.read()
return self.from_string(source, **kwargs)
def join_path(self, template, parent):
"""
        Override the default :py:meth:`jinja2.Environment.join_path` method to
        allow explicitly specifying relative paths by prefixing the path with
        either "./" or "../".
:param str template: The path of the requested template file.
:param str parent: The path of the template file which requested the load.
:return: The new path to the template.
:rtype: str
"""
if re.match(r'\.\.?/', template) is None:
return template
template = os.path.join(os.path.dirname(parent), template)
return os.path.normpath(template)
@property
def standard_variables(self):
"""
Additional standard variables that can optionally be used in templates.
"""
std_vars = {
'time': {
'local': datetime.datetime.now(),
'utc': datetime.datetime.utcnow()
}
}
return std_vars
def _filter_decode(self, data, encoding):
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
encoding = encoding.lower()
encoding = re.sub(r'^(base|rot)-(\d\d)$', r'\1\2', encoding)
if encoding == 'base16' or encoding == 'hex':
data = base64.b16decode(data)
elif encoding == 'base32':
data = base64.b32decode(data)
elif encoding == 'base64':
data = base64.b64decode(data)
elif encoding == 'rot13':
data = codecs.getdecoder('rot-13')(data)[0]
else:
raise ValueError('Unknown encoding type: ' + encoding)
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
return data
def _filter_encode(self, data, encoding):
if its.py_v3 and isinstance(data, str):
data = data.encode('utf-8')
encoding = encoding.lower()
encoding = re.sub(r'^(base|rot)-(\d\d)$', r'\1\2', encoding)
if encoding == 'base16' or encoding == 'hex':
data = base64.b16encode(data)
elif encoding == 'base32':
data = base64.b32encode(data)
elif encoding == 'base64':
data = base64.b64encode(data)
elif encoding == 'rot13':
data = codecs.getencoder('rot-13')(data.decode('utf-8'))[0]
else:
raise ValueError('Unknown encoding type: ' + encoding)
if its.py_v3 and isinstance(data, bytes):
data = data.decode('utf-8')
return data
def _filter_hash(self, data, hash_type):
if its.py_v3 and isinstance(data, str):
data = data.encode('utf-8')
hash_type = hash_type.lower()
hash_type = hash_type.replace('-', '')
hash_obj = hashlib.new(hash_type, data)
return hash_obj.digest()
def _filter_json(self, data):
try:
data = json.loads(data)
except json.JSONDecodeError:
self.logger.error('template failed to load json data')
data = None
return data
def _filter_strftime(self, dt, fmt):
try:
result = dt.strftime(fmt)
except ValueError:
self.logger.error("invalid time format '{0}'".format(fmt))
result = ''
return result
def _filter_timedelta(self, dt, *args, **kwargs):
try:
result = dt + datetime.timedelta(*args, **kwargs)
except ValueError:
self.logger.error('invalid timedelta specification')
result = ''
return result
def _func_fetch(self, url, allow_file=False):
session = requests.Session()
if allow_file:
session.mount('file://', requests_file.FileAdapter())
try:
response = session.get(url)
except requests.exceptions.RequestException:
self.logger.error('template failed to load url: ' + url)
return None
return response.text
class MessageTemplateEnvironment(TemplateEnvironmentBase):
"""A configured Jinja2 environment for formatting messages."""
MODE_PREVIEW = 0
MODE_ANALYZE = 1
MODE_SEND = 2
def __init__(self, *args, **kwargs):
super(MessageTemplateEnvironment, self).__init__(*args, **kwargs)
self._mode = None
self.set_mode(self.MODE_PREVIEW)
self.globals['inline_image'] = self._inline_image_handler
self.attachment_images = {}
"""A dictionary collecting the images that are going to be embedded and sent inline in the message."""
def set_mode(self, mode):
"""
Set the operation mode for the environment. Valid values are the MODE_*
constants.
:param int mode: The operation mode.
"""
if mode not in (self.MODE_PREVIEW, self.MODE_ANALYZE, self.MODE_SEND):
raise ValueError('mode must be one of the MODE_* constants')
self._mode = mode
if mode == self.MODE_ANALYZE:
self.attachment_images = {}
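    # Example (a sketch): a message template can embed an image through the
    # 'inline_image' global registered in __init__, e.g.
    #   {{ inline_image('logo.png', style='width:100px') }}
    # In MODE_SEND the handler records the file in attachment_images so the
    # mailer can attach it and reference it through a cid: URL.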
def _inline_image_handler(self, image_path, style=None, alt=None):
image_path = os.path.abspath(image_path)
if not os.path.isfile(image_path):
self.logger.warning('the specified inline image path is not a file')
elif not os.access(image_path, os.R_OK):
self.logger.warning('the specified inline image path can not be read')
if self._mode == self.MODE_PREVIEW:
if os.path.sep == '\\':
image_path = '/'.join(image_path.split('\\'))
if not image_path.startswith('/'):
image_path = '/' + image_path
image_path = 'file://' + image_path
else:
if image_path in self.attachment_images:
attachment_name = self.attachment_images[image_path]
else:
attachment_name = 'img_' + utilities.random_string_lower_numeric(8) + os.path.splitext(image_path)[-1]
while attachment_name in self.attachment_images.values():
attachment_name = 'img_' + utilities.random_string_lower_numeric(8) + os.path.splitext(image_path)[-1]
self.attachment_images[image_path] = attachment_name
image_path = 'cid:' + attachment_name
image_path = html.escape(image_path, quote=True)
img_tag = "<img src=\"{0}\"".format(image_path)
if style is not None:
img_tag += " style=\"{0}\"".format(html.escape(str(style), quote=True))
if alt is not None:
img_tag += " alt=\"{0}\"".format(html.escape(str(alt), quote=True))
img_tag += '>'
return img_tag
| bsd-3-clause | -2,517,951,449,152,028,000 | 36.26129 | 126 | 0.710415 | false | 3.349087 | false | false | false |
alfredodeza/ceph-doctor | setup.py | 1 | 3740 | from setuptools import setup, find_packages, Command
import re
import sys
import subprocess
install_requires = []
pyversion = sys.version_info[:2]
def read_module_contents():
with open('ceph_medic/__init__.py') as f:
return f.read()
module_file = read_module_contents()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*'([^']+)'", module_file))
long_description = open('README.rst').read()
version = metadata['version']
class BumpCommand(Command):
""" Bump the __version__ number and commit all changes. """
user_options = [('version=', 'v', 'version number to use')]
def initialize_options(self):
new_version = metadata['version'].split('.')
new_version[-1] = str(int(new_version[-1]) + 1) # Bump the final part
self.version = ".".join(new_version)
def finalize_options(self):
pass
def run(self):
try:
print('old version: %s new version: %s' %
(metadata['version'], self.version))
raw_input('Press enter to confirm, or ctrl-c to exit >')
except KeyboardInterrupt:
raise SystemExit("\nNot proceeding")
old = "__version__ = '%s'" % metadata['version']
new = "__version__ = '%s'" % self.version
module_file = read_module_contents()
with open('ceph_medic/__init__.py', 'w') as fileh:
fileh.write(module_file.replace(old, new))
# Commit everything with a standard commit message
cmd = ['git', 'commit', '-a', '-m', 'version %s' % self.version]
print(' '.join(cmd))
subprocess.check_call(cmd)
class ReleaseCommand(Command):
""" Tag and push a new release. """
user_options = [('sign', 's', 'GPG-sign the Git tag and release files')]
def initialize_options(self):
self.sign = False
def finalize_options(self):
pass
def run(self):
# Create Git tag
tag_name = 'v%s' % version
cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
if self.sign:
cmd.append('-s')
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push Git tag to origin remote
cmd = ['git', 'push', 'origin', tag_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push package to pypi
cmd = ['python', 'setup.py', 'sdist', 'upload']
if self.sign:
cmd.append('--sign')
print(' '.join(cmd))
#subprocess.check_call(cmd)
# Push master to the remote
cmd = ['git', 'push', 'origin', 'master']
print(' '.join(cmd))
subprocess.check_call(cmd)
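# Typical invocation of the custom commands above (a sketch; adjust to your
# release process):
#   python setup.py bump            # increment the patch version and commit
#   python setup.py release --sign  # tag, push and upload a signed sdist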
setup(
name='ceph-medic',
version=version,
packages=find_packages(),
author='Alfredo Deza',
author_email='[email protected]',
description='detect common issues with ceph clusters',
long_description=long_description,
license='MIT',
keywords='ceph doctor',
url="https://github.com/ceph/ceph-medic",
zip_safe=False,
install_requires=[
'execnet',
'tambo',
'remoto>=1.1.2',
] + install_requires,
tests_require=[
'pytest >=2.1.3',
'tox',
'mock',
],
scripts=['bin/ceph-medic'],
cmdclass={'bump': BumpCommand, 'release': ReleaseCommand},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Build Tools',
'Topic :: Utilities',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
]
)
| mit | 5,626,192,557,359,190,000 | 26.703704 | 78 | 0.563102 | false | 3.777778 | false | false | false |
yaukwankiu/armor | video/makeVideo.py | 1 | 14964 | # -*- coding: utf-8 -*-
"""
Module to convert videos from jpgs or pdfs
USE:
cd /media/KINGSTON/ARMOR/python
python
from armor.video import makeVideo as mv
reload(mv); mv.main()
mv.main(inputDate='2013-07-12', inputType='satellite1')
mv.main(inputDate='2013-07-12', inputType='satellite4')
import time
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='rainfall1')
reload(mv); mv.makeVideoAll(inputType='satellite2')
reload(mv); mv.makeVideoAll(inputType='charts')
print '\n\ntime spent all in all:', time.time()-t0, '\n\n\n'
time.sleep(10)
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='temperature')
reload(mv); mv.makeVideoAll(inputType='charts2')
reload(mv); mv.makeVideoAll(inputType='rainfall2')
#reload(mv); mv.makeVideoAll(inputType='satellite1')
reload(mv); mv.makeVideoAll(inputType='satellite3')
#reload(mv); mv.makeVideoAll(inputType='satellite4')
print 'time spent all in all:', time.time()-t0
import time
t0=time.time()
reload(mv); mv.makeVideoAll(inputType='rainfall1') ; mv.makeVideoAll(inputType = 'satellite2') ; mv.makeVideoAll(inputType='charts')
print 'time spent all in all:', time.time()-t0
and check /media/Seagate\ Expansion\ Drive/ARMOR/sandbox
or something like that
References
1. http://stackoverflow.com/questions/5772831/python-library-to-create-a-video-file-from-images
2. http://stackoverflow.com/questions/5772831/python-library-to-create-a-video-file-from-images
3. http://stackoverflow.com/questions/753190/programmatically-generate-video-or-animated-gif-in-python
4. http://opencv.willowgarage.com/documentation/reading_and_writing_images_and_video.html
5. http://stackoverflow.com/questions/12290023/opencv-2-4-in-python-video-processing/12333066#12333066
"
#THE FOLLOWING CODES ARE FROM REFERENCE 3 ABOVE
To create a video, you could use opencv,
#load your frames
frames = ...
#create a video writer
writer = cvCreateVideoWriter(filename, -1, fps, frame_size, is_color=1)
#and write your frames in a loop if you want
cvWriteFrame(writer, frames[i])
"
"""
#################
# imports
import time
import os
import numpy as np
from matplotlib import pyplot as plt
#from PIL import Image
try:
from scipy.misc import imread
except:
from matplotlib import image as mpimg
imread = mpimg.imread
import cv, cv2
from armor import pattern
dbz = pattern.DBZ
##################
# setup
from .. defaultParameters import *
dataRoot = externalHardDriveRoot + '../Work/CWB/'
defaultDate = '2013-07-12'
defaultType = 'charts'
defaultInputFolder = dataRoot + defaultType + '/' + defaultDate +'/'
defaultOutputFolder = externalHardDriveRoot + 'sandbox/'
defaultFrameSize = (600,600)
defaultFps =5
def getList(folder, extensions=['.txt','.dat']):
try:
L = os.listdir(folder)
L = [v for v in L if v[-4:].lower() in extensions]
#print L
L.sort()
return L
except:
print 'getList ERROR!!!!!!'
def makeDBZimages(inputFolder=defaultInputFolder,
outputFolder=defaultOutputFolder, extensions=['.txt', '.dat']):
L = getList(folder=inputFolder, extensions=extensions)
for fileName in L:
a = dbz(name=fileName, dataPath=inputFolder+fileName,
imagePath=defaultOutputFolder+fileName)
a.load()
a.saveImage()
def loadImages(inputFolder=defaultOutputFolder, extensions=['.png', '.jpg']):
"""yes that's right
inputFolder=defaultOutputFolder
because we expect the pics to be in the sandbox (i.e. default output folder)
"""
try:
L = getList(folder=inputFolder, extensions=extensions)
#print inputFolder
#print L
#print extensions
        imageList = []
        for n, fileName in enumerate(L):
            #img = Image.open(inputFolder+fileName) # doesn't work
            #imageList[n] = cv.LoadImage(inputFolder+fileName) #old
            try:
                # converted to cv2; skip unreadable files so that no empty
                # placeholder ends up in the returned frame list
                imageList.append(imread(inputFolder+fileName))
                print n, inputFolder, fileName
            except:
                print n, inputFolder, fileName, "loadImages ERROR!!!!!!!!!!!!!!!!"
        return imageList
except:
print "loadImages ERROR!!!!!!!!"
def makeVideo(imageList,
outputPath= defaultOutputFolder+ str(int(time.time()))+'.avi',
fourcc=cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
frameSize=defaultFrameSize):
#print imageList
# create a video writer
# c.f. http://opencv.willowgarage.com/documentation/python/reading_and_writing_images_and_video.html
#fourcc=cv.FOURCC('P','I','M','1'), doesn't work?
#writer = cv.CreateVideoWriter(filename=outputFolder+inputDate+'_'+inputType+'.avi',
# fourcc=cv.FOURCC('F', 'L', 'V', '1'),
# fps=1, frame_size=(600,600), is_color=1)
#and write your frames in a loop if you want
# the above don't work. replace by the following.
# http://stackoverflow.com/questions/12290023/opencv-2-4-in-python-video-processing/12333066#12333066
time0 = time.time()
writer = cv2.VideoWriter(filename=outputPath,
fourcc=fourcc,
fps=fps,
frameSize=frameSize)
for frame in imageList:
#print frame
#cv.ShowImage(str(frame), frame)
#cv.WaitKey()
#cv.WriteFrame(writer, frame) #old writer replaced
        writer.write(frame)
    # release the writer so the video container is finalized and playable
    writer.release()
def makeVideoAll(inputType = defaultType,
inputFolder = "",
extensions = ['.png', '.jpg'],
outputFolder = "",
fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
frameSize=defaultFrameSize):
"""
cd /media/KINGSTON/ARMOR/python/
python
from armor.video import makeVideo as mv
reload(mv) ; mv.makeVideoAll(inputType="charts2")
"""
time0 = time.time()
if inputFolder == "":
inputFolder = "%s%s/" % (dataRoot, inputType)
if outputFolder =="":
outputFolder = defaultOutputFolder + inputType + str(int(time.time())) +'/'
#debug
print inputFolder
os.makedirs(outputFolder)
LL = os.listdir(inputFolder)
LL.sort()
for folder in LL:
imageList = loadImages(inputFolder=inputFolder+folder+'/', extensions=extensions)
try:
print folder
makeVideo(imageList,
outputPath= outputFolder + folder + '_' + inputType + '.avi',
fourcc=fourcc,
fps = len(imageList),
#fps = len(imageList)/10. ,
frameSize=frameSize) # frames per sec = len(imageList)/10.
# - so that each day lasts 10 seconds
# no matter how many frames there are
except:
print folder, "makeVideo ERROR!!!!!!!!!!!" # don't care if it doesn't work
time.sleep(3)
print time.time()-time0
def main(inputDate=defaultDate, inputType=defaultType, inputFolder="",
outputFolder=defaultOutputFolder, extensions=['.png','.jpg'],
fps = '',
frameSize=defaultFrameSize):
"""
USE:
main(inputDate=defaultDate, inputType=DefaultType, inputFolder="", outputFolder="")
WHERE:
defaultDate = '2013-07-12'
defaultType = 'charts'
OUTPUT:
out
"""
time0 = time.time()
if inputFolder == "":
inputFolder = "%s%s/%s/" % (dataRoot, inputType, inputDate)
#print inputFolder
imageList = loadImages(inputFolder=inputFolder, extensions=extensions)
if fps =='':
fps = len(imageList)/10. # frames per sec = len(imageList)/10.
# - so that each day lasts 10 seconds
# no matter how many frames there are
makeVideo(imageList=imageList,
outputPath=outputFolder+inputDate+'_'+inputType+'.avi',
fourcc=cv.CV_FOURCC('F', 'L', 'V', '1'),
fps=fps,
frameSize=frameSize)
print outputFolder+inputDate+'_'+inputType
print time.time()-time0
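# Example (a sketch): both main() and makeVideoAll() accept a different codec,
# e.g. one of the FOURCC codes listed below:
#   makeVideo(imageList, outputPath='out.avi',
#             fourcc=cv.CV_FOURCC('D', 'I', 'V', 'X'), fps=10)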
"""
CV_FOURCC('P','I','M','1') = MPEG-1 codec
CV_FOURCC('M','J','P','G') = motion-jpeg codec (does not work well)
CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec
CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec
CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec
CV_FOURCC('U', '2', '6', '3') = H263 codec
CV_FOURCC('I', '2', '6', '3') = H263I codec
CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec
"""
def makeVideoFourInOne(inputTypes = ['rainfall1' , 'charts',
'temperature' , 'satellite2'],
outputFolder = "",
fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
extension= '.avi',
#fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),
#extension= '.mpg',
# fps = defaultFps,
frameSize = (1200,1200),
startingFromDate=""):
# sizes- rainfall1: 400x400; charts: 600x600 ; temperature: 400x400 ; satellite2: 430,400
# == USE ==
# cd /media/KINGSTON/ARMOR/python
# python
# from armor.video import makeVideo as mv
# reload(mv) ; mv.makeVideoFourInOne()
# reload(mv) ; mv.makeVideoFourInOne(startingFromDate='2013-08-15')
# plan: 1. get four lists of file paths [(datetime, type) -> path]
# 2. initialise background
# 3. for each datetime do
# 1.look for relevant path, return blank if not found
# 2. load paste the new image and paste it to the frame, do nothing if not found
# 3. write the frame to the video
######################################################################
#
# 1. get four lists of file paths [(datetime, type) -> path]
#
if outputFolder =="":
outputFolder = defaultOutputFolder + '_'.join(inputTypes) + '_' + str(int(time.time())) + '/'
fileNameDict = {}
for inputType in inputTypes:
LL = os.listdir(dataRoot+inputType)
for inputDate in LL:
if not os.path.isdir(dataRoot+inputType+'/'+inputDate) : # not valid data
continue
L = os.listdir(dataRoot+inputType+'/'+inputDate)
if L == []: # empty folder
continue
for fileName in L:
# ('charts', '2013-05-17_1530') -> 2013-05-17_1530.MOS0.jpg
fileNameDict[(inputType, fileName[:15])] = fileName
#####################################################
# 2. initialise background, initialise writer
os.makedirs(outputFolder)
#currentFrame = np.ones((1200,1200,3)) #(1200x1200x3)
currentFrame = imread(dataRoot+defaultType+ '/2013-05-17/2013-05-17_1230.MOS0.jpg')
currentFrame = np.hstack([currentFrame, currentFrame])
currentFrame = np.vstack([currentFrame, currentFrame])
currentFrame = currentFrame *0 +1
#debug
#plt.imshow(currentFrame)
#plt.show()
# end debug
dateTimeList = sorted([ v[1] for v in fileNameDict.keys() \
if v[1]>startingFromDate])
# e.g. '2013-05-17_1530' > '2013-05-16'
# DEBUG
#print dateTimeList, startingFromDate
#x=raw_input('press enter:')
## END DEBUG
inputDateList = sorted(list(set([v[:10] for v in dateTimeList])))
for inputDate in inputDateList:
print inputDate
#split the video into dates
dateTimeListShort = [v for v in dateTimeList if inputDate in v]
#debug
#print outputFolder +inputDate +extension
#print fourcc
#print fps
#print frameSize
# end debug
# initialise video writer
writer = cv2.VideoWriter(filename=outputFolder +inputDate +extension,
fourcc=fourcc,
fps=fps,
frameSize=frameSize)
# initialise (black) currentFrame for each day
# added 2013-08-16
currentFrame = imread(dataRoot+defaultType + \
'/2013-05-17/2013-05-17_1230.MOS0.jpg')
currentFrame = np.hstack([currentFrame, currentFrame])
currentFrame = np.vstack([currentFrame, currentFrame])
currentFrame = currentFrame *0 +1
#####################################################
# 3. for each datetime do
# 1.look for relevant path, return blank if not found
# 2. load paste the new image and paste it to the frame, do nothing if not found
# 3. write the frame to the video
for dateTime in dateTimeListShort: # e.g. '2013-05-17_1530'
print "\n*****", dateTime, "******"
# can add some logics here to pick out specific dates and times
# too lazy to do it here
for N, inputType in enumerate(inputTypes): # e.g. 'charts'
print inputType,
if (inputType, dateTime) not in fileNameDict.keys():
print '-X',
continue
#2. load paste the new image and paste it to the frame
# e.g. /.../CWB/charts/2013-05-17/2013-05-17_1530.MOS0.jpg
fileName = fileNameDict[(inputType, dateTime)]
filePath = dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName
if os.path.getsize(filePath) < 3000: #invalid file
continue
try:
img = imread(dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName)
except:
continue
# debug
#print dataRoot +inputType +'/' + dateTime[:10] +'/' + fileName
#plt.imshow(currentFrame)
#plt.show()
# end debug
height, width, depth = img.shape
hor_displ = (N % 2)*600 # horizontal displacement: 1,3 on the right
vert_displ = (N//2 % 2)*600 # 2,3 on the bottom
currentFrame[vert_displ:vert_displ+height, hor_displ:hor_displ+width, :] = img
# debug
#print hor_displ, vert_displ
# end debug
            writer.write(currentFrame)
        # finalize this date's video file before moving on to the next date
        writer.release()
| cc0-1.0 | 344,819,082,421,561,900 | 39.117962 | 132 | 0.556803 | false | 3.765476 | false | false | false |
Jokeren/neon | neon/data/dataloader.py | 1 | 12387 | # ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import ctypes as ct
import logging
import numpy as np
import os
import atexit
from neon.util.persist import get_data_cache_dir
from .media import MediaParams
from .indexer import Indexer
from .dataiterator import NervanaDataIterator
logger = logging.getLogger(__name__)
BufferPair = (ct.c_void_p) * 2
class DeviceParams(ct.Structure):
_fields_ = [('type', ct.c_int),
('id', ct.c_int),
('data', BufferPair),
('targets', BufferPair),
('meta', BufferPair)]
class DataLoader(NervanaDataIterator):
"""
Encapsulates the data loader library and exposes an API to iterate over
generic data (images, video or audio given in compressed form). An index
file that maps the data examples to their targets is expected to be provided
in CSV format.
Arguments:
set_name (str):
            Name of this dataset partition. This is used as a prefix for
directories and index files that may be created while ingesting.
repo_dir (str):
Directory to find the data. This may also be used as the output
directory to store ingested data (in case archive_dir is not
specified).
media_params (MediaParams):
Parameters specific to the media type of the input data.
target_size (int):
The size of the targets. For example: if the target is a class
label, set this parameter to 1, indicating a single integer. If
the target is a mask image, the number of pixels in that image
should be specified.
archive_dir (str):
Directory to store ingested data. If this directory does not exist,
it will be created.
target_conversion (str, optional):
Specifies the method to be used for converting the targets that are
provided in the index file. The options are "no_conversion",
"ascii_to_binary", "char_to_index" and "read_contents". If this
parameter is set to "read_contents", the targets given in the index
file are treated as pathnames and their contents read in. Defaults
to "ascii_to_binary".
index_file (str, optional):
CSV formatted index file that defines the mapping between each
example and its target. The first line in the index file is
assumed to be a header and is ignored. Two columns are expected in
the index. The first column should be the file system path to
individual data examples. The second column may contain the actual
label or the pathname of a file that contains the labels (e.g. a
mask image). If this parameter is not specified, creation of an
            index file is attempted. Automatic index generation can only be
performed if the dataset is organized into subdirectories, which
also represent labels.
shuffle (boolean, optional):
Whether to shuffle the order of data examples as the data is
ingested.
reshuffle (boolean, optional):
Whether to reshuffle the order of data examples as they are loaded.
If this is set to True, the order is reshuffled for each epoch.
Useful for batch normalization. Defaults to False.
        datum_dtype (data-type, optional):
            Data type of input data. Defaults to np.uint8.
        target_dtype (data-type, optional):
            Data type of targets. Defaults to np.int32.
onehot (boolean, optional):
If the targets are categorical and have to be converted to a one-hot
representation.
nclasses (int, optional):
Number of classes, if this dataset is intended for a classification
problem.
subset_percent (int, optional):
Value between 0 and 100 indicating what percentage of the dataset
partition to use. Defaults to 100.
ingest_params (IngestParams):
Parameters to specify special handling for ingesting data.
alphabet (str, optional):
Alphabet to use for converting string labels. This is only
applicable if target_conversion is set to "char_to_index".
"""
_converters_ = {'no_conversion': 0,
'ascii_to_binary': 1,
'char_to_index': 2,
'read_contents': 3}
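    # Example construction (a sketch; ImageParams and its fields are an
    # assumption based on the accompanying media module):
    #   media = ImageParams(channel_count=3, height=32, width=32)
    #   train = DataLoader('train', repo_dir='/data/train', media_params=media,
    #                      target_size=1, nclasses=10)
    #   for x, t in train:
    #       ...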
def __init__(self, set_name, repo_dir,
media_params, target_size,
archive_dir=None,
target_conversion='ascii_to_binary',
index_file=None,
shuffle=False, reshuffle=False,
datum_dtype=np.uint8, target_dtype=np.int32,
onehot=True, nclasses=None, subset_percent=100,
ingest_params=None,
alphabet=None):
if onehot is True and nclasses is None:
raise ValueError('nclasses must be specified for one-hot labels')
if target_conversion not in self._converters_:
raise ValueError('Unknown target type %s' % target_conversion)
self.set_name = set_name
repo_dir = os.path.expandvars(os.path.expanduser(repo_dir))
if not os.path.exists(repo_dir):
raise IOError('Directory not found: %s' % repo_dir)
self.macro_start = 0
self.repo_dir = repo_dir
parent_dir = os.path.split(repo_dir)[0]
self.archive_prefix = 'archive-'
if archive_dir is None:
self.archive_dir = get_data_cache_dir(parent_dir, set_name + '-ingested')
else:
self.archive_dir = os.path.expandvars(os.path.expanduser(archive_dir))
self.item_count = ct.c_int(0)
self.bsz = self.be.bsz
self.buffer_id = 0
self.start_idx = 0
self.media_params = media_params
self.shape = media_params.get_shape()
self.datum_size = media_params.datum_size()
self.target_size = target_size
self.target_conversion = self._converters_[target_conversion]
if index_file is None:
self.index_file = os.path.join(parent_dir, set_name + '-index.csv')
else:
self.index_file = index_file
self.shuffle = shuffle
self.reshuffle = reshuffle
self.datum_dtype = datum_dtype
self.target_dtype = target_dtype
self.onehot = onehot
self.nclasses = nclasses
self.subset_percent = int(subset_percent)
self.ingest_params = ingest_params
if alphabet is None:
self.alphabet = None
else:
self.alphabet = ct.c_char_p(alphabet)
self.load_library()
self.alloc()
self.start()
atexit.register(self.stop)
def load_library(self):
path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(path, os.pardir, os.pardir,
'loader', 'bin', 'loader.so')
self.loaderlib = ct.cdll.LoadLibrary(libpath)
self.loaderlib.start.restype = ct.c_void_p
self.loaderlib.next.argtypes = [ct.c_void_p]
self.loaderlib.stop.argtypes = [ct.c_void_p]
self.loaderlib.reset.argtypes = [ct.c_void_p]
def alloc(self):
def alloc_bufs(dim0, dtype):
return [self.be.iobuf(dim0=dim0, dtype=dtype) for _ in range(2)]
def ct_cast(buffers, idx):
return ct.cast(int(buffers[idx].raw()), ct.c_void_p)
def cast_bufs(buffers):
return BufferPair(ct_cast(buffers, 0), ct_cast(buffers, 1))
self.data = alloc_bufs(self.datum_size, self.datum_dtype)
self.targets = alloc_bufs(self.target_size, self.target_dtype)
self.meta = alloc_bufs(2, np.int32)
self.media_params.alloc(self)
self.device_params = DeviceParams(self.be.device_type,
self.be.device_id,
cast_bufs(self.data),
cast_bufs(self.targets),
cast_bufs(self.meta))
if self.onehot:
self.onehot_labels = self.be.iobuf(self.nclasses,
dtype=self.be.default_dtype)
if self.datum_dtype == self.be.default_dtype:
self.backend_data = None
else:
self.backend_data = self.be.iobuf(self.datum_size,
dtype=self.be.default_dtype)
@property
def nbatches(self):
return -((self.start_idx - self.ndata) // self.bsz)
def start(self):
"""
Launch background threads for loading the data.
"""
if not os.path.exists(self.archive_dir):
logger.warning('%s not found. Triggering data ingest...' % self.archive_dir)
os.makedirs(self.archive_dir)
if self.item_count.value == 0:
indexer = Indexer(self.repo_dir, self.index_file)
indexer.run()
datum_dtype_size = np.dtype(self.datum_dtype).itemsize
target_dtype_size = np.dtype(self.target_dtype).itemsize
if self.ingest_params is None:
ingest_params = ct.POINTER(MediaParams)()
else:
ingest_params = ct.POINTER(MediaParams)(self.ingest_params)
self.loader = self.loaderlib.start(
ct.byref(self.item_count), self.bsz,
ct.c_char_p(self.repo_dir.encode()),
ct.c_char_p(self.archive_dir.encode()),
ct.c_char_p(self.index_file.encode()),
ct.c_char_p(self.archive_prefix.encode()),
self.shuffle, self.reshuffle,
self.macro_start,
ct.c_int(self.datum_size), ct.c_int(datum_dtype_size),
ct.c_int(self.target_size), ct.c_int(target_dtype_size),
ct.c_int(self.target_conversion),
self.subset_percent,
ct.POINTER(MediaParams)(self.media_params),
ct.POINTER(DeviceParams)(self.device_params),
ingest_params,
self.alphabet)
self.ndata = self.item_count.value
if self.loader is None:
raise RuntimeError('Failed to start data loader.')
def stop(self):
"""
Clean up and exit background threads.
"""
self.loaderlib.stop(self.loader)
def reset(self):
"""
Restart data from index 0
"""
self.buffer_id = 0
self.start_idx = 0
self.loaderlib.reset(self.loader)
def next(self, start):
end = min(start + self.bsz, self.ndata)
if end == self.ndata:
self.start_idx = self.bsz - (self.ndata - start)
self.loaderlib.next(self.loader)
if self.backend_data is None:
data = self.data[self.buffer_id]
else:
# Convert data to the required precision.
self.backend_data[:] = self.data[self.buffer_id]
data = self.backend_data
if self.onehot:
# Convert labels to one-hot encoding.
self.onehot_labels[:] = self.be.onehot(
self.targets[self.buffer_id], axis=0)
targets = self.onehot_labels
else:
targets = self.targets[self.buffer_id]
meta = self.meta[self.buffer_id]
self.buffer_id = 1 if self.buffer_id == 0 else 0
return self.media_params.process(self, data, targets, meta)
def __iter__(self):
for start in range(self.start_idx, self.ndata, self.bsz):
yield self.next(start)
| apache-2.0 | 2,418,779,810,224,474,600 | 41.276451 | 88 | 0.588036 | false | 4.088119 | false | false | false |
jimporter/bfg9000 | test/unit/builtins/test_file_types.py | 1 | 13686 | from unittest import mock
from .common import AttrDict, BuiltinTest
from bfg9000.builtins import find, project, regenerate, version # noqa
from bfg9000.builtins.file_types import make_file_list, static_file
from bfg9000.file_types import *
from bfg9000.path import Path, Root
def srcpath(p):
return Path(p, Root.srcdir)
class TestStaticFile(BuiltinTest):
def test_basic(self):
expected = File(srcpath('file.txt'))
self.assertSameFile(static_file(self.context, File, 'file.txt'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_path(self):
p = srcpath('file.txt')
expected = File(p)
self.assertSameFile(static_file(self.context, File, p), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_builddir_path(self):
p = Path('file.txt', Root.builddir)
expected = File(p)
self.assertSameFile(static_file(self.context, File, p), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = File(srcpath('dir/file.txt'))
self.assertSameFile(static_file(self.context, File, 'file.txt'),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
def test_no_dist(self):
p = srcpath('file.txt')
expected = File(p)
self.assertSameFile(static_file(self.context, File, p, dist=False),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_params_default(self):
expected = SourceFile(srcpath('file.txt'), 'c')
self.assertSameFile(static_file(
self.context, SourceFile, 'file.txt', params=[('lang', 'c')]
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_params_custom(self):
expected = SourceFile(srcpath('file.txt'), 'c++')
self.assertSameFile(static_file(
self.context, SourceFile, 'file.txt', params=[('lang', 'c')],
kwargs={'lang': 'c++'}
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_extra_kwargs(self):
self.assertRaises(TypeError, static_file, self.context,
SourceFile, 'file.txt', params=[('lang', 'c')],
kwargs={'lang': 'c++', 'extra': 'value'})
self.assertEqual(list(self.build.sources()), [self.bfgfile])
class TestFileList(BuiltinTest):
def make_file_list(self, *args):
def make_file(src, format=None):
obj = ObjectFile(src.path.stripext('.o').reroot(), format,
src.lang)
obj.creator = AttrDict(file=src)
return obj
files = [SourceFile(srcpath(i), 'c++') for i in args]
return make_file_list(self.context, make_file, files, format='elf')
def test_len(self):
self.assertEqual(len(self.make_file_list()), 0)
self.assertEqual(len(self.make_file_list('foo.cpp', 'bar.cpp')), 2)
def test_index_int(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f[0], ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_str(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f['foo.cpp'],
ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_path(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertSameFile(f[srcpath('foo.cpp')],
ObjectFile(Path('foo.o'), 'elf', 'c++'))
self.assertSameFile(f[srcpath('bar.cpp')],
ObjectFile(Path('bar.o'), 'elf', 'c++'))
def test_index_file(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
src = SourceFile(srcpath('foo.cpp'), 'c++')
self.assertEqual(f[src], ObjectFile(Path('foo.o'), 'elf', 'c++'))
def test_index_path_not_found(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
with self.assertRaises(IndexError):
f[srcpath('goofy.cpp')]
def test_submodule(self):
f = self.make_file_list('dir/foo.cpp', 'dir/bar.cpp')
obj = ObjectFile(Path('dir/foo.o'), 'elf', 'c++')
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
self.assertSameFile(f['foo.cpp'], obj)
self.assertSameFile(f['dir/foo.cpp'], obj)
def test_eq(self):
f1 = self.make_file_list('foo.cpp', 'bar.cpp')
f2 = self.make_file_list('foo.cpp', 'bar.cpp')
f3 = self.make_file_list('baz.cpp', 'quux.cpp')
s = list(f1)
self.assertTrue(f1 == f1)
self.assertFalse(f1 != f1)
self.assertTrue(f1 == f2)
self.assertFalse(f1 != f2)
self.assertFalse(f1 == f3)
self.assertTrue(f1 != f3)
self.assertTrue(f1 == s)
self.assertFalse(f1 != s)
def test_add(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertIsInstance(f + ['blah.cpp'], list)
self.assertEqual(f + ['blah.cpp'], [
ObjectFile(Path('foo.o'), 'elf', 'c++'),
ObjectFile(Path('bar.o'), 'elf', 'c++'),
'blah.cpp'
])
def test_radd(self):
f = self.make_file_list('foo.cpp', 'bar.cpp')
self.assertIsInstance(['blah.cpp'] + f, list)
self.assertEqual(['blah.cpp'] + f, [
'blah.cpp',
ObjectFile(Path('foo.o'), 'elf', 'c++'),
ObjectFile(Path('bar.o'), 'elf', 'c++'),
])
class TestAutoFile(BuiltinTest):
def test_identity(self):
expected = File(srcpath('file.txt'))
self.assertIs(self.context['auto_file'](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_source_file(self):
expected = SourceFile(srcpath('file.cpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.cpp'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_header_file(self):
expected = HeaderFile(srcpath('file.hpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.hpp'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_other_file(self):
expected = File(srcpath('file.txt'))
self.assertSameFile(self.context['auto_file']('file.txt'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_directory(self):
expected = Directory(srcpath('directory/'))
self.assertSameFile(self.context['auto_file']('directory/'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_header_directory(self):
expected = HeaderDirectory(srcpath('directory/'), 'c++')
self.assertSameFile(self.context['auto_file']('directory/', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_auxext(self):
expected = HeaderFile(srcpath('file.h'), 'c++')
self.assertSameFile(self.context['auto_file']('file.h', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_src_lang(self):
expected_src = SourceFile(srcpath('file.cpp'), 'qtmoc')
self.assertSameFile(self.context['auto_file']('file.cpp', 'qtmoc'),
expected_src)
expected_hdr = HeaderFile(srcpath('file.hpp'), 'qtmoc')
self.assertSameFile(self.context['auto_file']('file.hpp', 'qtmoc'),
expected_hdr)
self.assertEqual(list(self.build.sources()), [
self.bfgfile, expected_src, expected_hdr,
])
def test_unknown_ext(self):
expected = SourceFile(srcpath('file.goofy'), 'c++')
self.assertSameFile(self.context['auto_file']('file.goofy', 'c++'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_unknown_lang(self):
expected = SourceFile(srcpath('file.goofy'), 'goofy')
self.assertSameFile(self.context['auto_file']('file.goofy', 'goofy'),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = SourceFile(srcpath('file.cpp'), 'c++')
self.assertIs(self.context['auto_file'](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
expected = SourceFile(srcpath('dir/file.cpp'), 'c++')
self.assertSameFile(self.context['auto_file']('file.cpp'),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
class TestGenericFile(BuiltinTest):
type = File
args = ()
fn = 'generic_file'
filename = 'file.txt'
def test_identity(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertIs(self.context[self.fn](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_basic(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertSameFile(self.context[self.fn](self.filename),
expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_no_dist(self):
expected = self.type(srcpath(self.filename), *self.args)
self.assertSameFile(
self.context[self.fn](self.filename, dist=False), expected
)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
def test_path(self):
path = srcpath(self.filename)
expected = self.type(path, *self.args)
self.assertSameFile(self.context[self.fn](path), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
def test_submodule(self):
with self.context.push_path(Path('dir/build.bfg', Root.srcdir)):
expected = self.type(srcpath(self.filename), *self.args)
self.assertIs(self.context[self.fn](expected), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile])
expected = self.type(srcpath('dir/' + self.filename), *self.args)
self.assertSameFile(self.context[self.fn](self.filename),
expected)
self.assertEqual(list(self.build.sources()),
[self.bfgfile, expected])
class TestModuleDefFile(TestGenericFile):
type = ModuleDefFile
fn = 'module_def_file'
filename = 'file.def'
class TestSourceFile(TestGenericFile):
type = SourceFile
args = ('c++',)
fn = 'source_file'
filename = 'file.cpp'
lang = 'c++'
def test_lang(self):
expected = self.type(srcpath('file.goofy'), self.lang)
self.assertSameFile(self.context[self.fn](
'file.goofy', lang=self.lang
), expected)
self.assertEqual(list(self.build.sources()), [self.bfgfile, expected])
class TestHeaderFile(TestSourceFile):
type = HeaderFile
fn = 'header_file'
filename = 'file.hpp'
class TestDirectory(TestGenericFile):
type = Directory
fn = 'directory'
filename = 'dir'
def test_include(self):
def mock_walk(path, variables=None):
p = srcpath
return [
(p('dir'), [p('dir/sub')], [p('dir/file.txt')]),
(p('dir/sub'), [], [p('dir/sub/file2.txt')]),
]
expected = self.type(srcpath(self.filename), [
File(srcpath('dir/file.txt')),
File(srcpath('dir/sub/file2.txt')),
])
with mock.patch('bfg9000.builtins.find.walk', mock_walk):
self.assertSameFile(
self.context[self.fn](self.filename, include='**/*.txt'),
expected
)
self.assertEqual(list(self.build.sources()),
[self.bfgfile] + expected.files + [expected])
class TestHeaderDirectory(TestDirectory):
type = HeaderDirectory
fn = 'header_directory'
filename = 'include'
def test_include(self):
def mock_walk(path, variables=None):
p = srcpath
return [
(p('include'), [p('include/sub')], [p('include/file.hpp')]),
(p('include/sub'), [], [p('include/sub/file2.hpp')]),
]
expected = self.type(srcpath(self.filename), [
HeaderFile(srcpath('include/file.hpp'), 'c++'),
HeaderFile(srcpath('include/sub/file2.hpp'), 'c++'),
], langs=['c++'])
with mock.patch('bfg9000.builtins.find.walk', mock_walk):
self.assertSameFile(
self.context[self.fn](self.filename, include='**/*.hpp'),
expected
)
self.assertEqual(list(self.build.sources()),
[self.bfgfile] + expected.files + [expected])
| bsd-3-clause | -5,738,996,323,690,692,000 | 37.661017 | 78 | 0.571095 | false | 3.647655 | true | false | false |
gabrielferreira/apprelease | apprelease/settings.py | 1 | 6032 | """
Django settings for apprelease project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = [('Gabriel Ferreira', '[email protected]'),('admin', '[email protected]')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'kgv63hbu@9!qo1#2k)tsp7ko5u^f82#7amz$_u@nq#@7_eayx3')
# SECURITY WARNING: don't run with debug turned on in production!
# Environment variables are strings, so compare explicitly; otherwise setting
# DEBUG=False in the environment would still evaluate as truthy.
DEBUG = os.getenv('DEBUG', 'True') == 'True'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'bootstrap3',
'storages',
'release'
)
DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'
from django.contrib import messages
MESSAGE_TAGS = {
messages.SUCCESS: 'alert-success success',
messages.WARNING: 'alert-warning warning',
messages.ERROR: 'alert-danger error'
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'apprelease.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'apprelease.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASSWORD'),
'HOST': os.environ.get('DB_HOST'), # Or an IP Address that your DB is hosted on
'PORT': os.environ.get('DB_PORT'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
if DEBUG:
STATIC_URL = '/static/'
MEDIA_URL = "/"
else:
AWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires
'Cache-Control': 'max-age=%s' % (os.environ.get('CACHE_MAX_AGE')),
}
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_MEDIA_CUSTOM_DOMAIN = os.environ.get('AWS_MEDIA_CUSTOM_DOMAIN')
AWS_STATIC_CUSTOM_DOMAIN = os.environ.get('AWS_STATIC_CUSTOM_DOMAIN')
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "%s/%s/" % (AWS_STATIC_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "%s/%s/" % (AWS_MEDIA_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
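# The custom_storages module referenced above is assumed to subclass the
# django-storages S3 backend, roughly (a sketch):
#   from django.conf import settings
#   from storages.backends.s3boto import S3BotoStorage
#   class StaticStorage(S3BotoStorage):
#       location = settings.STATICFILES_LOCATION
#   class MediaStorage(S3BotoStorage):
#       location = settings.MEDIAFILES_LOCATION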
# API config
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# ]
}
try:
import django
django.setup()
from django.conf import settings
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
if User.objects.count() == 0:
for user in settings.ADMINS:
username = user[0].replace(' ', '')
email = user[1]
password = 'admin'
print('Creating account for %s (%s)' % (username, email))
admin = User.objects.create_superuser(email=email, username=username, password=password)
admin.is_active = True
admin.is_admin = True
admin.save()
else:
print('Admin accounts can only be initialized if no Accounts exist')
except Exception:
    # settings can be imported before the database is ready (e.g. during the
    # first migrate), so skip auto-creating the admin accounts in that case
    pass
| apache-2.0 | 7,062,167,176,622,131,000 | 29.933333 | 100 | 0.661472 | false | 3.560803 | false | false | false |
stryku/hb | client_server/strbot/strykubot.py | 1 | 2285 | from git import Repo
import os
import shutil
import stat
import utils
import tempfile
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
class StrykuBot:
def __init__(self):
self.password = ''
self.gh_url = 'https://github.com/stryku/'
self.repo_name = ''
self.name = 'stryku-bot'
self.email = '[email protected]'
self.repo_dir = ''
self.tmp_repo_dir = None
self.repo = None
self.git = None
self.commit_prefix = '[stryku-bot]: '
self.last_branch = ''
        # use a context manager so the handle is always closed and the
        # built-in name 'file' is not shadowed
        with open('strykubot.password') as password_file:
            self.password = password_file.read().strip()
def clone_repo(self, repo_name, dest='build', rm_old=True):
if rm_old:
if os.path.exists(dest):
shutil.rmtree(dest, onerror=on_rm_error)
self.repo_dir = dest
self.repo_name = repo_name
self.repo = Repo.clone_from(self.gh_url + self.repo_name, dest)
self.git = self.repo.git
self.git.checkout('dev')
writer = self.repo.config_writer()
writer.set_value('user', 'name', self.name)
writer.set_value('user', 'email', self.email)
writer.write()
def clone_tmp_repo(self, repo_name):
self.tmp_repo_dir = tempfile.TemporaryDirectory()
self.clone_repo(repo_name, self.tmp_repo_dir.name, False)
def add_all(self):
self.git.add('--all')
def checkout_branch(self, branch):
self.last_branch = branch
try:
self.git.checkout('HEAD', b=branch)
except Exception:
print("Branch already exist. Remove and create a new one")
self.git.branch('-D', branch)
self.git.checkout('HEAD', b=branch)
def commit(self, msg):
self.git.commit(m=self.commit_prefix + msg)
def push_last_branch(self):
command = ('git push https://stryku-bot:%[email protected]/stryku/%s %s' % (self.password, self.repo_name, self.last_branch))
print(utils.run_process_split(command, cwd=self.repo_dir))
def get_repo_dir(self):
return self.repo_dir
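# Example usage (hypothetical repository and branch names):
#   bot = StrykuBot()
#   bot.clone_repo('hb')
#   bot.checkout_branch('auto/regenerate')
#   bot.add_all()
#   bot.commit('regenerate sources')
#   bot.push_last_branch()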
| mit | -4,906,395,253,340,578,000 | 30.736111 | 128 | 0.595186 | false | 3.441265 | false | false | false |
ezhuk/aws-tools | ec2/check_snapshot_status.py | 1 | 1693 | #!/usr/bin/env python
# Copyright (c) 2014 Eugene Zhuk.
# Use of this source code is governed by the MIT license that can be found
# in the LICENSE file.
"""Checks AWS EBS Snapshot status.
This script displays the current status of one or more AWS EBS snapshots.
Usage:
./check_snapshot_status.py [options]
"""
import boto.ec2
import optparse
import sys
import time
class Error(Exception):
pass
def main():
parser = optparse.OptionParser('Usage: %prog [options]')
parser.add_option('-s', '--snapshot', dest='snapshots', action='append',
help='The snapshot ID(s) to check status for. This option is required.')
(opts, args) = parser.parse_args()
if 0 != len(args) or opts.snapshots is None:
parser.print_help()
return 1
try:
c = boto.connect_ec2()
while True:
snapshots = c.get_all_snapshots(snapshot_ids=opts.snapshots)
if not snapshots:
raise Error('could not find \'{0}\''.format(opts.snapshots))
for snap in snapshots:
print '{0}: [{1}{2}] {3}'.format(snap.id,
'#' * 4 * (int(snap.progress.strip('%')) / 10),
' ' * 4 * ((100 - int(snap.progress.strip('%'))) / 10),
snap.progress)
if all(snap.status != 'pending' for snap in snapshots):
break
size = len(snapshots)
if (1 < size):
sys.stdout.write('\x1b[1A' * size)
time.sleep(3)
except (Error, Exception), err:
sys.stderr.write('[ERROR] {0}\n'.format(err))
return 1
return 0
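# Example invocation (snapshot IDs are placeholders):
#   ./check_snapshot_status.py -s snap-0123abcd -s snap-4567ef01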
if __name__ == '__main__':
sys.exit(main())
| mit | 5,892,942,803,866,929,000 | 25.453125 | 80 | 0.561134 | false | 3.795964 | false | false | false |
palerdot/calibre | src/calibre/gui2/viewer/config.py | 4 | 15408 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import zipfile
from functools import partial
from PyQt4.Qt import (QFont, QVariant, QDialog, Qt, QColor, QColorDialog,
QMenu, QInputDialog)
from calibre.constants import iswindows, isxp
from calibre.utils.config import Config, StringConfig, JSONConfig
from calibre.gui2 import min_available_height
from calibre.gui2.shortcuts import ShortcutConfig
from calibre.gui2.viewer.config_ui import Ui_Dialog
from calibre.utils.localization import get_language
def config(defaults=None):
desc = _('Options to customize the ebook viewer')
if defaults is None:
c = Config('viewer', desc)
else:
c = StringConfig(defaults, desc)
c.add_opt('remember_window_size', default=False,
help=_('Remember last used window size'))
c.add_opt('user_css', default='',
help=_('Set the user CSS stylesheet. This can be used to customize the look of all books.'))
c.add_opt('max_fs_width', default=800,
help=_("Set the maximum width that the book's text and pictures will take"
" when in fullscreen mode. This allows you to read the book text"
" without it becoming too wide."))
c.add_opt('max_fs_height', default=-1,
help=_("Set the maximum height that the book's text and pictures will take"
" when in fullscreen mode. This allows you to read the book text"
" without it becoming too tall. Note that this setting only takes effect in paged mode (which is the default mode)."))
c.add_opt('fit_images', default=True,
help=_('Resize images larger than the viewer window to fit inside it'))
c.add_opt('hyphenate', default=False, help=_('Hyphenate text'))
c.add_opt('hyphenate_default_lang', default='en',
help=_('Default language for hyphenation rules'))
c.add_opt('remember_current_page', default=True,
help=_('Save the current position in the document, when quitting'))
c.add_opt('wheel_flips_pages', default=False,
help=_('Have the mouse wheel turn pages'))
c.add_opt('line_scrolling_stops_on_pagebreaks', default=False,
help=_('Prevent the up and down arrow keys from scrolling past '
'page breaks'))
c.add_opt('page_flip_duration', default=0.5,
help=_('The time, in seconds, for the page flip animation. Default'
' is half a second.'))
c.add_opt('font_magnification_step', default=0.2,
help=_('The amount by which to change the font size when clicking'
' the font larger/smaller buttons. Should be a number between '
'0 and 1.'))
c.add_opt('fullscreen_clock', default=False, action='store_true',
help=_('Show a clock in fullscreen mode.'))
c.add_opt('fullscreen_pos', default=False, action='store_true',
help=_('Show reading position in fullscreen mode.'))
c.add_opt('fullscreen_scrollbar', default=True, action='store_false',
help=_('Show the scrollbar in fullscreen mode.'))
c.add_opt('start_in_fullscreen', default=False, action='store_true',
help=_('Start viewer in full screen mode'))
c.add_opt('show_fullscreen_help', default=True, action='store_false',
help=_('Show full screen usage help'))
c.add_opt('cols_per_screen', default=1)
c.add_opt('use_book_margins', default=False, action='store_true')
c.add_opt('top_margin', default=20)
c.add_opt('side_margin', default=40)
c.add_opt('bottom_margin', default=20)
c.add_opt('text_color', default=None)
c.add_opt('background_color', default=None)
c.add_opt('show_controls', default=True)
fonts = c.add_group('FONTS', _('Font options'))
fonts('serif_family', default='Times New Roman' if iswindows else 'Liberation Serif',
help=_('The serif font family'))
fonts('sans_family', default='Verdana' if iswindows else 'Liberation Sans',
help=_('The sans-serif font family'))
fonts('mono_family', default='Courier New' if iswindows else 'Liberation Mono',
help=_('The monospaced font family'))
fonts('default_font_size', default=20, help=_('The standard font size in px'))
fonts('mono_font_size', default=16, help=_('The monospaced font size in px'))
fonts('standard_font', default='serif', help=_('The standard font type'))
fonts('minimum_font_size', default=8, help=_('The minimum font size in px'))
return c
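# Example (a sketch): the viewer reads these options as a parsed namespace:
#   opts = config().parse()
#   if opts.hyphenate:
#       lang = opts.hyphenate_default_lang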
def load_themes():
return JSONConfig('viewer_themes')
class ConfigDialog(QDialog, Ui_Dialog):
def __init__(self, shortcuts, parent=None):
QDialog.__init__(self, parent)
self.setupUi(self)
for x in ('text', 'background'):
getattr(self, 'change_%s_color_button'%x).clicked.connect(
partial(self.change_color, x, reset=False))
getattr(self, 'reset_%s_color_button'%x).clicked.connect(
partial(self.change_color, x, reset=True))
self.css.setToolTip(_('Set the user CSS stylesheet. This can be used to customize the look of all books.'))
self.shortcuts = shortcuts
self.shortcut_config = ShortcutConfig(shortcuts, parent=self)
bb = self.buttonBox
bb.button(bb.RestoreDefaults).clicked.connect(self.restore_defaults)
with zipfile.ZipFile(P('viewer/hyphenate/patterns.zip',
allow_user_override=False), 'r') as zf:
pats = [x.split('.')[0].replace('-', '_') for x in zf.namelist()]
names = list(map(get_language, pats))
pmap = {}
for i in range(len(pats)):
pmap[names[i]] = pats[i]
for x in sorted(names):
self.hyphenate_default_lang.addItem(x, QVariant(pmap[x]))
self.hyphenate_pats = pats
self.hyphenate_names = names
p = self.tabs.widget(1)
p.layout().addWidget(self.shortcut_config)
if isxp:
self.hyphenate.setVisible(False)
self.hyphenate_default_lang.setVisible(False)
self.hyphenate_label.setVisible(False)
self.themes = load_themes()
self.save_theme_button.clicked.connect(self.save_theme)
self.load_theme_button.m = m = QMenu()
self.load_theme_button.setMenu(m)
m.triggered.connect(self.load_theme)
self.delete_theme_button.m = m = QMenu()
self.delete_theme_button.setMenu(m)
m.triggered.connect(self.delete_theme)
opts = config().parse()
self.load_options(opts)
self.init_load_themes()
self.clear_search_history_button.clicked.connect(self.clear_search_history)
self.resize(self.width(), min(self.height(), max(575, min_available_height()-25)))
def clear_search_history(self):
from calibre.gui2 import config
config['viewer_search_history'] = []
def save_theme(self):
themename, ok = QInputDialog.getText(self, _('Theme name'),
_('Choose a name for this theme'))
if not ok:
return
themename = unicode(themename).strip()
if not themename:
return
c = config('')
c.add_opt('theme_name_xxx', default=themename)
self.save_options(c)
self.themes['theme_'+themename] = c.src
self.init_load_themes()
self.theming_message.setText(_('Saved settings as the theme named: %s')%
themename)
def init_load_themes(self):
for x in ('load', 'delete'):
m = getattr(self, '%s_theme_button'%x).menu()
m.clear()
for x in self.themes.iterkeys():
title = x[len('theme_'):]
ac = m.addAction(title)
ac.theme_id = x
def load_theme(self, ac):
theme = ac.theme_id
raw = self.themes[theme]
self.load_options(config(raw).parse())
self.theming_message.setText(_('Loaded settings from the theme %s')%
theme[len('theme_'):])
def delete_theme(self, ac):
theme = ac.theme_id
del self.themes[theme]
self.init_load_themes()
self.theming_message.setText(_('Deleted the theme named: %s')%
theme[len('theme_'):])
def restore_defaults(self):
opts = config('').parse()
self.load_options(opts)
def load_options(self, opts):
self.opt_remember_window_size.setChecked(opts.remember_window_size)
self.opt_remember_current_page.setChecked(opts.remember_current_page)
self.opt_wheel_flips_pages.setChecked(opts.wheel_flips_pages)
self.opt_page_flip_duration.setValue(opts.page_flip_duration)
fms = opts.font_magnification_step
if fms < 0.01 or fms > 1:
fms = 0.2
self.opt_font_mag_step.setValue(int(fms*100))
self.opt_line_scrolling_stops_on_pagebreaks.setChecked(
opts.line_scrolling_stops_on_pagebreaks)
self.serif_family.setCurrentFont(QFont(opts.serif_family))
self.sans_family.setCurrentFont(QFont(opts.sans_family))
self.mono_family.setCurrentFont(QFont(opts.mono_family))
self.default_font_size.setValue(opts.default_font_size)
self.minimum_font_size.setValue(opts.minimum_font_size)
self.mono_font_size.setValue(opts.mono_font_size)
self.standard_font.setCurrentIndex(
{'serif':0, 'sans':1, 'mono':2}[opts.standard_font])
self.css.setPlainText(opts.user_css)
self.max_fs_width.setValue(opts.max_fs_width)
self.max_fs_height.setValue(opts.max_fs_height)
pats, names = self.hyphenate_pats, self.hyphenate_names
try:
idx = pats.index(opts.hyphenate_default_lang)
except ValueError:
idx = pats.index('en_us')
idx = self.hyphenate_default_lang.findText(names[idx])
self.hyphenate_default_lang.setCurrentIndex(idx)
self.hyphenate.setChecked(opts.hyphenate)
self.hyphenate_default_lang.setEnabled(opts.hyphenate)
self.opt_fit_images.setChecked(opts.fit_images)
self.opt_fullscreen_clock.setChecked(opts.fullscreen_clock)
self.opt_fullscreen_scrollbar.setChecked(opts.fullscreen_scrollbar)
self.opt_start_in_fullscreen.setChecked(opts.start_in_fullscreen)
self.opt_show_fullscreen_help.setChecked(opts.show_fullscreen_help)
self.opt_fullscreen_pos.setChecked(opts.fullscreen_pos)
self.opt_cols_per_screen.setValue(opts.cols_per_screen)
self.opt_override_book_margins.setChecked(not opts.use_book_margins)
for x in ('top', 'bottom', 'side'):
getattr(self, 'opt_%s_margin'%x).setValue(getattr(opts,
x+'_margin'))
for x in ('text', 'background'):
setattr(self, 'current_%s_color'%x, getattr(opts, '%s_color'%x))
self.update_sample_colors()
self.opt_show_controls.setChecked(opts.show_controls)
def change_color(self, which, reset=False):
if reset:
setattr(self, 'current_%s_color'%which, None)
else:
initial = getattr(self, 'current_%s_color'%which)
if initial:
initial = QColor(initial)
else:
initial = Qt.black if which == 'text' else Qt.white
title = (_('Choose text color') if which == 'text' else
_('Choose background color'))
col = QColorDialog.getColor(initial, self,
title, QColorDialog.ShowAlphaChannel)
if col.isValid():
name = unicode(col.name())
setattr(self, 'current_%s_color'%which, name)
self.update_sample_colors()
def update_sample_colors(self):
for x in ('text', 'background'):
val = getattr(self, 'current_%s_color'%x)
if not val:
val = 'inherit' if x == 'text' else 'transparent'
ss = 'QLabel { %s: %s }'%('background-color' if x == 'background'
else 'color', val)
getattr(self, '%s_color_sample'%x).setStyleSheet(ss)
def accept(self, *args):
if self.shortcut_config.is_editing:
from calibre.gui2 import info_dialog
info_dialog(self, _('Still editing'),
_('You are in the middle of editing a keyboard shortcut'
' first complete that, by clicking outside the '
' shortcut editing box.'), show=True)
return
self.save_options(config())
return QDialog.accept(self, *args)
def save_options(self, c):
c.set('serif_family', unicode(self.serif_family.currentFont().family()))
c.set('sans_family', unicode(self.sans_family.currentFont().family()))
c.set('mono_family', unicode(self.mono_family.currentFont().family()))
c.set('default_font_size', self.default_font_size.value())
c.set('minimum_font_size', self.minimum_font_size.value())
c.set('mono_font_size', self.mono_font_size.value())
c.set('standard_font', {0:'serif', 1:'sans', 2:'mono'}[
self.standard_font.currentIndex()])
c.set('user_css', unicode(self.css.toPlainText()))
c.set('remember_window_size', self.opt_remember_window_size.isChecked())
c.set('fit_images', self.opt_fit_images.isChecked())
c.set('max_fs_width', int(self.max_fs_width.value()))
max_fs_height = self.max_fs_height.value()
if max_fs_height <= self.max_fs_height.minimum():
max_fs_height = -1
c.set('max_fs_height', max_fs_height)
c.set('hyphenate', self.hyphenate.isChecked())
c.set('remember_current_page', self.opt_remember_current_page.isChecked())
c.set('wheel_flips_pages', self.opt_wheel_flips_pages.isChecked())
c.set('page_flip_duration', self.opt_page_flip_duration.value())
c.set('font_magnification_step',
float(self.opt_font_mag_step.value())/100.)
idx = self.hyphenate_default_lang.currentIndex()
c.set('hyphenate_default_lang',
str(self.hyphenate_default_lang.itemData(idx).toString()))
c.set('line_scrolling_stops_on_pagebreaks',
self.opt_line_scrolling_stops_on_pagebreaks.isChecked())
c.set('fullscreen_clock', self.opt_fullscreen_clock.isChecked())
c.set('fullscreen_pos', self.opt_fullscreen_pos.isChecked())
c.set('fullscreen_scrollbar', self.opt_fullscreen_scrollbar.isChecked())
c.set('show_fullscreen_help', self.opt_show_fullscreen_help.isChecked())
c.set('cols_per_screen', int(self.opt_cols_per_screen.value()))
c.set('start_in_fullscreen', self.opt_start_in_fullscreen.isChecked())
c.set('use_book_margins', not
self.opt_override_book_margins.isChecked())
c.set('text_color', self.current_text_color)
c.set('background_color', self.current_background_color)
c.set('show_controls', self.opt_show_controls.isChecked())
for x in ('top', 'bottom', 'side'):
c.set(x+'_margin', int(getattr(self, 'opt_%s_margin'%x).value()))
| gpl-3.0 | -7,370,443,364,022,408,000 | 46.555556 | 126 | 0.620132 | false | 3.648591 | true | false | false |
shashikiranrp/kasi | src/kasi.py | 1 | 2877 | #!/usr/bin/env python
import string, sys, getopt, codecs
DEFAULT_MAP_FILE = "../kw_maps/kw_map_kannada.txt"
def getKeywordMapFromFile(fileName):
_k_map = {}
with codecs.open(fileName, "r", "utf-8") as fh:
for line in filter(lambda ln: 0 <> len(ln) and not ln.startswith("#"), map(string.strip, fh.readlines())):
(k, _, v) = map(string.strip, line.partition("="))
if not k or not v:
continue
_k_map[unicode(k)] = v
return _k_map
def is_ascii(in_str):
return all(ord(c) < 128 for c in in_str)
def kasi(kw_map_file, target_file):
kw_map = getKeywordMapFromFile(kw_map_file)
buffer_str = u""
quote_seen = False
line_no = 0
need_to_buffer = False
with codecs.open(target_file, "r", "utf-8") as rh, open("%s.C" % target_file, "w") as wh:
# yes, single pass :-)
for ln in rh:
line_no += 1
for ch in ln:
# handling string literals
if '"' == ch:
# Yes toggle
quote_seen = not quote_seen
# if inside the code just write it
if quote_seen:
wh.write(ch)
continue
# in the state of handling foriegn keywords
if need_to_buffer:
# is_ascii will change the state
if is_ascii(ch):
c_kw = kw_map.get(buffer_str, None)
# error out for an un mapped key word
if None == c_kw:
raise RuntimeError("no such keyword @ line_no %d" % line_no)
# write the map and current ascii char
wh.write(c_kw)
wh.write(ch)
# reset the state
buffer_str = u''
need_to_buffer = False
continue
else:
# else append to unicode buffer
buffer_str += ch
continue
# not ascii, drama starts
if not is_ascii(ch):
need_to_buffer = True
buffer_str += ch
continue
# don't care, just stream
wh.write(ch)
def usage():
sys.stderr.write('''usage: %s [-h] [-k <keyword_map_file>] [-v] <file>
-h, --help: show this help and exit.
-k, --kw_map: key word map file (default: %s).
-v, --verbose: enable verbose.
''' % (sys.argv[0], DEFAULT_MAP_FILE))
# start here
def main():
if 2 > len(sys.argv):
usage()
sys.exit(2)
try:
opts, args = getopt.getopt(sys.argv[1:], "hk:v", ["help", "kw_map", "verbose"])
except getopt.GetoptError as err:
sys.stderr.write("Error: %s\n" % str(err))
usage()
sys.exit(2)
kw_map_file = DEFAULT_MAP_FILE
verbose = False
for o, a in opts:
if o in ('-v', '--verbose'):
verbose = True
elif o in ('-c', '--kw_map'):
kw_map_file = a
elif o in ('-h', '--help'):
usage()
sys.exit()
kasi(kw_map_file, sys.argv[-1])
# let's start
if __name__ == '__main__':
main()
| apache-2.0 | -6,251,791,946,654,577,000 | 25.394495 | 110 | 0.533542 | false | 3.329861 | false | false | false |
bwasti/caffe2 | caffe2/python/layers/uniform_sampling.py | 1 | 3104 | ## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import LayerParameter, ModelLayer
class UniformSampling(ModelLayer):
"""
Uniform sampling `num_samples - len(input_record)` unique elements from the
range [0, num_elements). `samples` is the concatenation of input_record and
the samples. input_record is expected to be unique.
"""
def __init__(
self,
model,
input_record,
num_samples,
num_elements,
name='uniform_sampling',
**kwargs
):
super(UniformSampling, self).__init__(
model, name, input_record, **kwargs
)
assert num_elements > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
self.num_samples = model.net.NextScopedBlob(name + "_num_samples")
self.params.append(
LayerParameter(
parameter=self.num_samples,
initializer=core.CreateOperator(
"GivenTensorInt64Fill",
[],
self.num_samples,
shape=(1, ),
values=[num_samples],
),
optimizer=model.NoOptim,
)
)
self.sampling_prob = model.net.NextScopedBlob(name + "_prob")
self.params.append(
LayerParameter(
parameter=self.sampling_prob,
initializer=core.CreateOperator(
"ConstantFill",
[],
self.sampling_prob,
shape=(num_samples, ),
value=float(num_samples) / num_elements,
dtype=core.DataType.FLOAT
),
optimizer=model.NoOptim,
)
)
self.output_schema = schema.Struct(
(
'samples', schema.Scalar(
np.int32, model.net.NextScopedBlob(name + "_samples")
)
),
('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
)
def add_ops(self, net):
net.StopGradient(self.sampling_prob, self.sampling_prob)
shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
shape = net.Sub([self.num_samples, shape], shape)
samples = net.UniqueUniformFill(
[shape, self.input_record()],
net.NextScopedBlob("samples"),
min=0,
max=self.num_elements - 1,
input_as_shape=True
)
net.Concat(
[self.input_record(), samples],
[self.output_schema.samples(), net.NextScopedBlob("split_info")],
axis=0
)
net.StopGradient(
self.output_schema.samples(), self.output_schema.samples()
)
| apache-2.0 | 5,770,119,703,379,451,000 | 30.353535 | 79 | 0.539626 | false | 4.390382 | false | false | false |
ramonvanalteren/jenkinsapi | jenkinsapi/artifact.py | 2 | 3818 | """
Artifacts can be used to represent data created as a side-effect of running a Jenkins build.
Artifacts are files which are associated with a single build. A build can have any number of
artifacts associated with it.
This module provides a class called Artifact which allows you to download objects from the server
and also access them as a stream.
"""
from __future__ import with_statement
import urllib
import os
import logging
import hashlib
from jenkinsapi.exceptions import ArtifactBroken
from jenkinsapi.fingerprint import Fingerprint
log = logging.getLogger(__name__)
class Artifact(object):
"""
Represents a single Jenkins artifact, usually some kind of file
generated as a by-product of executing a Jenkins build.
"""
def __init__(self, filename, url, build=None):
self.filename = filename
self.url = url
self.build = build
def save(self, fspath):
"""
Save the artifact to an explicit path. The containing directory must exist.
Returns a reference to the file which has just been writen to.
:param fspath: full pathname including the filename, str
:return: filepath
"""
log.info("Saving artifact @ %s to %s" % (self.url, fspath))
if not fspath.endswith(self.filename):
log.warn("Attempt to change the filename of artifact %s on save." % self.filename)
if os.path.exists(fspath):
if self.build:
try:
if self._verify_download(fspath):
log.info("Local copy of %s is already up to date." % self.filename)
return fspath
except ArtifactBroken:
log.info("Jenkins artifact could not be identified.")
else:
log.info("This file did not originate from Jenkins, so cannot check.")
else:
log.info("Local file is missing, downloading new.")
filename = self._do_download(fspath)
try:
self._verify_download(filename)
except ArtifactBroken:
log.warning("fingerprint of the downloaded artifact could not be verified")
return filename
def _do_download(self, fspath):
"""
Download the the artifact to a path.
"""
filename, _ = urllib.urlretrieve(self.url, filename=fspath)
return filename
def _verify_download(self, fspath):
"""
Verify that a downloaded object has a valid fingerprint.
"""
local_md5 = self._md5sum(fspath)
fp = Fingerprint(self.build.job.jenkins.baseurl, local_md5, self.build.job.jenkins)
return fp.validate_for_build(os.path.basename(fspath), self.build.job.name, self.build.buildno)
def _md5sum(self, fspath, chunksize=2**20):
"""
A MD5 hashing function intended to produce the same results as that used by
Jenkins.
"""
md5 = hashlib.md5()
try:
with open(fspath,'rb') as f:
for chunk in iter(lambda: f.read(chunksize), ''):
md5.update(chunk)
except:
raise
return md5.hexdigest()
def savetodir(self, dirpath):
"""
Save the artifact to a folder. The containing directory must be exist, but use the artifact's
default filename.
"""
assert os.path.exists(dirpath)
assert os.path.isdir(dirpath)
outputfilepath = os.path.join(dirpath, self.filename)
return self.save(outputfilepath)
def __repr__(self):
"""
Produce a handy repr-string.
"""
return """<%s.%s %s>""" % (self.__class__.__module__,
self.__class__.__name__,
self.url)
| mit | -676,388,693,495,890,400 | 34.351852 | 103 | 0.59979 | false | 4.444703 | false | false | false |
fginter/roope | gui/roope.py | 1 | 9619 | from PyQt4.QtGui import *
from PyQt4.QtCore import Qt, pyqtSlot, QThread
from ui_roope import *
import sys
import struct
import serial.tools.list_ports
import serial
import math
import time
import datetime
DOWN=180
UP=0
#COMMAND:
# DSSAAPB; (D)rive (S)teps (A)ngle 2bytes, 0-360 (P)en 1byte 0-255 (B)ackwards 1byte 0/1
class DrawThread(QThread):
def __init__(self,roope):
QThread.__init__(self)
self.roope=roope
def run(self):
#self.roope.calibrate_sidestep(120)
#self.roope.calibrate_vertical(120*150,1)
#self.roope.calibrate_pen()
self.roope.drawing_started=datetime.datetime.now()
self.roope.draw_fig()
#self.roope.two_lines(40)
self.terminate()
class Roope(QMainWindow):
def __init__(self,pixel_v_steps,pixel_h_steps):
QMainWindow.__init__(self)
self.gui=Ui_MainWindow()
self.gui.setupUi(self)
self.scene=QGraphicsScene()
self.gui.g_view.setScene(self.scene)
self.port=None
self.connect_to_port()
self.pixel_v_steps=pixel_v_steps
self.pixel_h_steps=pixel_h_steps
self.gui.verticalCorrection.setValue(93.0)
self.gui.sideStepCorrection.setValue(90.0)
self.draw_t=DrawThread(self)
self.gui.position_label.setText("")
self.progress=0
self.total_pixels=0 #draw_fig will set this
self.drawing_started=None #The draw process will fill this
self.v_to_h_ratio=1.28 #Multiplier for the vertical step to get a square
# def refreshSerialPorts(self):
# self.gui.portList.clear()
# for path,comment,HWID in serial.tools.list_ports.comports():
# if not "USB" in path:
# continue
# self.gui.portList.addItem(path)
def connect_to_port(self,port=None):
if self.port!=None:
self.port.close()
self.port=None
if port==None: #Try to auto-detect
for path,comment,HWID in serial.tools.list_ports.comports():
if "ttyUSB" in path:
port=path
break
if port:
self.port=serial.Serial(port,9600,bytesize=serial.EIGHTBITS,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE)
time.sleep(3)
print >> sys.stderr, "Connected to", port
else:
print >> sys.stderr, "Couldn't find any port to connect to"
def load(self,fig_file,height=50):
img=QImage(fig_file)
img2=img.scaledToHeight(height,Qt.SmoothTransformation)
self.image=img2
img_vis=self.BW2(img2)
pix_map=QPixmap(img_vis)
pix_map_item=self.scene.addPixmap(pix_map)
scale= min(self.scene.itemsBoundingRect().height()/float(self.gui.g_view.height()),self.scene.itemsBoundingRect().width()/float(self.gui.g_view.width()))
self.gui.g_view.scale(scale,scale)
#self.gui.g_view.fitInView(pix_map_item,Qt.KeepAspectRatio)
def BW2(self,img,white_level=100):
img2=img.copy()
for x in xrange(img2.width()):
for y in xrange(img2.height()):
g=qGray(img2.pixel(x,y))
g=255-g
if g<=white_level:
g=0
else:
g=(g//51)*51
img2.setPixel(x,y,qRgb(255-g,255-g,255-g))
return img2
def BW(self,img):
img2=img.scaled(img.width()*5,img.height()*5)
for x2 in xrange(img2.width()):
for y2 in xrange(img2.height()):
img2.setPixel(x2,y2,qRgb(255,255,255))
for x in xrange(img.width()):
for y in xrange(img.height()):
p=img.pixel(x,y)
gray=5-(qGray(p)//51)
assert gray<=6
for x2 in xrange(x*5,x*5+gray):
for y2 in xrange(y*5,y*5+5):
img2.setPixel(x2,y2,qRgb(0,0,0))
return img2
def comm(self,s):
if not self.port:
time.sleep(0.005)
return
self.port.write(s)
#Now wait for "OK"
while True:
b=self.port.read()
if b!=";":
sys.stderr.write(b)
sys.stderr.flush()
else:
break
def move_pixel(self,steps,angle,pen,backwards):
print "S=", steps, "A=", angle, "P=", pen, "B=", backwards
command=struct.pack("<cHHBBc","D",steps,angle,pen,backwards,";") #c character, H unsigned 2B int, B unsigned byte "<" little endian
self.comm(command)
def calibrate_sidestep(self,pixel_h_steps):
counter=1
for _ in range(150):
print counter
self.side_step(UP,pixel_h_steps,20,101)
def calibrate_pen(self):
while True:
self.move_pixel(200,0,0,False)
self.move_pixel(200,0,255,True)
def two_lines(self,pixel_v_steps):
while True:
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,250,True)
for _ in range(50):
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,0,True)
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0),0,250,True)
self.side_step(UP,pixel_v_steps,20)
self.move_pixel(pixel_v_steps,0,250,False)
for _ in range(50):
self.move_pixel(pixel_v_steps,0,0,False)
self.move_pixel(pixel_v_steps,0,250,False)
self.side_step(UP,pixel_v_steps,20)
def calibrate_vertical(self,pixel_v_steps,reps):
counter=1
while True:
print counter
for _ in range(reps):
self.move_pixel(int(pixel_v_steps*self.gui.verticalCorrection.value()/100.0*self.v_to_h_ratio),0,0,True)
time.sleep(10)
for _ in range(reps):
self.move_pixel(pixel_v_steps*self.v_to_h_ratio,0,0,False)
def gohome(self):
print "GO HOME"
command=struct.pack("<cHHBBc","H",0,0,0,False,";") #c character, H unsigned 2B int, B unsigned byte "<" little endian
self.comm(command)
for _ in range(10): #how many pixels to back?
self.move_pixel(self.pixel_v_steps,0,0,True) #backs to position
def draw_fig(self,from_x=0,from_y=0,direction=DOWN):
self.total_pixels=(self.image.width()-from_x)*self.image.height()-from_y
self.gohome() #start by finding home
xs=range(from_x,self.image.width(),self.pixel_h_steps//self.pixel_v_steps)
for x in xs:
# print "X=",x, "Image width:", self.image.width(), "Image height:", self.image.height()
if x==from_x:
y=from_y
else:
if direction==DOWN:
y=0
else:
y=self.image.height()-1
self.follow_line(x,y,direction)
if direction==DOWN:
self.side_step(UP,steps=self.pixel_h_steps,angle=20)
direction=UP
else:
self.side_step(DOWN,steps=self.pixel_h_steps,angle=20)
self.gohome()
direction=DOWN
def follow_line(self,x=0,from_y=0,direction=DOWN):
if direction==DOWN:
ys=xrange(from_y,self.image.height())
backwards=1
elif direction==UP:
ys=xrange(from_y,-1,-1)
backwards=0
for y in ys:
if direction==DOWN:
step=int(self.pixel_v_steps*self.gui.verticalCorrection.value()/100.0)
elif direction==UP:
step=int(self.pixel_v_steps)
step=int(step*self.v_to_h_ratio)
color2=self.image.pixel(x,y)
print "x,y=",x,y
self.move_pixel(step,0,255-qGray(color2),backwards)
self.progress+=1
time_progressed=(datetime.datetime.now()-self.drawing_started).total_seconds()
portion_done=float(self.progress)/self.total_pixels
eta=self.drawing_started+datetime.timedelta(seconds=float(time_progressed)/portion_done)
self.gui.position_label.setText("X=%03d Y=%03d Done: %.2f%% ETA: %02d:%02d:%02d"%(x,y,100.0*portion_done,eta.hour,eta.minute,eta.second))
def side_step(self,direction,steps,angle,pen=0):
angleRAD=math.radians(90-abs(angle))
traverse=int(steps/math.cos(angleRAD)) #How many steps to travel under angle?
back=int(steps*math.tan(angleRAD))
if direction==DOWN:
self.move_pixel(traverse,360-angle,pen,True)
self.move_pixel(int(back*self.gui.sideStepCorrection.value()/100.0),0,pen,False) #maybe less of a correction needed here?
elif direction==UP:
self.move_pixel(traverse,angle,pen,False)
self.move_pixel(int(back*self.gui.sideStepCorrection.value()/100.0),0,pen,True)
def main(app):
global draw_t
roope=Roope(pixel_v_steps=240,pixel_h_steps=240)
#roope.load("20140617_010845.jpg",height=30)
roope.load("spiral.png",height=50)
roope.show()
#roope.draw_fig()
roope.draw_t.start()
#roope.move_pixel(100,0,1,0)
#roope.side_step(UP,100)
#roope.move_pixel(100,0,1,1)
#roope.move_pixel(20,0,101,0)
#roope.move_pixel(200,0,255,0)
#roope.move_pixel(2000,0,180,0)
#roope.follow_line()
#roope.load("photo.jpg")
return app.exec_()
if __name__ == "__main__":
app = QApplication(sys.argv)
sys.exit(main(app))
| gpl-2.0 | -6,359,030,835,796,211,000 | 36.870079 | 161 | 0.573033 | false | 3.260678 | false | false | false |
notsambeck/siftsite | siftsite/skeleton.py | 1 | 3619 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
console_scripts =
fibonacci = siftsite.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import os
import logging
import requests
from siftsite import __version__
__author__ = "Sam Beck"
__copyright__ = "Sam Beck"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def upload_dir(filepath, label, source):
'''upload whole directory (calls upload on all .png)'''
base_dir = os.path.expanduser(os.path.dirname(filepath))
files = os.listdir(base_dir)
input('will upload {} files, continue or ctrl-c'.format(len(files)))
for f in files:
print(f[-4:])
if f[-4:] == '.png':
upload(os.path.join(base_dir, f), label, source)
def upload(filepath, label, source):
'''POST request to your API with "files" key in requests data dict'''
base_dir = os.path.expanduser(os.path.dirname(filepath))
# url = 'http://localhost:8000/api/'
url = 'https://still-taiga-56301.herokuapp.com/api/'
file_name = os.path.basename(filepath)
with open(os.path.join(base_dir, file_name), 'rb') as fin:
print('file:', base_dir, '/', file_name)
POST_data = {'correct_label': label, 'source': source,
'filename': file_name}
files = {'filename': (file_name, fin), 'file': file_name}
resp = requests.post(url, data=POST_data, files=files)
print(resp)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--version',
action='version',
version='siftsite {ver}'.format(ver=__version__))
parser.add_argument(
'--upload',
dest="upload",
help="Path to image file for upload to labler API",
type=str)
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_known_args(args)
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args, unknown = parse_args(args)
if args.upload:
upload(args.upload)
else:
print('yeah cool sure')
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
| mit | -3,538,116,300,525,530,000 | 27.496063 | 77 | 0.623377 | false | 3.715606 | false | false | false |
Antergos/Cnchi | src/installation/wrapper.py | 1 | 6440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# wrapper.py
#
# Copyright © 2013-2018 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Helper module to run some disk/partition related utilities """
import subprocess
import logging
from misc.extra import InstallError
from misc.run_cmd import call
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
def wipefs(device, fatal=True):
""" Wipe fs from device """
err_msg = "Cannot wipe the filesystem of device {0}".format(device)
cmd = ["wipefs", "-a", device]
call(cmd, msg=err_msg, fatal=fatal)
def run_dd(input_device, output_device, bytes_block=512, count=2048, seek=0):
""" Helper function to call dd
Copy a file, converting and formatting according to the operands."""
cmd = [
'dd',
'if={}'.format(input_device),
'of={}'.format(output_device),
'bs={}'.format(bytes_block),
'count={}'.format(count),
'seek={}'.format(seek),
'status=noxfer']
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.warning("Command %s failed: %s", err.cmd, err.output)
def partprobe():
""" Runs partprobe """
try:
subprocess.check_output('/usr/bin/partprobe', stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.error("Command %s failed: %s", err.cmd, err.output.decode())
def sgdisk(command, device):
""" Helper function to call sgdisk (GPT) """
if command == 'zap-all':
# this will be the first sgdisk command. Try to run partprobe first
# so previous changes are communicated to the kernel
partprobe()
cmd = ['sgdisk', "--{0}".format(command), device]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.error("Command %s failed: %s", err.cmd, err.output.decode())
txt = _("Command {0} failed: {1}").format(err.cmd, err.output.decode())
raise InstallError(txt)
def sgdisk_new(device, part_num, label, size, hex_code):
""" Helper function to call sgdisk --new (GPT) """
# --new: Create a new partition, numbered partnum, starting at sector start
# and ending at sector end.
# Parameters: partnum:start:end (zero in start or end means using default
# value)
# --typecode: Change a partition's GUID type code to the one specified by
# hexcode. Note that hexcode is a gdisk/sgdisk internal
# two-byte hexadecimal code.
# You can obtain a list of codes with the -L option.
# Parameters: partnum:hexcode
# --change-name: Change the name of the specified partition.
# Parameters: partnum:name
cmd = [
'sgdisk',
'--new={0}:0:+{1}M'.format(part_num, size),
'--typecode={0}:{1}'.format(part_num, hex_code),
'--change-name={0}:{1}'.format(part_num, label),
device]
_create_partition_cmd(device, cmd)
def parted_set(device, number, flag, state):
""" Helper function to call set parted command """
cmd = [
'parted', '--align', 'optimal', '--script', device,
'set', number, flag, state]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = "Cannot set flag {0} on device {1}. Command {2} has failed: {3}"
txt = txt.format(flag, device, err.cmd, err.output.decode())
logging.error(txt)
def parted_mkpart(device, ptype, start, end, filesystem=""):
""" Helper function to call mkpart parted command """
# If start is < 0 we assume we want to mkpart at the start of the disk
if start < 0:
start_str = "1"
else:
start_str = "{0}MiB".format(start)
# -1s means "end of disk"
if end == "-1s":
end_str = end
else:
end_str = "{0}MiB".format(end)
cmd = [
'parted', '--align', 'optimal', '--script', device,
'--',
'mkpart', ptype, filesystem, start_str, end_str]
_create_partition_cmd(device, cmd)
def _create_partition_cmd(device, cmd):
""" Runs cmd command that tries to create a new partition in device """
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = "Cannot create a new partition on device {0}. Command {1} has failed: {2}"
txt = txt.format(device, err.cmd, err.output.decode())
logging.error(txt)
txt = _(
"Cannot create a new partition on device {0}. Command {1} has failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
raise InstallError(txt)
def parted_mklabel(device, label_type="msdos"):
""" Helper function to call mktable parted command """
cmd = [
"parted", "--align", "optimal", "--script", device,
"mklabel", label_type]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
txt = ("Cannot create a new partition table on device {0}. "
"Command {1} failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
logging.error(txt)
txt = _("Cannot create a new partition table on device {0}. "
"Command {1} failed: {2}")
txt = txt.format(device, err.cmd, err.output.decode())
raise InstallError(txt)
| gpl-3.0 | -5,595,447,501,265,313,000 | 33.994565 | 88 | 0.634726 | false | 3.754519 | false | false | false |
openstack/zaqar | zaqar/storage/swift/subscriptions.py | 1 | 7064 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import functools
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import swiftclient
try: # Python3
from urllib.parse import quote_plus
except ImportError: # Python2
from urllib import quote_plus
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.swift import utils
class SubscriptionController(storage.Subscription):
"""Implements subscription resource operations with swift backend.
Subscriptions are scoped by queue and project.
subscription -> Swift mapping:
+----------------+---------------------------------------+
| Attribute | Storage location |
+----------------+---------------------------------------+
| Sub UUID | Object name |
+----------------+---------------------------------------+
| Queue Name | Container name prefix |
+----------------+---------------------------------------+
| Project name | Container name prefix |
+----------------+---------------------------------------+
| Created time | Object Creation Time |
+----------------+---------------------------------------+
| Sub options | Object content |
+----------------+---------------------------------------+
"""
def __init__(self, *args, **kwargs):
super(SubscriptionController, self).__init__(*args, **kwargs)
self._client = self.driver.connection
def list(self, queue, project=None, marker=None,
limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE):
container = utils._subscription_container(queue, project)
try:
_, objects = self._client.get_container(container,
limit=limit,
marker=marker)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
objects = []
else:
raise
marker_next = {}
yield utils.SubscriptionListCursor(
objects, marker_next,
functools.partial(self._client.get_object, container))
yield marker_next and marker_next['next']
def get(self, queue, subscription_id, project=None):
container = utils._subscription_container(queue, project)
try:
headers, data = self._client.get_object(container, subscription_id)
except swiftclient.ClientException as exc:
if exc.http_status == 404:
raise errors.SubscriptionDoesNotExist(subscription_id)
raise
return utils._subscription_to_json(data, headers)
def create(self, queue, subscriber, ttl, options, project=None):
sub_container = utils._subscriber_container(queue, project)
slug = uuidutils.generate_uuid()
try:
utils._put_or_create_container(
self._client,
sub_container,
quote_plus(subscriber),
contents=slug,
headers={'x-delete-after': ttl, 'if-none-match': '*'})
except swiftclient.ClientException as exc:
if exc.http_status == 412:
return
raise
container = utils._subscription_container(queue, project)
data = {'id': slug,
'source': queue,
'subscriber': subscriber,
'options': options,
'ttl': ttl,
'confirmed': False}
utils._put_or_create_container(
self._client, container, slug, contents=jsonutils.dumps(data),
content_type='application/json', headers={'x-delete-after': ttl})
return slug
def update(self, queue, subscription_id, project=None, **kwargs):
container = utils._subscription_container(queue, project)
data = self.get(queue, subscription_id, project)
data.pop('age')
ttl = data['ttl']
if 'subscriber' in kwargs:
sub_container = utils._subscriber_container(queue, project)
try:
self._client.put_object(
sub_container,
quote_plus(kwargs['subscriber']),
contents=subscription_id,
headers={'x-delete-after': ttl, 'if-none-match': '*'})
except swiftclient.ClientException as exc:
if exc.http_status == 412:
raise errors.SubscriptionAlreadyExists()
raise
self._client.delete_object(sub_container,
quote_plus(data['subscriber']))
data.update(kwargs)
self._client.put_object(container,
subscription_id,
contents=jsonutils.dumps(data),
content_type='application/json',
headers={'x-delete-after': ttl})
def exists(self, queue, subscription_id, project=None):
container = utils._subscription_container(queue, project)
return self._client.head_object(container, subscription_id)
def delete(self, queue, subscription_id, project=None):
try:
data = self.get(queue, subscription_id, project)
except errors.SubscriptionDoesNotExist:
return
sub_container = utils._subscriber_container(queue, project)
try:
self._client.delete_object(sub_container,
quote_plus(data['subscriber']))
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
container = utils._subscription_container(queue, project)
try:
self._client.delete_object(container, subscription_id)
except swiftclient.ClientException as exc:
if exc.http_status != 404:
raise
def get_with_subscriber(self, queue, subscriber, project=None):
sub_container = utils._subscriber_container(queue, project)
headers, obj = self._client.get_object(sub_container,
quote_plus(subscriber))
return self.get(queue, obj, project)
def confirm(self, queue, subscription_id, project=None, confirmed=True):
self.update(queue, subscription_id, project, confirmed=confirmed)
| apache-2.0 | -8,218,387,053,009,681,000 | 42.337423 | 79 | 0.54346 | false | 5.002833 | false | false | false |
aagusti/e-sipkd | esipkd/views/reports.py | 1 | 155518 | import sys
import re
from email.utils import parseaddr
from sqlalchemy import not_, func, case, and_, or_, desc, extract
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import literal_column, column
from datetime import datetime
from time import gmtime, strftime
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from esipkd.views.base_view import BaseViews
from pyjasper import (JasperGenerator)
from pyjasper import (JasperGeneratorWithSubreport)
import xml.etree.ElementTree as ET
from pyramid.path import AssetResolver
from ..models import (
DBSession,
UserResourcePermission,
Resource,
User,
Group,
)
from ..models.isipkd import *
from ..security import group_in
"""import sys
import unittest
import os.path
import uuid
from osipkd.tools import row2dict, xls_reader
from datetime import datetime
#from sqlalchemy import not_, func
from sqlalchemy import *
from pyramid.view import (view_config,)
from pyramid.httpexceptions import ( HTTPFound, )
import colander
from deform import (Form, widget, ValidationFailure, )
from osipkd.models import DBSession, User, Group, Route, GroupRoutePermission
from osipkd.models.apbd_anggaran import Kegiatan, KegiatanSub, KegiatanItem
from datatables import ColumnDT, DataTables
from osipkd.views.base_view import BaseViews
from pyjasper import (JasperGenerator)
from pyjasper import (JasperGeneratorWithSubreport)
import xml.etree.ElementTree as ET
from pyramid.path import AssetResolver
from osipkd.models.base_model import *
from osipkd.models.pemda_model import *
from osipkd.models.apbd import *
from osipkd.models.apbd_anggaran import *
"""
def get_rpath(filename):
a = AssetResolver('esipkd')
resolver = a.resolve(''.join(['reports/',filename]))
return resolver.abspath()
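# Usage sketch (the template name below is illustrative, not from this module):
#   get_rpath('lap1.jrxml') -> absolute path of esipkd/reports/lap1.jrxml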
angka = {1:'satu',2:'dua',3:'tiga',4:'empat',5:'lima',6:'enam',7:'tujuh',\
8:'delapan',9:'sembilan'}
b = ' puluh '
c = ' ratus '
d = ' ribu '
e = ' juta '
f = ' milyar '
g = ' triliun '
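# Terbilang() spells an integer out in Indonesian words: the decimal string is
# split into 3-digit groups and the head is handled recursively; b..g above are
# the place-value separators (tens, hundreds, thousands, ... trillions).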
def Terbilang(x):
y = str(x)
n = len(y)
if n <= 3 :
if n == 1 :
if y == '0' :
return ''
else :
return angka[int(y)]
elif n == 2 :
if y[0] == '1' :
                if y[1] == '1' :
                    return 'sebelas'
                elif y[1] == '0' :
                    return 'sepuluh'
                else :
                    return angka[int(y[1])] + ' belas'
elif y[0] == '0' :
x = y[1]
return Terbilang(x)
else :
x = y[1]
return angka[int(y[0])] + b + Terbilang(x)
else :
if y[0] == '1' :
x = y[1:]
return 'seratus ' + Terbilang(x)
elif y[0] == '0' :
x = y[1:]
return Terbilang(x)
else :
x = y[1:]
return angka[int(y[0])] + c + Terbilang(x)
    elif 3 < n <= 6 :
p = y[-3:]
q = y[:-3]
if q == '1' :
            return 'seribu ' + Terbilang(p)
elif q == '000' :
return Terbilang(p)
else:
return Terbilang(q) + d + Terbilang(p)
elif 6 < n <= 9 :
r = y[-6:]
s = y[:-6]
return Terbilang(s) + e + Terbilang(r)
elif 9 < n <= 12 :
t = y[-9:]
u = y[:-9]
return Terbilang(u) + f + Terbilang(t)
else:
v = y[-12:]
w = y[:-12]
return Terbilang(w) + g + Terbilang(v)
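# A few traced examples of the recursion above (results keep the spacing that
# the separator constants carry):
#   Terbilang(11)   -> 'sebelas'
#   Terbilang(205)  -> 'dua ratus lima'
#   Terbilang(1502) -> 'seribu lima ratus dua'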
class ViewLaporan(BaseViews):
def __init__(self, context, request):
global logo
global logo_pemda
BaseViews.__init__(self, context, request)
logo = self.request.static_url('esipkd:static/img/logo.png')
logo_pemda = self.request.static_url('esipkd:static/img/logo-pemda.png')
"""BaseViews.__init__(self, context, request)
self.app = 'anggaran'
row = DBSession.query(Tahun.status_apbd).filter(Tahun.tahun==self.tahun).first()
self.session['status_apbd'] = row and row[0] or 0
self.status_apbd = 'status_apbd' in self.session and self.session['status_apbd'] or 0
#self.status_apbd_nm = status_apbd[str(self.status_apbd)]
self.all_unit = 'all_unit' in self.session and self.session['all_unit'] or 0
self.unit_id = 'unit_id' in self.session and self.session['unit_id'] or 0
self.unit_kd = 'unit_kd' in self.session and self.session['unit_kd'] or "X.XX.XX"
self.unit_nm = 'unit_nm' in self.session and self.session['unit_nm'] or "Pilih Unit"
self.keg_id = 'keg_id' in self.session and self.session['keg_id'] or 0
self.datas['status_apbd'] = self.status_apbd
#self.datas['status_apbd_nm'] = self.status_apbd_nm
self.datas['all_unit'] = self.all_unit
self.datas['unit_kd'] = self.unit_kd
self.datas['unit_nm'] = self.unit_nm
self.datas['unit_id'] = self.unit_id
self.cust_nm = 'cust_nm' in self.session and self.session['cust_nm'] or 'PEMERINTAH KABUPATEN TANGERANG'
customer = self.cust_nm
logo = self.request.static_url('osipkd:static/img/logo.png')
"""
    # RECEIPTS REPORT (SSPD)
@view_config(route_name="report-sspd", renderer="templates/report/report_sspd.pt", permission="read")
def report_sspd(self):
params = self.request.params
return dict()
    # REPORTS
@view_config(route_name="report", renderer="templates/report/report.pt", permission="read")
def report(self):
params = self.request.params
return dict()
@view_config(route_name="reports_act")
def reports_act(self):
global awal, akhir, tgl_awal, tgl_akhir, u, unit_kd, unit_nm, unit_al, now, thn,bulan,bulan2
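        # The globals declared above appear to be consumed by the lap*Generator
        # classes further down this module for the report header (period, unit
        # letterhead, print date); this is an assumption, since they are only
        # assigned here.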
req = self.request
params = req.params
url_dict = req.matchdict
u = req.user.id
now = datetime.now().strftime('%Y-%m-%d')
thn = datetime.now().strftime('%Y')
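        # Request params below use the `key in params and params[key] and cast
        # or default` idiom, so missing and empty values both fall back to the
        # default after `or`.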
id = 'id' in params and params['id'] and int(params['id']) or 0
        #---------------------- Report filters ---------------------------------------#
jenis = 'jenis' in params and params['jenis'] and str(params['jenis']) or ''
bayar = 'bayar' in params and params['bayar'] and str(params['bayar']) or ''
rek = 'rek' in params and params['rek'] and str(params['rek']) or ''
h2h = 'h2h' in params and params['h2h'] and str(params['h2h']) or ''
unit = 'unit' in params and params['unit'] and str(params['unit']) or ''
bulan = 'bulan' in params and params['bulan'] and str(params['bulan']) or ''
if bulan!='':
bulan2 = int(bulan)-1
sptpd_id = 'sptpd_id' in params and params['sptpd_id'] and str(params['sptpd_id']) or ''
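        # Resolve the report letterhead: bendahara/wp users inherit their own
        # unit's code/name/address, everyone else gets the default
        # revenue-office header.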
        if group_in(req, 'bendahara') or group_in(req, 'wp'):
            unit_id = DBSession.query(UserUnit.unit_id
                      ).filter(UserUnit.user_id==u
                      ).first()
            unit_id = '%s' % unit_id
            unit_id = int(unit_id)
            unit = DBSession.query(Unit.kode.label('kd'),
                      Unit.nama.label('nm'),
                      Unit.alamat.label('al')
                      ).filter(UserUnit.unit_id==unit_id,
                               Unit.id==unit_id
                      ).first()
            unit_kd = '%s' % unit.kd
            unit_nm = '%s' % unit.nm
            unit_al = '%s' % unit.al
else:
unit_kd = "1.20.05."
unit_nm = "BADAN PENDAPATAN DAERAH"
unit_al = "Jl. Soekarno Hatta, No. 528, Bandung"
#-----------------------------------------------------------------------------#
tgl_awal = 'tgl_awal' in params and params['tgl_awal'] and str(params['tgl_awal']) or 0
tgl_akhir = 'tgl_akhir' in params and params['tgl_akhir'] and str(params['tgl_akhir']) or 0
awal = 'awal' in params and params['awal'] and str(params['awal']) or datetime.now().strftime('%Y-%m-%d')
akhir = 'akhir' in params and params['akhir'] and str(params['akhir']) or datetime.now().strftime('%Y-%m-%d')
        ##----------------------- Report queries ------------------------------------##
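        # Each 'Laporan_N' branch builds one query (over ARInvoice, ARTbp or
        # ARSspd), pushes it through the matching Jasper generator and returns
        # the rendered PDF as the HTTP response.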
if url_dict['act']=='Laporan_1' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.unit_kode)
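            # bayar selects the payment status: '1' = not yet paid or
            # receipted, '2' = paid or covered by a TBP receipt, '3' = all.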
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
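                    # .first() returns a one-column row; '%s' % row unpacks
                    # the 1-tuple so int() yields the unit id of the current
                    # bendahara user (raises if the user has no unit row).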
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.rekening_id==rek
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
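            # Stream the rows through the Jasper template and hand the PDF
            # straight back in the response body.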
generator = lap1Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_2' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).order_by(ARInvoice.unit_kode,
ARInvoice.kode,
ARInvoice.jumlah)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.rekening_id==rek)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap2Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_3' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.rek_kode)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_id==unit
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap3Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_4' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).order_by(ARInvoice.rek_kode,
ARInvoice.kode,
ARInvoice.jumlah)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0,
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1),
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_id==unit)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap4Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_5' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1)
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir)
).group_by(ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap5Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_6' :
query = DBSession.query(ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
func.sum(ARInvoice.dasar).label('dasar'),
func.sum(ARInvoice.pokok).label('pokok'),
func.sum(ARInvoice.denda).label('denda'),
func.sum(ARInvoice.bunga).label('bunga'),
func.sum(ARInvoice.jumlah).label('jumlah')
).order_by(ARInvoice.rek_kode,
ARInvoice.unit_kode
)
if bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.is_tbp==0,
ARInvoice.status_bayar==0
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
or_(ARInvoice.status_bayar==1,
ARInvoice.is_tbp==1)
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
elif bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir)
).group_by(ARInvoice.rekening_id,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_id,
ARInvoice.unit_kode,
ARInvoice.unit_nama)
if group_in(req, 'bendahara'):
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARInvoice.unit_id==z)
generator = lap6Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='Laporan_8' :
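            # TBP receipt register: ARTbp rows by receipt date (tgl_terima).
            # Bendahara users are restricted to their own unit and rendered
            # with the per-unit layout; other roles get the cross-unit report.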
if group_in(req, 'bendahara'):
query = DBSession.query(ARTbp.kode.label('kd'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).filter(ARTbp.tgl_terima.between(awal,akhir)
).order_by(desc(ARTbp.tgl_terima),
desc(ARTbp.kode)
)
x = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
y = '%s' % x
z = int(y)
query = query.filter(ARTbp.unit_id==z)
generator = lap8benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = DBSession.query(ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.kode.label('kd'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).filter(ARTbp.tgl_terima.between(awal,akhir)
).order_by(ARTbp.unit_kode,
desc(ARTbp.tgl_terima),
desc(ARTbp.kode)
)
generator = lap8Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
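# Laporan_9: list of assessed invoices (ARInvoice) with tgl_tetap between
# `awal` and `akhir`; bendahara users get a unit-scoped variant.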
elif url_dict['act']=='Laporan_9' :
if group_in(req, 'bendahara'):
query = DBSession.query(ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).filter(ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode)
)
# look up the bendahara user's unit and restrict the report to it
user_unit = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
query = query.filter(ARInvoice.unit_id==user_unit.unit_id)
generator = lap9benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah')
).filter(ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(ARInvoice.unit_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode)
)
generator = lap9Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
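# Laporan_7: payment realization (ARSspd joined to ARInvoice); `h2h` picks the
# payment channel: '1' host-to-host (bank_id set), '2' manual (bank_id is
# NULL), anything else both.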
elif url_dict['act']=='Laporan_7' :
query = DBSession.query(ARSspd.bayar.label('bayar'),
ARSspd.bunga.label('bunga'),
ARSspd.tgl_bayar.label('tgl'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.jumlah.label('jumlah')
).join(ARInvoice)
if group_in(req, 'bendahara'):
if h2h=='1':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id!=0
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
elif h2h=='2':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id==None
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
else:
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
).order_by(desc(ARSspd.tgl_bayar),
ARInvoice.kode
)
# look up the bendahara user's unit and restrict the report to it
user_unit = DBSession.query(UserUnit.unit_id).filter(UserUnit.user_id==u).first()
query = query.filter(ARInvoice.unit_id==user_unit.unit_id)
generator = lap7benGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
if h2h=='1':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id!=0
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
elif h2h == '2':
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0,
ARSspd.bank_id == None
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
else:
query = query.filter(ARSspd.tgl_bayar.between(awal,akhir),
ARSspd.bayar!=0
).order_by(ARInvoice.unit_kode,
desc(ARSspd.tgl_bayar),
ARInvoice.kode,
ARInvoice.rek_kode
)
generator = lap7Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
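# Laporan_10: TBP detail (ARTbp) per unit for the period; the bendahara
# variant is filtered by `unit_kd`, the budget-wide (bud) variant covers all
# units.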
elif url_dict['act']=='Laporan_10' :
query = DBSession.query(ARTbp.unit_id.label('un_id'),
ARTbp.unit_kode.label('un_kd'),
ARTbp.unit_nama.label('un_nm'),
ARTbp.rekening_id.label('rek_id'),
ARTbp.rek_kode.label('rek_kd'),
ARTbp.rek_nama.label('rek_nm'),
ARTbp.kode.label('kd'),
ARTbp.invoice_kode.label('invoice_kode'),
ARTbp.tgl_terima.label('tgl_terima'),
ARTbp.periode_1.label('periode_1'),
ARTbp.periode_2.label('periode_2'),
ARTbp.jatuh_tempo.label('jatuh_tempo'),
ARTbp.wp_nama.label('wp_nm'),
ARTbp.op_nama.label('op_nm'),
ARTbp.dasar.label('dasar'),
ARTbp.pokok.label('pokok'),
ARTbp.denda.label('denda'),
ARTbp.bunga.label('bunga'),
ARTbp.jumlah.label('jumlah')
).order_by(ARTbp.unit_kode,
desc(ARTbp.tgl_terima),
desc(ARTbp.kode),
ARTbp.rek_kode,
)
if group_in(req, 'bendahara'):
query = query.filter(ARTbp.tgl_terima.between(awal,akhir),
ARTbp.unit_kode.ilike('%%%s%%' % unit_kd))
generator = lap10Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
query = query.filter(ARTbp.tgl_terima.between(awal,akhir))
generator = lap10budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
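# Laporan_11: invoice list with payment status; the `bayar` parameter maps
# '1' -> unpaid (status_bayar==0), '2' -> paid (status_bayar==1), '3' -> all.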
elif url_dict['act']=='Laporan_11' :
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status'))
if group_in(req, 'bendahara'):
## payment-status condition: bayar '1' -> unpaid (0), '2' -> paid (1) ##
cek_bayar = bayar
if cek_bayar == '1':
bayar = 0
elif cek_bayar == '2':
bayar = 1
if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
generator = lap11Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
## payment-status condition: bayar '1' -> unpaid (0), '2' -> paid (1) ##
cek_bayar = bayar
if cek_bayar == '1':
bayar = 0
elif cek_bayar == '2':
bayar = 1
if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd)
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
generator = lap11budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
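# Laporan_12: invoice status recap; the bendahara variant is ordered by
# rekening and limited to `unit_kd`, the budget-wide (bud) variant is ordered
# by unit. The `bayar` parameter maps as in Laporan_11.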
elif url_dict['act']=='Laporan_12' :
if group_in(req, 'bendahara'):
## payment-status condition: bayar '1' -> unpaid (0), '2' -> paid (1) ##
cek_bayar = bayar
if cek_bayar == '1':
bayar = 0
elif cek_bayar == '2':
bayar = 1
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status')
).order_by(ARInvoice.rek_kode,
ARInvoice.unit_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
ARInvoice.status_bayar==bayar,
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd))
generator = lap12Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
## payment-status condition: bayar '1' -> unpaid (0), '2' -> paid (1) ##
cek_bayar = bayar
if cek_bayar == '1':
bayar = 0
elif cek_bayar == '2':
bayar = 1
query = DBSession.query(ARInvoice.unit_id.label('un_id'),
ARInvoice.unit_kode.label('un_kd'),
ARInvoice.unit_nama.label('un_nm'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.wp_nama.label('wp_nm'),
ARInvoice.dasar.label('dasar'),
ARInvoice.pokok.label('pokok'),
ARInvoice.denda.label('denda'),
ARInvoice.bunga.label('bunga'),
ARInvoice.jumlah.label('jumlah'),
case([(ARInvoice.status_bayar==0,"Belum")],
else_="Sudah").label('status')
).order_by(ARInvoice.unit_kode,
ARInvoice.rek_kode,
desc(ARInvoice.tgl_tetap),
desc(ARInvoice.kode))
if cek_bayar == '1':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#ARInvoice.is_tbp==0,
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd),
ARInvoice.status_bayar==bayar)
elif cek_bayar == '2':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir),
#or_(ARInvoice.status_bayar==bayar,
# ARInvoice.is_tbp==1),
#ARInvoice.unit_kode.ilike('%%%s%%' % unit_kd),
ARInvoice.status_bayar==bayar)
elif cek_bayar == '3':
query = query.filter(ARInvoice.tgl_tetap.between(awal,akhir))
generator = lap12budGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
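# Laporan_13: target vs. realization per rekening. The subquery sums the
# budget (Anggaran: revised figure when present, original otherwise); the
# outer query sums payments up to month `bulan2` and within month `bulan`.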
elif url_dict['act']=='Laporan_13' :
subq = DBSession.query(Rekening.kode.label('rek_kd'),
Rekening.nama.label('rek_nm'),
func.sum(case([(func.coalesce(Anggaran.perubahan,0)>0,func.coalesce(Anggaran.perubahan,0))],
else_=func.coalesce(Anggaran.murni,0))).label('target')
).select_from(Rekening
).filter(Anggaran.tahun==thn,
Anggaran.kode.ilike(func.concat(Rekening.kode,'%'))
).group_by(Rekening.kode,
Rekening.nama
).order_by(Rekening.kode
).subquery()
query = DBSession.query(subq.c.rek_kd,
subq.c.rek_nm,
subq.c.target,
func.sum(case([(func.extract('month', ARSspd.tgl_bayar)<=bulan2,func.coalesce(ARSspd.bayar,0))],
else_=0)).label('bayar1'),
func.sum(case([(func.extract('month', ARSspd.tgl_bayar)==bulan,func.coalesce(ARSspd.bayar,0))],
else_=0)).label('bayar2')
).select_from(ARSspd
).join(ARInvoice
).filter(ARSspd.tahun_id==ARInvoice.tahun_id,
ARInvoice.rek_kode.ilike(func.concat(subq.c.rek_kd,'%')),
ARInvoice.tahun_id==thn
).group_by(subq.c.rek_kd,
subq.c.rek_nm,
subq.c.target
).order_by(subq.c.rek_kd
)
##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(ARSspd.bayar!=0,
ARSspd.bank_id!=0)
elif h2h=='2':
query = query.filter(ARSspd.bayar!=0,
ARSspd.bank_id==None)
else:
query = query.filter(ARSspd.bayar!=0)
##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
# look up the bendahara user's unit and restrict the report to it
user_unit = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
query = query.filter(ARInvoice.unit_id==user_unit.unit_id)
generator = lap13Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
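# Laporan_14: individual payments in month `bulan` of year `thn`, grouped by
# wilayah (region); `h2h` selects the payment channel as above.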
elif url_dict['act']=='Laporan_14' :
query = DBSession.query(ARSspd.bayar.label('bayar'),
ARSspd.tgl_bayar.label('tgl'),
ARInvoice.rekening_id.label('rek_id'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
ARInvoice.kode.label('kd'),
ARInvoice.tgl_tetap.label('tgl_ttp'),
ARInvoice.jumlah.label('jumlah'),
Wilayah.kode.label('wil_kd'),
Wilayah.nama.label('wil_nm')
).join(ARInvoice
).outerjoin(Wilayah
).order_by(Wilayah.kode,
ARInvoice.rek_kode,
ARSspd.tgl_bayar,
ARInvoice.kode)
##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0,
ARSspd.bank_id!=0)
elif h2h=='2':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0,
ARSspd.bank_id==None)
else:
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
extract('month', ARSspd.tgl_bayar)==bulan,
ARSspd.bayar!=0)
##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
# look up the bendahara user's unit and restrict the report to it
user_unit = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
query = query.filter(ARInvoice.unit_id==user_unit.unit_id)
generator = lap14Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
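# Laporan_15: monthly payment recap per rekening for year `thn`; the CASE
# expression maps each month to its quarter (triwulan).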
elif url_dict['act']=='Laporan_15' :
query = DBSession.query(func.sum(ARSspd.bayar).label('bayar'),
ARInvoice.rek_kode.label('rek_kd'),
ARInvoice.rek_nama.label('rek_nm'),
func.extract('month', ARSspd.tgl_bayar).label("bln"),
case([(func.extract('month', ARSspd.tgl_bayar)==1,"1"),
(func.extract('month', ARSspd.tgl_bayar)==2,"1"),
(func.extract('month', ARSspd.tgl_bayar)==3,"1"),
(func.extract('month', ARSspd.tgl_bayar)==4,"2"),
(func.extract('month', ARSspd.tgl_bayar)==5,"2"),
(func.extract('month', ARSspd.tgl_bayar)==6,"2"),
(func.extract('month', ARSspd.tgl_bayar)==7,"3"),
(func.extract('month', ARSspd.tgl_bayar)==8,"3"),
(func.extract('month', ARSspd.tgl_bayar)==9,"3"),
(func.extract('month', ARSspd.tgl_bayar)==10,"4"),
(func.extract('month', ARSspd.tgl_bayar)==11,"4"),
(func.extract('month', ARSspd.tgl_bayar)==12,"4")],
else_="4").label("triwulan"),
).join(ARInvoice
).order_by(func.extract('month', ARSspd.tgl_bayar),
ARInvoice.rek_kode
).group_by(func.extract('month', ARSspd.tgl_bayar),
ARInvoice.rek_kode,
ARInvoice.rek_nama
)
##-------- Filter by payment channel ----------##
if h2h=='1':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0,
ARSspd.bank_id!=0
)
elif h2h=='2':
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0,
ARSspd.bank_id==None
)
else:
query = query.filter(extract('year', ARSspd.tgl_bayar)==thn,
ARSspd.bayar!=0
)
##---------------- Filter by unit --------------##
if group_in(req, 'bendahara'):
# look up the bendahara user's unit and restrict the report to it
user_unit = DBSession.query(UserUnit.unit_id
).filter(UserUnit.user_id==u
).first()
query = query.filter(ARInvoice.unit_id==user_unit.unit_id)
generator = lap15Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
##----------------------------- End of Laporan reports -----------------##
###################### USER
elif url_dict['act']=='r001' :
# function case when alchemy -> case([(User.status==1,"Aktif"),],else_="Tidak Aktif").label("status")
# iWan was here
query = DBSession.query(User.user_name.label('username'), User.email, case([(User.status==1,"Aktif"),],else_="Tidak Aktif").label("status"), User.last_login_date.label('last_login'), User.registered_date).\
order_by(User.user_name).all()
generator = r001Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### GROUP
elif url_dict['act']=='r002' :
query = DBSession.query(Group.group_name.label('kode'), Group.description.label('nama')).order_by(Group.group_name).all()
generator = r002Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### SKPD/UNIT
elif url_dict['act']=='r003' :
query = DBSession.query(Unit.kode, Unit.nama, Unit.level_id, Unit.is_summary).order_by(Unit.kode).all()
generator = r003Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### JABATAN
elif url_dict['act']=='r004' :
query = DBSession.query(Jabatan.kode, Jabatan.nama, Jabatan.status).order_by(Jabatan.kode).all()
generator = r004Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### PEGAWAI
elif url_dict['act']=='r005' :
query = DBSession.query(Pegawai.kode, Pegawai.nama).order_by(Pegawai.kode).all()
generator = r005Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### REKENING
elif url_dict['act']=='r006' :
query = DBSession.query(Rekening.kode, Rekening.nama, Rekening.level_id, Rekening.is_summary).order_by(Rekening.kode).all()
generator = r006Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### PAJAK DAN TARIF
elif url_dict['act']=='r007' :
query = DBSession.query(Pajak.kode, Pajak.nama, Rekening.nama.label('rek_nm'), Pajak.tahun, Pajak.tarif
).filter(Pajak.rekening_id==Rekening.id
).order_by(Pajak.kode).all()
generator = r007Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### WILAYAH
elif url_dict['act']=='r008' :
query = DBSession.query(Wilayah.kode, Wilayah.nama, Wilayah.level_id).order_by(Wilayah.kode).all()
generator = r008Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### JENISPAJAK
# function case when alchemy -> case([(JnsPajak.status==1,"Aktif"),],else_="Tidak Aktif").label("status")
# iWan was here
elif url_dict['act']=='semua_sektor' :
query = DBSession.query(JnsPajak.kode, JnsPajak.nama, case([(JnsPajak.status==1,"Aktif"),],else_="Tidak Aktif").label("status")).order_by(JnsPajak.kode).all()
generator = semua_sektorGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### --------- SUBJEK PAJAK --------- ###
elif url_dict['act']=='rSubjekPajak' :
query = DBSession.query(SubjekPajak.kode,
SubjekPajak.nama,
SubjekPajak.alamat_1,
SubjekPajak.kelurahan,
SubjekPajak.kecamatan,
SubjekPajak.kota,
SubjekPajak.email,
case([(SubjekPajak.status==1,"Aktif")],
else_="Tidak Aktif").label("status"),
Unit.nama.label('unit')
).join(Unit
).filter(SubjekPajak.status_grid==0
).order_by(Unit.kode,
desc(SubjekPajak.kode))
if group_in(req, 'bendahara'):
query = query.join(UserUnit
).filter(UserUnit.user_id==u)
generator = rSubjekPajakGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### --------- OBJEK PAJAK --------- ###
elif url_dict['act']=='r010' :
query = DBSession.query(ObjekPajak.nama.label('op_nm'),
SubjekPajak.kode.label('sp_kd'),
SubjekPajak.nama.label('sp_nm'),
Pajak.kode.label('p_kd'),
Wilayah.nama.label('w_nm'),
case([(SubjekPajak.status==1,"Aktif")],
else_="Tidak Aktif").label("status"),
Unit.nama.label('unit')
).join(SubjekPajak
).join(Unit
).outerjoin(Pajak
).outerjoin(Wilayah
).filter(ObjekPajak.status_grid==0,
ObjekPajak.status==1
).order_by(Unit.kode,
SubjekPajak.kode,
ObjekPajak.kode
)
if group_in(req, 'wp'):
query = query.filter(SubjekPajak.email==req.user.email)
elif group_in(req, 'bendahara'):
query = query.filter(SubjekPajak.unit_id==Unit.id)
query = query.join(UserUnit).filter(UserUnit.unit_id==Unit.id,
UserUnit.user_id==u)
generator = r010Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARINVOICE FAST PAY
elif url_dict['act']=='r101' :
query = DBSession.query(ARInvoice
).filter(ARInvoice.id==id,
ARInvoice.status_grid==1,
#ARInvoice.tgl_tetap.between(awal,akhir)
).order_by(ARInvoice.kode)
if u != 1:
query = query.filter(ARInvoice.owner_id==u)
generator = r101Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARINVOICE
elif url_dict['act']=='r100' :
query = DBSession.query(ARInvoice
).filter(ARInvoice.id==id
).order_by(ARInvoice.kode).all()
generator = r100Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARSSPD
# function trim to_char alchemy -> func.trim(func.to_char(ARInvoice.tarif,'999,999,999,990')).label('tarif'),
# iWan was here
elif url_dict['act']=='r200' :
print '*********tgl_akhir********',tgl_akhir
query = DBSession.query(ARSspd.id,
ARInvoice.kode,
ARInvoice.wp_kode,
ARInvoice.wp_nama,
ARInvoice.op_kode,
ARInvoice.op_nama,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
func.trim(func.to_char(ARSspd.bayar,'999,999,999,990')).label('bayar'),
ARSspd.tgl_bayar,
).join(ARInvoice
).filter(and_(ARSspd.tgl_bayar>=tgl_awal, ARSspd.tgl_bayar<=tgl_akhir)
).order_by(ARSspd.id).all()
generator = r200Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
elif url_dict['act']=='r200frm' :
query = DBSession.query(ARSspd.id,
ARSspd.tgl_bayar,
ARInvoice.wp_kode,
ARInvoice.wp_nama,
ARInvoice.op_kode,
ARInvoice.op_nama,
ARInvoice.rek_kode,
ARInvoice.rek_nama,
ARInvoice.unit_kode,
ARInvoice.unit_nama,
ARInvoice.kode,
func.trim(func.to_char(ARInvoice.tarif,'999,999,999,990')).label('tarif'),
func.trim(func.to_char(ARInvoice.dasar,'999,999,999,990')).label('dasar'),
func.trim(func.to_char(ARInvoice.pokok,'999,999,999,990')).label('pokok'),
func.trim(func.to_char(ARInvoice.bunga,'999,999,999,990')).label('bunga'),
func.trim(func.to_char(ARInvoice.denda,'999,999,999,990')).label('denda'),
func.trim(func.to_char(ARSspd.bayar,'999,999,999,990')).label('bayar'),
).join(ARInvoice
).filter(ARSspd.id==id,
)#.order_by(ARSspd.id).all()
generator = r200frmGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### ARSTS
elif url_dict['act']=='r300' :
query = DBSession.query(ARSts.id,
ARSts.kode,
ARSts.nama,
ARSts.tgl_sts,
Unit.kode.label('unit_kd'),
Unit.nama.label('unit_nm'),
Unit.alamat.label('unit_al'),
ARStsItem.rek_kode.label('rek_kd'),
ARStsItem.rek_nama.label('rek_nm'),
# ARStsItem.jumlah,
func.trim(func.to_char(ARStsItem.jumlah,'999,999,999,990')).label('jumlah'),
func.trim(func.to_char(ARSts.jumlah,'999,999,999,990')).label('jumlah_sts'),
ARStsItem.kode.label('no_bayar')
).filter(ARSts.id==id,
ARSts.unit_id==Unit.id,
ARStsItem.sts_id==ARSts.id,
ARStsItem.invoice_id==ARInvoice.id,
ARStsItem.rekening_id==Rekening.id,
).order_by(ARStsItem.rek_kode).all()
generator = r300Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
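# The SPTPD reports below render a self-assessed tax return: 'sptpd_rincian'
# is the per-line detail, 'sptpd_sspd' the payment-slip recap,
# 'sptpd_lampiran' the attachment totals, and 'sptpd' the return itself.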
### SPTPD RINCIAN ###
elif url_dict['act']=='sptpd_rincian' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.wp_nama.label('nama'),
InvoiceDet.sektor_nm,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
InvoiceDet.wilayah_nm,
InvoiceDet.peruntukan_nm,
InvoiceDet.produk_nm,
InvoiceDet.nama.label('wp'),
InvoiceDet.volume,
InvoiceDet.dpp,
InvoiceDet.tarif,
InvoiceDet.total_pajak,
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).order_by(Sptpd.kode,
InvoiceDet.sektor_nm,
InvoiceDet.wilayah_nm,
InvoiceDet.nama,
InvoiceDet.peruntukan_nm,
InvoiceDet.produk_nm
).all()
generator = rSptpdRincianGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD SSPD ###
elif url_dict['act']=='sptpd_sspd' :
query = DBSession.query(Sptpd.id,
Sptpd.wp_nama.label('nama'),
Sptpd.wp_alamat_1,
InvoiceDet.produk_nm,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tgl_sptpd,
SubjekPajak.kode,
func.sum(InvoiceDet.total_pajak).label('total_pajak')
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id,
SubjekPajak.id==Sptpd.subjek_pajak_id
).group_by(Sptpd.id,
SubjekPajak.kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
InvoiceDet.produk_nm,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tgl_sptpd,
).order_by(Sptpd.kode,
InvoiceDet.produk_nm
).all()
generator = rSptpdSspdGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD Lampiran ###
elif url_dict['act']=='sptpd_lampiran' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
func.sum(InvoiceDet.volume).label('volume'),
func.sum(InvoiceDet.dpp).label('dpp'),
func.sum(InvoiceDet.total_pajak).label('total_pajak'),
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).group_by(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
).order_by(Sptpd.kode,
InvoiceDet.sektor_id,
InvoiceDet.produk_nm,
).all()
generator = rSptpdLampiranGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
### SPTPD ###
elif url_dict['act']=='sptpd' :
query = DBSession.query(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
func.sum(InvoiceDet.volume).label('volume'),
func.sum(InvoiceDet.dpp).label('dpp'),
func.sum(InvoiceDet.total_pajak).label('total_pajak'),
).filter(Sptpd.id==req.params['sptpd_id'],
InvoiceDet.sptpd_id==Sptpd.id
).group_by(Sptpd.id,
Sptpd.kode,
Sptpd.nama,
Sptpd.wp_kode,
Sptpd.wp_nama,
Sptpd.wp_alamat_1,
Sptpd.tgl_sptpd,
Sptpd.periode_1,
Sptpd.periode_2,
Sptpd.tahun_id,
InvoiceDet.sektor_id,
InvoiceDet.sektor_nm,
InvoiceDet.produk_nm,
).order_by(Sptpd.kode,
InvoiceDet.sektor_id,
InvoiceDet.produk_nm,
).all()
generator = rSptpdGenerator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### E-SAMSAT
elif url_dict['act']=='esamsat' :
query = self.request.params
generator = r400Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
###################### E-PAP
elif url_dict['act']=='epap' :
query = self.request.params
generator = r500Generator()
pdf = generator.generate(query)
response=req.response
response.content_type="application/pdf"
response.content_disposition='filename=output.pdf'
response.write(pdf)
return response
else:
return HTTPNotFound() #TODO: warn about missing access rights
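# ---------------------------------------------------------------------------
# Report generators. Each subclass below follows the same JasperGenerator
# pattern: `reportname` points at a .jrxml template, `xpath` names the record
# element, and generate_xml() serializes every query row into an ElementTree
# that JasperReports renders to PDF. A minimal sketch of the pattern (names
# here are illustrative only, not part of this module):
#
#     class myReportGenerator(JasperGenerator):
#         def __init__(self):
#             super(myReportGenerator, self).__init__()
#             self.reportname = get_rpath('my_report.jrxml')
#             self.xpath = '/webr/my_report'
#             self.root = ET.Element('webr')
#
#         def generate_xml(self, rows):
#             for row in rows:
#                 rec = ET.SubElement(self.root, 'my_report')
#                 ET.SubElement(rec, "kode").text = row.kode
#             return self.root
# ---------------------------------------------------------------------------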
class rSptpdRincianGenerator(JasperGenerator):
def __init__(self):
super(rSptpdRincianGenerator, self).__init__()
self.reportname = get_rpath('sptpd_rincian.jrxml')
self.xpath = '/webr/sptpd_rincian'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_rincian')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "wilayah_nm").text = row.wilayah_nm
ET.SubElement(xml_greeting, "peruntukan_nm").text = row.peruntukan_nm
ET.SubElement(xml_greeting, "wp").text = row.wp
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class rSptpdSspdGenerator(JasperGenerator):
def __init__(self):
super(rSptpdSspdGenerator, self).__init__()
self.reportname = get_rpath('sptpd_sspd.jrxml')
self.xpath = '/webr/sptpd_sspd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_sspd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
class rSptpdLampiranGenerator(JasperGenerator):
def __init__(self):
super(rSptpdLampiranGenerator, self).__init__()
self.reportname = get_rpath('sptpd_lampiran.jrxml')
self.xpath = '/webr/sptpd_lampiran'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd_lampiran')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "wp_alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tahun_id").text = unicode(row.tahun_id)
ET.SubElement(xml_greeting, "sektor_id").text = unicode(row.sektor_id)
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
class rSptpdGenerator(JasperGenerator):
def __init__(self):
super(rSptpdGenerator, self).__init__()
self.reportname = get_rpath('sptpd.jrxml')
self.xpath = '/webr/sptpd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'sptpd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "wp_alamat_1").text = row.wp_alamat_1
ET.SubElement(xml_greeting, "tgl_sptpd").text = unicode(row.tgl_sptpd)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tahun_id").text = unicode(row.tahun_id)
ET.SubElement(xml_greeting, "sektor_id").text = unicode(row.sektor_id)
ET.SubElement(xml_greeting, "sektor_nm").text = row.sektor_nm
ET.SubElement(xml_greeting, "produk_nm").text = row.produk_nm
ET.SubElement(xml_greeting, "volume").text = unicode(row.volume)
ET.SubElement(xml_greeting, "dpp").text = unicode(row.dpp)
ET.SubElement(xml_greeting, "total_pajak").text = unicode(row.total_pajak)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "now").text = now
return self.root
## ----------------- LAPORAN -------------------------------------------##
class lap1Generator(JasperGenerator):
def __init__(self):
super(lap1Generator, self).__init__()
self.reportname = get_rpath('Lap1.jrxml')
self.xpath = '/webr/lap1'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap1')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap2Generator(JasperGenerator):
def __init__(self):
super(lap2Generator, self).__init__()
self.reportname = get_rpath('Lap2.jrxml')
self.xpath = '/webr/lap2'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap2')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap3Generator(JasperGenerator):
def __init__(self):
super(lap3Generator, self).__init__()
self.reportname = get_rpath('Lap3.jrxml')
self.xpath = '/webr/lap3'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap3')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap4Generator(JasperGenerator):
def __init__(self):
super(lap4Generator, self).__init__()
self.reportname = get_rpath('Lap4.jrxml')
self.xpath = '/webr/lap4'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap4')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap5Generator(JasperGenerator):
def __init__(self):
super(lap5Generator, self).__init__()
self.reportname = get_rpath('Lap5.jrxml')
self.xpath = '/webr/lap5'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap5')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap6Generator(JasperGenerator):
def __init__(self):
super(lap6Generator, self).__init__()
self.reportname = get_rpath('Lap6.jrxml')
self.xpath = '/webr/lap6'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap6')
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class lap7Generator(JasperGenerator):
def __init__(self):
super(lap7Generator, self).__init__()
self.reportname = get_rpath('Lap7.jrxml')
self.xpath = '/webr/lap7'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap7')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap7benGenerator(JasperGenerator):
def __init__(self):
super(lap7benGenerator, self).__init__()
self.reportname = get_rpath('Lap7bendahara.jrxml')
self.xpath = '/webr/lap7ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap7ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
class lap8Generator(JasperGenerator):
def __init__(self):
super(lap8Generator, self).__init__()
self.reportname = get_rpath('Lap8.jrxml')
self.xpath = '/webr/lap8'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap8')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap8benGenerator(JasperGenerator):
def __init__(self):
super(lap8benGenerator, self).__init__()
self.reportname = get_rpath('Lap8bendahara.jrxml')
self.xpath = '/webr/lap8ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap8ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
class lap9Generator(JasperGenerator):
def __init__(self):
super(lap9Generator, self).__init__()
self.reportname = get_rpath('Lap9.jrxml')
self.xpath = '/webr/lap9'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap9')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
#ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
#ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap9benGenerator(JasperGenerator):
def __init__(self):
super(lap9benGenerator, self).__init__()
self.reportname = get_rpath('Lap9bendahara.jrxml')
self.xpath = '/webr/lap9ben'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap9ben')
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
## iWan mampir
class lap10Generator(JasperGenerator):
def __init__(self):
super(lap10Generator, self).__init__()
self.reportname = get_rpath('Lap10.jrxml')
self.xpath = '/webr/lap10'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap10')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "invoice_kode").text = row.invoice_kode
ET.SubElement(xml_greeting, "tgl_terima").text = unicode(row.tgl_terima)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "jatuh_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
return self.root
class lap10budGenerator(JasperGenerator):
def __init__(self):
super(lap10budGenerator, self).__init__()
self.reportname = get_rpath('Lap10bud.jrxml')
self.xpath = '/webr/lap10bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap10bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "invoice_kode").text = row.invoice_kode
ET.SubElement(xml_greeting, "tgl_terima").text = unicode(row.tgl_terima)
ET.SubElement(xml_greeting, "periode_1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode_2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "jatuh_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
return self.root
class lap11Generator(JasperGenerator):
def __init__(self):
super(lap11Generator, self).__init__()
self.reportname = get_rpath('Lap11.jrxml')
self.xpath = '/webr/lap11'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap11')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap11budGenerator(JasperGenerator):
def __init__(self):
super(lap11budGenerator, self).__init__()
self.reportname = get_rpath('Lap11bud.jrxml')
self.xpath = '/webr/lap11bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap11bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap12Generator(JasperGenerator):
def __init__(self):
super(lap12Generator, self).__init__()
self.reportname = get_rpath('Lap12.jrxml')
self.xpath = '/webr/lap12'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
ttd=DBSession.query(Pegawai.kode.label('pg_kd'),
Pegawai.nama.label('pg_nm')
).filter(Pegawai.user_id==u
).first()
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap12')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "un_al").text = unit_al
ET.SubElement(xml_greeting, "pg_kd").text = ttd.pg_kd
ET.SubElement(xml_greeting, "pg_nm").text = ttd.pg_nm
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap12budGenerator(JasperGenerator):
def __init__(self):
super(lap12budGenerator, self).__init__()
self.reportname = get_rpath('Lap12bud.jrxml')
self.xpath = '/webr/lap12bud'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap12bud')
ET.SubElement(xml_greeting, "un_id").text = unicode(row.un_id)
ET.SubElement(xml_greeting, "un_kd").text = row.un_kd
ET.SubElement(xml_greeting, "un_nm").text = row.un_nm
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "wp_nm").text = row.wp_nm
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "awal").text = awal
ET.SubElement(xml_greeting, "akhir").text = akhir
ET.SubElement(xml_greeting, "status").text = row.status
return self.root
class lap14Generator(JasperGenerator):
def __init__(self):
super(lap14Generator, self).__init__()
self.reportname = get_rpath('Lap14.jrxml')
self.xpath = '/webr/lap14'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap14')
ET.SubElement(xml_greeting, "rek_id").text = unicode(row.rek_id)
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "kd").text = row.kd
ET.SubElement(xml_greeting, "tgl_ttp").text = unicode(row.tgl_ttp)
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl").text = unicode(row.tgl)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "wil_kd").text = row.wil_kd
ET.SubElement(xml_greeting, "wil_nm").text = row.wil_nm
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "bulan").text = bulan
ET.SubElement(xml_greeting, "thn").text = thn
return self.root
class lap13Generator(JasperGenerator):
def __init__(self):
super(lap13Generator, self).__init__()
self.reportname = get_rpath('Lap13.jrxml')
self.xpath = '/webr/lap13'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap13')
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "ag_m").text = unicode(row.target)
ET.SubElement(xml_greeting, "byr1").text = unicode(row.bayar1)
ET.SubElement(xml_greeting, "byr2").text = unicode(row.bayar2)
x=row.bayar1+row.bayar2
ET.SubElement(xml_greeting, "byr3").text = unicode(x)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "bulan").text = bulan
ET.SubElement(xml_greeting, "thn").text = thn
return self.root
class lap15Generator(JasperGenerator):
def __init__(self):
super(lap15Generator, self).__init__()
self.reportname = get_rpath('Lap15.jrxml')
self.xpath = '/webr/lap15'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'lap15')
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "bayar").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "unit_kd").text = unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = unit_nm
ET.SubElement(xml_greeting, "unit_al").text = unit_al
ET.SubElement(xml_greeting, "now").text = now
ET.SubElement(xml_greeting, "thn").text = thn
ET.SubElement(xml_greeting, "bln").text = unicode(row.bln)
ET.SubElement(xml_greeting, "triwulan").text = row.triwulan
return self.root
## ----------------------------- End Laporan (reports) ----------------------------------------##
#User
class r001Generator(JasperGenerator):
def __init__(self):
super(r001Generator, self).__init__()
self.reportname = get_rpath('R0001.jrxml')
self.xpath = '/webr/user'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'user')
ET.SubElement(xml_greeting, "username").text = row.username
ET.SubElement(xml_greeting, "email").text = row.email
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "last_login").text = unicode(row.last_login)
ET.SubElement(xml_greeting, "registered_date").text = unicode(row.registered_date)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Grup (group)
class r002Generator(JasperGenerator):
def __init__(self):
super(r002Generator, self).__init__()
self.reportname = get_rpath('R0002.jrxml')
self.xpath = '/webr/grup'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'grup')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Unit
class r003Generator(JasperGenerator):
def __init__(self):
super(r003Generator, self).__init__()
self.reportname = get_rpath('R0003.jrxml')
self.xpath = '/webr/unit'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'unit')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "is_summary").text = unicode(row.is_summary)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Jabatan (job title)
class r004Generator(JasperGenerator):
def __init__(self):
super(r004Generator, self).__init__()
self.reportname = get_rpath('R0004.jrxml')
self.xpath = '/webr/jabatan'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'jabatan')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Pegawai (employee)
class r005Generator(JasperGenerator):
def __init__(self):
super(r005Generator, self).__init__()
self.reportname = get_rpath('R0005.jrxml')
self.xpath = '/webr/pegawai'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'pegawai')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Rekening (account)
class r006Generator(JasperGenerator):
def __init__(self):
super(r006Generator, self).__init__()
self.reportname = get_rpath('R0006.jrxml')
self.xpath = '/webr/rekening'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'rekening')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "is_summary").text = unicode(row.is_summary)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Pajak dan Tarif (tax and tariff)
class r007Generator(JasperGenerator):
def __init__(self):
super(r007Generator, self).__init__()
self.reportname = get_rpath('R0007.jrxml')
self.xpath = '/webr/pajak'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'pajak')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "tahun").text = unicode(row.tahun)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#Wilayah (region)
class r008Generator(JasperGenerator):
def __init__(self):
super(r008Generator, self).__init__()
self.reportname = get_rpath('R0008.jrxml')
self.xpath = '/webr/wilayah'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'wilayah')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "level_id").text = unicode(row.level_id)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#JnsPajak (tax type)
class semua_sektorGenerator(JasperGenerator):
def __init__(self):
super(semua_sektorGenerator, self).__init__()
self.reportname = get_rpath('semua_sektor.jrxml')
self.xpath = '/webr/semua_sektor'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'semua_sektor')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "status").text = row.status
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
### ----------- Subjek Pajak (tax subject) ----------- ###
class rSubjekPajakGenerator(JasperGenerator):
def __init__(self):
super(rSubjekPajakGenerator, self).__init__()
self.reportname = get_rpath('R0009.jrxml')
self.xpath = '/webr/subjekpajak'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'subjekpajak')
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "alamat_1").text = row.alamat_1
ET.SubElement(xml_greeting, "kelurahan").text = row.kelurahan
ET.SubElement(xml_greeting, "kecamatan").text = row.kecamatan
ET.SubElement(xml_greeting, "kota").text = row.kota
ET.SubElement(xml_greeting, "email").text = row.email
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "unit").text = row.unit
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "un_nm").text = unit_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
#ET.SubElement(xml_greeting, "alamat_2").text = row.alamat_2
return self.root
#ObjekPajak (tax object)
class r010Generator(JasperGenerator):
def __init__(self):
super(r010Generator, self).__init__()
self.reportname = get_rpath('R0010.jrxml')
self.xpath = '/webr/op'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'op')
ET.SubElement(xml_greeting, "op_nm").text = row.op_nm
ET.SubElement(xml_greeting, "sp_kd").text = row.sp_kd
ET.SubElement(xml_greeting, "sp_nm").text = row.sp_nm
ET.SubElement(xml_greeting, "p_kd").text = row.p_kd
ET.SubElement(xml_greeting, "w_nm").text = row.w_nm
ET.SubElement(xml_greeting, "status").text = unicode(row.status)
ET.SubElement(xml_greeting, "unit").text = row.unit
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "un_nm").text = unit_nm
ET.SubElement(xml_greeting, "un_al").text = unit_al
return self.root
#ARINVOICE FAST PAY
class r101Generator(JasperGenerator):
def __init__(self):
super(r101Generator, self).__init__()
self.reportname = get_rpath('epayment.jrxml')
self.xpath = '/webr/epayment'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epayment')
ET.SubElement(xml_greeting, "kd_bayar").text = row.kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nama
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nama
ET.SubElement(xml_greeting, "periode1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_tetap").text = unicode(row.tgl_tetap)
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARINVOICE
class r100Generator(JasperGenerator):
def __init__(self):
super(r100Generator, self).__init__()
self.reportname = get_rpath('epayment.jrxml')
self.xpath = '/webr/epayment'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epayment')
ET.SubElement(xml_greeting, "kd_bayar").text = row.kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nama
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nama
ET.SubElement(xml_greeting, "periode1").text = unicode(row.periode_1)
ET.SubElement(xml_greeting, "periode2").text = unicode(row.periode_2)
ET.SubElement(xml_greeting, "tgl_tetap").text = unicode(row.tgl_tetap)
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = unicode(row.jatuh_tempo)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARSSPD
class r200Generator(JasperGenerator):
def __init__(self):
super(r200Generator, self).__init__()
self.reportname = get_rpath('R2000.jrxml')
self.xpath = '/webr/arsspd'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsspd')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_kode").text = row.op_kode
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "rek_nama").text = row.rek_nama
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl_bayar").text = unicode(row.tgl_bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
class r200frmGenerator(JasperGenerator):
def __init__(self):
super(r200frmGenerator, self).__init__()
self.reportname = get_rpath('R2000FRM.jrxml')
self.xpath = '/webr/arsspdfrm'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsspdfrm')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "wp_kode").text = row.wp_kode
ET.SubElement(xml_greeting, "wp_nama").text = row.wp_nama
ET.SubElement(xml_greeting, "op_kode").text = row.op_kode
ET.SubElement(xml_greeting, "op_nama").text = row.op_nama
ET.SubElement(xml_greeting, "rek_kode").text = row.rek_kode
ET.SubElement(xml_greeting, "rek_nama").text = row.rek_nama
ET.SubElement(xml_greeting, "unit_kode").text = row.unit_kode
ET.SubElement(xml_greeting, "unit_nama").text = row.unit_nama
ET.SubElement(xml_greeting, "tarif").text = unicode(row.tarif)
ET.SubElement(xml_greeting, "dasar").text = unicode(row.dasar)
ET.SubElement(xml_greeting, "pokok").text = unicode(row.pokok)
ET.SubElement(xml_greeting, "denda").text = unicode(row.denda)
ET.SubElement(xml_greeting, "bunga").text = unicode(row.bunga)
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.bayar)
ET.SubElement(xml_greeting, "tgl_bayar").text = unicode(row.tgl_bayar)
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#ARSTS
class r300Generator(JasperGenerator):
def __init__(self):
super(r300Generator, self).__init__()
self.reportname = get_rpath('R3000.jrxml')
self.xpath = '/webr/arsts'
self.root = ET.Element('webr')
def generate_xml(self, tobegreeted):
for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'arsts')
ET.SubElement(xml_greeting, "id").text = unicode(row.id)
ET.SubElement(xml_greeting, "kode").text = row.kode
ET.SubElement(xml_greeting, "nama").text = row.nama
ET.SubElement(xml_greeting, "tgl_sts").text = unicode(row.tgl_sts)
ET.SubElement(xml_greeting, "unit_kd").text = row.unit_kd
ET.SubElement(xml_greeting, "unit_nm").text = row.unit_nm
ET.SubElement(xml_greeting, "unit_al").text = row.unit_al
ET.SubElement(xml_greeting, "rek_kd").text = row.rek_kd
ET.SubElement(xml_greeting, "rek_nm").text = row.rek_nm
ET.SubElement(xml_greeting, "jumlah").text = unicode(row.jumlah)
ET.SubElement(xml_greeting, "jumlah_sts").text = unicode(row.jumlah_sts)
# ET.SubElement(xml_greeting, "jumlah").text = row.jumlah
ET.SubElement(xml_greeting, "no_bayar").text = row.no_bayar
ET.SubElement(xml_greeting, "logo").text = logo_pemda
return self.root
#E-SAMSAT
class r400Generator(JasperGenerator):
def __init__(self):
super(r400Generator, self).__init__()
self.reportname = get_rpath('esamsat.jrxml')
self.xpath = '/webr/esamsat'
self.root = ET.Element('webr')
def generate_xml(self, row):
#for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'esamsat')
ET.SubElement(xml_greeting, "logo").text = logo
ET.SubElement(xml_greeting, "customer").text = 'AAAA'
ET.SubElement(xml_greeting, "kd_bayar").text = row['kd_bayar']
ET.SubElement(xml_greeting, "no_rangka").text = row['no_rangka1']
ET.SubElement(xml_greeting, "no_polisi").text = row['no_polisi']
ET.SubElement(xml_greeting, "no_identitas").text = row['no_ktp1']
ET.SubElement(xml_greeting, "nm_pemilik").text = row['nm_pemilik']
ET.SubElement(xml_greeting, "warna").text = row['warna_tnkb']
ET.SubElement(xml_greeting, "merk").text = row['nm_merek_kb']
ET.SubElement(xml_greeting, "model").text = row['nm_model_kb']
ET.SubElement(xml_greeting, "tahun").text = row['th_buatan']
ET.SubElement(xml_greeting, "tgl_pjk_lama").text = row['tg_akhir_pjklm']
ET.SubElement(xml_greeting, "tgl_pjk_baru").text = row['tg_akhir_pjkbr']
ET.SubElement(xml_greeting, "pokok_bbn").text = row['bbn_pok']
ET.SubElement(xml_greeting, "denda_bbn").text = row['bbn_den']
ET.SubElement(xml_greeting, "pokok_swdkllj").text = row['swd_pok']
ET.SubElement(xml_greeting, "denda_swdkllj").text = row['swd_den']
ET.SubElement(xml_greeting, "adm_stnk").text = row['adm_stnk']
ET.SubElement(xml_greeting, "adm_tnkb").text = row['adm_tnkb']
ET.SubElement(xml_greeting, "jumlah").text = row['jumlah']
ET.SubElement(xml_greeting, "status_byr").text = row['kd_status']
ET.SubElement(xml_greeting, "keterangan").text = row['ket']
return self.root
#E-PAP
class r500Generator(JasperGenerator):
def __init__(self):
super(r500Generator, self).__init__()
self.reportname = get_rpath('epap.jrxml')
self.xpath = '/webr/epap'
self.root = ET.Element('webr')
def generate_xml(self, row):
#for row in tobegreeted:
xml_greeting = ET.SubElement(self.root, 'epap')
ET.SubElement(xml_greeting, "logo").text = logo_pemda
ET.SubElement(xml_greeting, "kd_bayar").text = row['kd_bayar']
ET.SubElement(xml_greeting, "npwpd").text = row['npwpd']
ET.SubElement(xml_greeting, "nm_perus").text = row['nm_perus']
ET.SubElement(xml_greeting, "al_perus").text = row['al_perus']
ET.SubElement(xml_greeting, "vol_air").text = row['vol_air']
ET.SubElement(xml_greeting, "npa").text = row['npa']
ET.SubElement(xml_greeting, "m_pjk_thn").text = row['m_pjk_thn']
ET.SubElement(xml_greeting, "m_pjk_bln").text = row['m_pjk_bln']
ET.SubElement(xml_greeting, "bea_pok_pjk").text = row['bea_pok_pjk']
ET.SubElement(xml_greeting, "bea_den_pjk").text = row['bea_den_pjk']
ET.SubElement(xml_greeting, "tgl_tetap").text = row['tgl_tetap']
ET.SubElement(xml_greeting, "tgl_jt_tempo").text = row['tgl_jt_tempo']
ET.SubElement(xml_greeting, "keterangan").text = row['keterangan']
return self.root
| lgpl-3.0 | 5,254,077,287,836,435,000 | 52.299476 | 218 | 0.471997 | false | 3.768672 | false | false | false |
baolinw/cloud | cloudAPI/local_api/lib_local.py | 1 | 1721 | import argparse
import httplib2
import os
import sys
import json
import io
import os.path
from os import listdir
from os.path import isfile,join
# simulate the 'Cloud' in local storage
ROOT_DIR = '/home/ubuntu/tmp/'
def upload_file(service,from_file_name,to_file_name):
# try delete it first
try:
        delete_file(service, to_file_name)
except Exception as e:
pass
# The BytesIO object may be replaced with any io.Base instance.
f = open(from_file_name,'r')
out_folder_name = ROOT_DIR + service['folder'] + '/'
out_f = open(out_folder_name + to_file_name,'w')
out_f.write(f.read())
f.close()
out_f.close()
def upload_string(service, str_to_upload,to_file_name):
# The BytesIO object may be replaced with any io.Base instance.
out_folder_name = ROOT_DIR + service['folder'] + '/'
out_f = open(out_folder_name + to_file_name,'w')
    out_f.write(str_to_upload)
out_f.close()
def delete_file(service,object_name):
out_folder_name = ROOT_DIR + service['folder'] + '/'
    os.remove(out_folder_name + object_name)
def download_file(service, object_name, to_file_name):
    in_folder_name = ROOT_DIR + service['folder'] + '/'
    f_in = open(in_folder_name + object_name, 'r')
    f_out = open(to_file_name, 'w')
    f_out.write(f_in.read())
    f_in.close()
    f_out.close()
    return None
def get_all_file_names(service):
folder_name = ROOT_DIR + service['folder'] + '/'
file_names = [(f,os.stat(join(folder_name,f)).st_size) for f in os.listdir(folder_name) if isfile(join(folder_name,f)) ]
return file_names
def create_service_object(extra_info):
service = {'folder':extra_info}
return service
if __name__ == "__main__":
    s = create_service_object('test')  # 'test' is an arbitrary example folder under ROOT_DIR
print get_all_file_names(s)
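    # Illustrative round trip (hypothetical file names; assumes the folder
    # ROOT_DIR + 'test' exists and is writable):
    #   upload_string(s, 'hello world', 'greeting.txt')
    #   download_file(s, 'greeting.txt', '/tmp/greeting.txt')
    #   delete_file(s, 'greeting.txt')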
| apache-2.0 | 9,077,936,335,657,262,000 | 28.192982 | 121 | 0.66473 | false | 2.821311 | false | false | false |
vamitrou/jpath-py | jpath.py | 1 | 4364 | ########################################################################
#
# jpath-py
# An XPath-like querying interface for JSON objects
#
# author: Vasileios Mitrousis
# email: [email protected]
#
# The software is given as is, no guarantees from the author
# Licensed under Apache 2.0 licence
#
########################################################################
debug = False
# This function accepts a JSON document and a path like /x/y/z[4]/*
# and returns the actual value of the matching key(s).
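# For example, with a hypothetical document:
#   doc = {"a": [{"b": {"v": 1}}, {"b": {"v": 2}}]}
#   get_dict_value(doc, "a[0]/b/v")    -> 1       (indexed array element)
#   get_dict_value(doc, "a/*/b", "v")  -> [1, 2]  (wildcard over the array, leaf "v")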
def get_dict_value(doc, path, leaf=None):
if len(path.strip()) == 0:
return doc
path_splits = path.split('/')
for i, key in enumerate(path_splits):
if debug: print 'key processing: ' + key
if not doc:
return None
if '[' in key and ']' in key and i != len(path_splits)-1:
# array element
if debug: print 'array element'
idx = int(key[key.index('[')+1:key.index(']')])
key = key[:key.index('[')]
if debug: print 'key stripped: ' + key
            if doc.get(key) is None:
                return None
if isinstance(doc[key], list):
if debug: print 'is an array'
if idx >= len(doc[key]):
# index out of bounds
if debug: print 'out of bounds'
return None
doc = doc[key][idx]
else:
# single object, accept 0 index only
if debug: print 'single object'
if idx > 0:
return None
doc = doc[key]
elif key == '*':
# '*' has 2 meanings. The whole array,
# or the whole object if it is the last key
if debug: print 'wildcard key'
if i == len(path_splits) - 1:
# it is the last element, push the whole object
if debug: print 'last element'
else:
# '*' requires the whole array in this case
if debug: print 'getting the whole array'
if isinstance(doc, list):
if debug: print 'is type of array'
else:
if debug: print 'is not type of array, constructing it manually'
doc = [doc]
idx = -1
item_arr = []
recon_path = '/'.join(path_splits[i+1:])
if ']' == recon_path[-1]:
# we need indexed result
if debug: print 'getting indexed result'
idx = int(recon_path[recon_path.index('[')+1:recon_path.index(']')])
recon_path = recon_path[:recon_path.index('[')]
for k, item in enumerate(doc):
val = get_dict_value(item, recon_path, leaf)
if val:
item_arr.append(val)
if idx != -1:
if idx < len(item_arr):
return item_arr[idx]
return None
return item_arr
else:
if debug: print 'normal key: ' + key
if isinstance(doc, list):
if debug: print 'pointing to an array'
print "Warning: '%s' array was detected but not expected. Returning first item." % path_splits[i-1]
if len(doc) > 0:
doc = doc[0].get(key)
else:
if debug: print 'getting object normaly'
doc = doc.get(key)
if i == len(path_splits) - 1:
if debug: print 'it is the last component'
if isinstance(doc, list):
if debug: print 'it is a list, generate a @Val array'
try:
doc = [d[leaf] for d in doc if d]
except:
if debug: print "1,", path, doc
#raw_input()
else:
if debug: print 'final object @Val'
if doc and leaf:
try:
doc = doc[leaf]
except Exception, e:
print 'jpath_error:', e
#raw_input()
return doc
| apache-2.0 | -5,848,436,728,557,343,000 | 36.299145 | 115 | 0.43538 | false | 4.588854 | false | false | false |
jedie/DragonPy | PyDC/PyDC/configs.py | 1 | 4858 | #!/usr/bin/env python2
"""
PyDC - configs
==============
:copyleft: 2013 by Jens Diemer
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import inspect
from MC6809.utils.humanize import byte2bit_string
class BaseConfig:
""" shared config values """
# For writing WAVE files:
FRAMERATE = 22050
SAMPLEWIDTH = 2 # 1 for 8-bit, 2 for 16-bit, 4 for 32-bit samples
VOLUME_RATIO = 90 # "Loundness" in percent of the created wave file
def print_debug_info(self):
print(f"Config: '{self.__class__.__name__}'")
for name, value in inspect.getmembers(self): # , inspect.isdatadescriptor):
if name.startswith("_"):
continue
# print name, type(value)
if not isinstance(value, (int, str, list, tuple, dict)):
continue
if isinstance(value, int):
bit_string = byte2bit_string(value)
print("{:>20} = {:<4} (in hex: {:>7} - binary: {})".format(
name, value, repr(hex(value)), bit_string
))
else:
print(f"{name:>20} = {value}")
class Dragon32Config(BaseConfig):
"""
Dragon 32 specific config values
>> d32cfg = Dragon32Config()
>> d32cfg.print_debug_info() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Config: 'Dragon32Config'
AVG_COUNT = 0 (in hex: '0x0' - binary: 00000000)
BASIC_ASCII = 255 (in hex: '0xff' - binary: 11111111)
BASIC_CODE_END = [0, 0]
BASIC_TOKENIZED = 0 (in hex: '0x0' - binary: 00000000)
BASIC_TYPE_DICT = {0: 'tokenized BASIC (0x00)', 255: 'ASCII BASIC (0xff)'}
BIT_NUL_HZ = 1200 (in hex: '0x4b0' - binary: 00001101001)
BIT_ONE_HZ = 2400 (in hex: '0x960' - binary: 000001101001)
BLOCK_TYPE_DICT = {0: 'filename block (0x00)', 1: 'data block (0x01)', 255: 'end-of-file block (0xff)'}
DATA_BLOCK = 1 (in hex: '0x1' - binary: 10000000)
END_COUNT = 2 (in hex: '0x2' - binary: 01000000)
EOF_BLOCK = 255 (in hex: '0xff' - binary: 11111111)
FILENAME_BLOCK = 0 (in hex: '0x0' - binary: 00000000)
FILETYPE_DICT = {0: 'BASIC programm (0x00)', 1: 'Data file (0x01)', 255: 'Binary file (0xFF)'}
FTYPE_BASIC = 0 (in hex: '0x0' - binary: 00000000)
FTYPE_BIN = 255 (in hex: '0xff' - binary: 11111111)
FTYPE_DATA = 1 (in hex: '0x1' - binary: 10000000)
HZ_VARIATION = 450 (in hex: '0x1c2' - binary: 010000111)
LEAD_BYTE_CODEPOINT = 85 (in hex: '0x55' - binary: 10101010)
LEAD_BYTE_LEN = 255 (in hex: '0xff' - binary: 11111111)
MAX_SYNC_BYTE_SEARCH = 600 (in hex: '0x258' - binary: 0001101001)
MID_COUNT = 1 (in hex: '0x1' - binary: 10000000)
MIN_VOLUME_RATIO = 5 (in hex: '0x5' - binary: 10100000)
SYNC_BYTE_CODEPOINT = 60 (in hex: '0x3c' - binary: 00111100)
"""
# For reading WAVE files:
BIT_NUL_HZ = 1100 # Spec says: 1200Hz - Bit "0" is a single cycle at 1200 Hz
BIT_ONE_HZ = 2100 # Spec says: 2400Hz - Bit "1" is a single cycle at 2400 Hz
# see: http://five.pairlist.net/pipermail/coco/2013-August/070879.html
HZ_VARIATION = 450 # How much Hz can signal scatter to match 1 or 0 bit ?
MIN_VOLUME_RATIO = 5 # percent volume to ignore sample
AVG_COUNT = 0 # How many samples should be merged into a average value?
END_COUNT = 2 # Sample count that must be pos/neg at once
MID_COUNT = 1 # Sample count that can be around null
# Format values:
LEAD_BYTE_CODEPOINT = 0x55 # 10101010
LEAD_BYTE_LEN = 255
SYNC_BYTE_CODEPOINT = 0x3C # 00111100
MAX_SYNC_BYTE_SEARCH = 600 # search size in **Bytes**
# Block types:
FILENAME_BLOCK = 0x00
DATA_BLOCK = 0x01
EOF_BLOCK = 0xff
BLOCK_TYPE_DICT = {
FILENAME_BLOCK: "filename block (0x00)",
DATA_BLOCK: "data block (0x01)",
EOF_BLOCK: "end-of-file block (0xff)",
}
# File types:
FTYPE_BASIC = 0x00
FTYPE_DATA = 0x01
FTYPE_BIN = 0x02
FILETYPE_DICT = {
FTYPE_BASIC: "BASIC programm (0x00)",
FTYPE_DATA: "Data file (0x01)",
FTYPE_BIN: "Binary machine code file (0x02)",
}
# Basic format types:
BASIC_TOKENIZED = 0x00
BASIC_ASCII = 0xff
BASIC_TYPE_DICT = {
BASIC_TOKENIZED: "tokenized BASIC (0x00)",
BASIC_ASCII: "ASCII BASIC (0xff)",
}
# The gap flag
NO_GAPS = 0x00
GAPS = 0xff
# Convert to uppercase if source is .bas and to lowercase if destination is .bas
case_convert = False
if __name__ == "__main__":
import doctest
print(doctest.testmod(
verbose=False
# verbose=True
))
| gpl-3.0 | 1,682,621,820,583,178,200 | 35.253731 | 112 | 0.568958 | false | 3.110115 | true | false | false |
qqwjq/crab | scikits/crab/recommenders/knn/base.py | 10 | 5194 | """
Generalized Recommender models and utility classes.
This module contains basic memory recommender interfaces used throughout
the whole scikit-crab package, as well as utility classes.
The interfaces are realized as abstract base classes (i.e., some optional
functionality is provided in the interface itself, so that the interfaces
can be subclassed).
"""
# Author: Marcel Caraciolo <[email protected]>
#
# License: BSD Style.
from ..base import MemoryBasedRecommender
#===========================
#Item-based Recommender Interface
class ItemRecommender(MemoryBasedRecommender):
def most_similar_items(self, item_id, how_many=None):
'''
Return the most similar items to the given item, ordered
from most similar to least.
Parameters
-----------
item_id: int or string
ID of item for which to find most similar other items
how_many: int
Desired number of most similar items to find
'''
raise NotImplementedError("ItemRecommender is an abstract class.")
def recommended_because(self, user_id, item_id, how_many, **params):
'''
Returns the items that were most influential in recommending a given item
to a given user. In most implementations, this method will return items
that the user prefers and that are similar to the given item.
Parameters
-----------
user_id : int or string
ID of the user who was recommended the item
item_id: int or string
ID of item that was recommended
how_many: int
Maximum number of items to return.
Returns
----------
        The list of items ordered from most influential in recommending the given item to least
'''
raise NotImplementedError("ItemRecommender is an abstract class.")
#===========================
#User-based Recommender Interface
class UserRecommender(MemoryBasedRecommender):
def most_similar_users(self, user_id, how_many=None):
'''
Return the most similar users to the given user, ordered
from most similar to least.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
how_many: int
Desired number of most similar users to find
'''
raise NotImplementedError("UserRecommender is an abstract class.")
def recommended_because(self, user_id, item_id, how_many, **params):
'''
Returns the users that were most influential in recommending a given item
to a given user. In most implementations, this method will return users
        that prefer the recommended item and that are similar to the given user.
Parameters
-----------
user_id : int or string
ID of the user who was recommended the item
item_id: int or string
ID of item that was recommended
how_many: int
Maximum number of items to return.
Returns
----------
        The list of users ordered from most influential in recommending the given item to least
'''
raise NotImplementedError("UserRecommender is an abstract class.")
#===========================
# Base Item Candidate Strategy
class BaseCandidateItemsStrategy(object):
'''
Base implementation for retrieving
all items that could possibly be recommended to the user
'''
def candidate_items(self, user_id, data_model, **params):
'''
Return the candidate items that could possibly be recommended to the user
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: The data model that will be the source for the possible
candidates
'''
raise NotImplementedError("BaseCandidateItemsStrategy is an abstract class.")
#===========================
# Base User Candidates Strategies
class BaseUserNeighborhoodStrategy(object):
'''
Base implementation for retrieving
    all users that could possibly be selected as part of the neighborhood.
'''
def user_neighborhood(self, user_id, data_model, n_similarity='user_similarity',
distance=None, n_users=None, **params):
'''
Computes a neighborhood consisting of the n users to a given user based on the
strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
n_similarity: string
The similarity to compute the neighborhood (default = user_similarity)
distance: function
Pairwise metric to compute the similarity between the users.
        n_users: int
            The neighborhood size (default = None, i.e. all users)
'''
raise NotImplementedError("BaseCandidateItemsStrategy is an abstract class.")
| bsd-3-clause | 1,605,499,775,484,829,700 | 30.289157 | 94 | 0.632075 | false | 4.854206 | false | false | false |
prasanna08/oppia | scripts/linters/test_files/invalid_metaclass.py | 2 | 1562 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file uses the __metaclass__ tag, which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import python_utils
class FakeClass(python_utils.OBJECT):
"""This is a fake docstring for invalid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
"""This doesn't do anything.
Args:
name: str. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple.
"""
yield (name, name)
class MyObject:
"""This is a fake docstring."""
__metaclass__ = FakeClass # Use of __metaclass__ is not allowed
def __init__(self, fake_arg):
self.fake_arg = fake_arg
| apache-2.0 | 786,655,446,843,638,700 | 30.877551 | 78 | 0.684379 | false | 3.934509 | false | false | false |
tutufan/subterfugue | tricks/FileTrick.py | 1 | 1633 | #
# Substitute one filename for another in open and stat64 calls
#
# Copyright 2003 Jiri Dobes <[email protected]>
# Can be freely distributed and used under the terms of the GNU GPL.
#
# Version 0.0.1
from Trick import Trick
from Memory import *
class File(Trick):
def usage(self):
return """
This trick substitutes one filename for another when calling open or stat64.
Useful for changing hardcoded filenames.
Example:
sf --trick=File:\'file=[[\"a\",\"c\"],[\"b\",\"d\"]]\' cp a b
instead of copying \"a\" to \"b\" it copies file \"c\" to file \"d\"
(although some warning message is issued).
"""
def __init__(self, options):
if options.has_key('file'):
self.options = options[ 'file' ]
else:
self.options = [["",""]] #do nothing by default
def callmask( self ):
return { 'open' : 1, 'stat64' : 1 }
def callbefore(self, pid, call, args):
"Perform argument substitution"
#both calls have same argument structure
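        # The replacement name is poked into the tracee's scratch memory and
        # the syscall's pointer argument is redirected there, so the kernel
        # resolves the substituted path instead of the original one.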
if( call == "open" or call == "stat64" ):
list = self.options
m = getMemory(pid)
address = args[0]
filename = m.get_string(address)
for x in list:
if( filename == x[0] ):
area, asize = m.areas()[0]
m.poke( area , x[1] + '%c' % 0 , self )
newargs = ( area , args[ 1 ], args[ 2 ] )
return (None, None, None , newargs )
if( call == "stat" ): #maybe include above?
print "stat is not yet implemented!"
| gpl-2.0 | -6,754,329,897,064,963,000 | 30.403846 | 74 | 0.532762 | false | 3.719818 | false | false | false |
simonolander/euler | euler-323-bitwise-or-operations-on-random-integers.py | 1 | 1292 | import operator as op
from functools import reduce
def ncr(n, r):
r = min(r, n-r)
if r == 0: return 1
numer = reduce(op.mul, range(n, n-r, -1))
denom = reduce(op.mul, range(1, r+1))
return numer//denom
def p(n, r):
return ncr(n, r) / 2**n
f8 = 0
# f7 = 1 + p(1, 1)*f8 + p(1, 0)*f7  ->  (1 - p(1, 0)) * f7 = 1 + p(1, 1)*f8
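# In general, with k = 8 - n bits still unset, the expected number of draws
# satisfies  f_n = 1 + sum_{i=0..k} p(k, i) * f_{n+i};  moving the i = 0 term
# (which contains f_n itself) to the left gives
#   f_n = (1 + sum_{i=1..k} p(k, i) * f_{n+i}) / (1 - p(k, 0))
# where p(k, i) = C(k, i) / 2**k is the probability that one random draw sets
# exactly i of the k missing bits. The loop further down reuses this
# recurrence for the full 32-bit case.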
f7 = (1 + p(1, 1)*f8) / (1 - p(1, 0))
f6 = (1 + p(2, 1)*f7 + p(2, 2)*f8) / (1 - p(2, 0))
f5 = (1 + p(3, 1)*f6 + p(3, 2)*f7 + p(3, 3)*f8) / (1 - p(3, 0))
f4 = (1 + p(4, 1)*f5 + p(4, 2)*f6 + p(4, 3)*f7 + p(4, 4)*f8) / (1 - p(4, 0))
f3 = (1 + p(5, 1)*f4 + p(5, 2)*f5 + p(5, 3)*f6 + p(5, 4)*f7 + p(5, 5)*f8) / (1 - p(5, 0))
f2 = (1 + p(6, 1)*f3 + p(6, 2)*f4 + p(6, 3)*f5 + p(6, 4)*f6 + p(6, 5)*f7 + p(6, 6)*f8) / (1 - p(6, 0))
f1 = (1 + p(7, 1)*f2 + p(7, 2)*f3 + p(7, 3)*f4 + p(7, 4)*f5 + p(7, 5)*f6 + p(7, 6)*f7 + p(7, 7)*f8) / (1 - p(7, 0))
f0 = (1 + p(8, 1)*f1 + p(8, 2)*f2 + p(8, 3)*f3 + p(8, 4)*f4 + p(8, 5)*f5 + p(8, 6)*f6 + p(8, 7)*f7 + p(8, 8)*f8) / (1 - p(8, 0))
print(f8)
print(f7)
print(f6)
print(f5)
print(f4)
print(f3)
print(f2)
print(f1)
print(round(f0, 10))
f = {32: 0}
for n in reversed(range(32)):
remaining = 32 - n
fn = (1 + sum(p(remaining, i)*f[n + i] for i in range(1, remaining + 1))) / (1 - p(remaining, 0))
f[n] = fn
print(f)
print(round(f[0], 10))
| mit | 6,128,869,015,706,782,000 | 29.046512 | 128 | 0.4613 | false | 1.684485 | false | false | false |
vntarasov/openpilot | selfdrive/locationd/models/live_kf.py | 1 | 12011 | #!/usr/bin/env python3
import sys
import numpy as np
import sympy as sp
from selfdrive.locationd.models.constants import ObservationKind
from rednose.helpers.ekf_sym import EKF_sym, gen_code
from rednose.helpers.sympy_helpers import euler_rotate, quat_matrix_r, quat_rotate
EARTH_GM = 3.986005e14 # m^3/s^2 (gravitational constant * mass of earth)
class States():
ECEF_POS = slice(0, 3) # x, y and z in ECEF in meters
ECEF_ORIENTATION = slice(3, 7) # quat for pose of phone in ecef
ECEF_VELOCITY = slice(7, 10) # ecef velocity in m/s
ANGULAR_VELOCITY = slice(10, 13) # roll, pitch and yaw rates in device frame in radians/s
GYRO_BIAS = slice(13, 16) # roll, pitch and yaw biases
ODO_SCALE = slice(16, 17) # odometer scale
ACCELERATION = slice(17, 20) # Acceleration in device frame in m/s**2
IMU_OFFSET = slice(20, 23) # imu offset angles in radians
# Error-state has different slices because it is an ESKF
ECEF_POS_ERR = slice(0, 3)
ECEF_ORIENTATION_ERR = slice(3, 6) # euler angles for orientation error
ECEF_VELOCITY_ERR = slice(6, 9)
ANGULAR_VELOCITY_ERR = slice(9, 12)
GYRO_BIAS_ERR = slice(12, 15)
ODO_SCALE_ERR = slice(15, 16)
ACCELERATION_ERR = slice(16, 19)
IMU_OFFSET_ERR = slice(19, 22)
class LiveKalman():
name = 'live'
initial_x = np.array([-2.7e6, 4.2e6, 3.8e6,
1, 0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0,
1,
0, 0, 0,
0, 0, 0])
# state covariance
initial_P_diag = np.array([1e16, 1e16, 1e16,
1e6, 1e6, 1e6,
1e4, 1e4, 1e4,
1**2, 1**2, 1**2,
0.05**2, 0.05**2, 0.05**2,
0.02**2,
1**2, 1**2, 1**2,
(0.01)**2, (0.01)**2, (0.01)**2])
# process noise
Q = np.diag([0.03**2, 0.03**2, 0.03**2,
               0.001**2, 0.001**2, 0.001**2,
0.01**2, 0.01**2, 0.01**2,
0.1**2, 0.1**2, 0.1**2,
(0.005 / 100)**2, (0.005 / 100)**2, (0.005 / 100)**2,
(0.02 / 100)**2,
3**2, 3**2, 3**2,
(0.05 / 60)**2, (0.05 / 60)**2, (0.05 / 60)**2])
@staticmethod
def generate_code(generated_dir):
name = LiveKalman.name
dim_state = LiveKalman.initial_x.shape[0]
dim_state_err = LiveKalman.initial_P_diag.shape[0]
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
x, y, z = state[States.ECEF_POS, :]
q = state[States.ECEF_ORIENTATION, :]
v = state[States.ECEF_VELOCITY, :]
vx, vy, vz = v
omega = state[States.ANGULAR_VELOCITY, :]
vroll, vpitch, vyaw = omega
roll_bias, pitch_bias, yaw_bias = state[States.GYRO_BIAS, :]
odo_scale = state[States.ODO_SCALE, :][0, :]
acceleration = state[States.ACCELERATION, :]
imu_angles = state[States.IMU_OFFSET, :]
dt = sp.Symbol('dt')
# calibration and attitude rotation matrices
quat_rot = quat_rotate(*q)
# Got the quat predict equations from here
# A New Quaternion-Based Kalman Filter for
# Real-Time Attitude Estimation Using the Two-Step
# Geometrically-Intuitive Correction Algorithm
A = 0.5 * sp.Matrix([[0, -vroll, -vpitch, -vyaw],
[vroll, 0, vyaw, -vpitch],
[vpitch, -vyaw, 0, vroll],
[vyaw, vpitch, -vroll, 0]])
q_dot = A * q
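    # i.e. the standard quaternion kinematics q_dot = 0.5 * Omega(omega) * q,
    # with Omega(omega) the 4x4 skew-symmetric form of the body angular rate.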
# Time derivative of the state as a function of state
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.ECEF_POS, :] = v
state_dot[States.ECEF_ORIENTATION, :] = q_dot
state_dot[States.ECEF_VELOCITY, 0] = quat_rot * acceleration
    # Basic discretization, 1st-order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
state_err_sym = sp.MatrixSymbol('state_err', dim_state_err, 1)
state_err = sp.Matrix(state_err_sym)
quat_err = state_err[States.ECEF_ORIENTATION_ERR, :]
v_err = state_err[States.ECEF_VELOCITY_ERR, :]
omega_err = state_err[States.ANGULAR_VELOCITY_ERR, :]
acceleration_err = state_err[States.ACCELERATION_ERR, :]
# Time derivative of the state error as a function of state error and state
quat_err_matrix = euler_rotate(quat_err[0], quat_err[1], quat_err[2])
q_err_dot = quat_err_matrix * quat_rot * (omega + omega_err)
state_err_dot = sp.Matrix(np.zeros((dim_state_err, 1)))
state_err_dot[States.ECEF_POS_ERR, :] = v_err
state_err_dot[States.ECEF_ORIENTATION_ERR, :] = q_err_dot
state_err_dot[States.ECEF_VELOCITY_ERR, :] = quat_err_matrix * quat_rot * (acceleration + acceleration_err)
f_err_sym = state_err + dt * state_err_dot
# Observation matrix modifier
H_mod_sym = sp.Matrix(np.zeros((dim_state, dim_state_err)))
H_mod_sym[States.ECEF_POS, States.ECEF_POS_ERR] = np.eye(States.ECEF_POS.stop - States.ECEF_POS.start)
H_mod_sym[States.ECEF_ORIENTATION, States.ECEF_ORIENTATION_ERR] = 0.5 * quat_matrix_r(state[3:7])[:, 1:]
H_mod_sym[States.ECEF_ORIENTATION.stop:, States.ECEF_ORIENTATION_ERR.stop:] = np.eye(dim_state - States.ECEF_ORIENTATION.stop)
    # these error functions are defined so that, given a nominal x
    # and a true x:
# true x = err_function(nominal x, delta x)
# delta x = inv_err_function(nominal x, true x)
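    # For the vector-valued sub-states this reduces to plain addition, e.g.
    #   true_pos = nom_pos + delta_pos   and   delta_pos = true_pos - nom_pos,
    # while the orientation error is a small-angle rotation composed with the
    # nominal quaternion (see the delta_quat construction below).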
nom_x = sp.MatrixSymbol('nom_x', dim_state, 1)
true_x = sp.MatrixSymbol('true_x', dim_state, 1)
delta_x = sp.MatrixSymbol('delta_x', dim_state_err, 1)
err_function_sym = sp.Matrix(np.zeros((dim_state, 1)))
delta_quat = sp.Matrix(np.ones((4)))
delta_quat[1:, :] = sp.Matrix(0.5 * delta_x[States.ECEF_ORIENTATION_ERR, :])
err_function_sym[States.ECEF_POS, :] = sp.Matrix(nom_x[States.ECEF_POS, :] + delta_x[States.ECEF_POS_ERR, :])
err_function_sym[States.ECEF_ORIENTATION, 0] = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]) * delta_quat
err_function_sym[States.ECEF_ORIENTATION.stop:, :] = sp.Matrix(nom_x[States.ECEF_ORIENTATION.stop:, :] + delta_x[States.ECEF_ORIENTATION_ERR.stop:, :])
inv_err_function_sym = sp.Matrix(np.zeros((dim_state_err, 1)))
inv_err_function_sym[States.ECEF_POS_ERR, 0] = sp.Matrix(-nom_x[States.ECEF_POS, 0] + true_x[States.ECEF_POS, 0])
delta_quat = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]).T * true_x[States.ECEF_ORIENTATION, 0]
inv_err_function_sym[States.ECEF_ORIENTATION_ERR, 0] = sp.Matrix(2 * delta_quat[1:])
inv_err_function_sym[States.ECEF_ORIENTATION_ERR.stop:, 0] = sp.Matrix(-nom_x[States.ECEF_ORIENTATION.stop:, 0] + true_x[States.ECEF_ORIENTATION.stop:, 0])
eskf_params = [[err_function_sym, nom_x, delta_x],
[inv_err_function_sym, nom_x, true_x],
H_mod_sym, f_err_sym, state_err_sym]
#
# Observation functions
#
#imu_rot = euler_rotate(*imu_angles)
h_gyro_sym = sp.Matrix([vroll + roll_bias,
vpitch + pitch_bias,
vyaw + yaw_bias])
pos = sp.Matrix([x, y, z])
gravity = quat_rot.T * ((EARTH_GM / ((x**2 + y**2 + z**2)**(3.0 / 2.0))) * pos)
h_acc_sym = (gravity + acceleration)
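    # Accelerometer model: the gravity reaction (magnitude GM/|p|^2, pointing
    # radially outward in ECEF) rotated into the device frame, plus the
    # estimated linear acceleration.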
h_phone_rot_sym = sp.Matrix([vroll, vpitch, vyaw])
speed = sp.sqrt(vx**2 + vy**2 + vz**2 + 1e-6)
h_speed_sym = sp.Matrix([speed * odo_scale])
h_pos_sym = sp.Matrix([x, y, z])
h_vel_sym = sp.Matrix([vx, vy, vz])
h_orientation_sym = q
h_imu_frame_sym = sp.Matrix(imu_angles)
h_relative_motion = sp.Matrix(quat_rot.T * v)
obs_eqs = [[h_speed_sym, ObservationKind.ODOMETRIC_SPEED, None],
[h_gyro_sym, ObservationKind.PHONE_GYRO, None],
[h_phone_rot_sym, ObservationKind.NO_ROT, None],
[h_acc_sym, ObservationKind.PHONE_ACCEL, None],
[h_pos_sym, ObservationKind.ECEF_POS, None],
[h_vel_sym, ObservationKind.ECEF_VEL, None],
[h_orientation_sym, ObservationKind.ECEF_ORIENTATION_FROM_GPS, None],
[h_relative_motion, ObservationKind.CAMERA_ODO_TRANSLATION, None],
[h_phone_rot_sym, ObservationKind.CAMERA_ODO_ROTATION, None],
[h_imu_frame_sym, ObservationKind.IMU_FRAME, None]]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state_err, eskf_params)
def __init__(self, generated_dir):
self.dim_state = self.initial_x.shape[0]
self.dim_state_err = self.initial_P_diag.shape[0]
self.obs_noise = {ObservationKind.ODOMETRIC_SPEED: np.atleast_2d(0.2**2),
ObservationKind.PHONE_GYRO: np.diag([0.025**2, 0.025**2, 0.025**2]),
ObservationKind.PHONE_ACCEL: np.diag([.5**2, .5**2, .5**2]),
ObservationKind.CAMERA_ODO_ROTATION: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.IMU_FRAME: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.NO_ROT: np.diag([0.00025**2, 0.00025**2, 0.00025**2]),
ObservationKind.ECEF_POS: np.diag([5**2, 5**2, 5**2]),
ObservationKind.ECEF_VEL: np.diag([.5**2, .5**2, .5**2]),
ObservationKind.ECEF_ORIENTATION_FROM_GPS: np.diag([.2**2, .2**2, .2**2, .2**2])}
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, np.diag(self.initial_P_diag), self.dim_state, self.dim_state_err, max_rewind_age=0.2)
@property
def x(self):
return self.filter.state()
@property
def t(self):
return self.filter.filter_time
@property
def P(self):
return self.filter.covs()
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=True)
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, meas, R=None):
if len(meas) > 0:
meas = np.atleast_2d(meas)
if kind == ObservationKind.CAMERA_ODO_TRANSLATION:
r = self.predict_and_update_odo_trans(meas, t, kind)
elif kind == ObservationKind.CAMERA_ODO_ROTATION:
r = self.predict_and_update_odo_rot(meas, t, kind)
elif kind == ObservationKind.ODOMETRIC_SPEED:
r = self.predict_and_update_odo_speed(meas, t, kind)
else:
if R is None:
R = self.get_R(kind, len(meas))
elif len(R.shape) == 2:
R = R[None]
r = self.filter.predict_and_update_batch(t, kind, meas, R)
# Normalize quats
quat_norm = np.linalg.norm(self.filter.x[3:7, 0])
self.filter.x[States.ECEF_ORIENTATION, 0] = self.filter.x[States.ECEF_ORIENTATION, 0] / quat_norm
return r
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def predict_and_update_odo_speed(self, speed, t, kind):
z = np.array(speed)
R = np.zeros((len(speed), 1, 1))
for i, _ in enumerate(z):
R[i, :, :] = np.diag([0.2**2])
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_trans(self, trans, t, kind):
z = trans[:, :3]
R = np.zeros((len(trans), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(trans[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_rot(self, rot, t, kind):
z = rot[:, :3]
R = np.zeros((len(rot), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(rot[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
if __name__ == "__main__":
generated_dir = sys.argv[2]
LiveKalman.generate_code(generated_dir)
| mit | 648,415,760,771,225,300 | 40.560554 | 161 | 0.590126 | false | 2.757979 | false | false | false |
uahic/nest-simulator | pynest/nest/tests/test_labeled_synapses.py | 3 | 6034 | # -*- coding: utf-8 -*-
#
# test_labeled_synapses.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test setting and getting labels on synapses.
"""
import unittest
import nest
HAVE_GSL = nest.sli_func("statusdict/have_gsl ::")
@nest.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class LabeledSynapsesTestCase(unittest.TestCase):
"""Test labeled synapses"""
def default_network(self):
nest.ResetKernel()
# set volume transmitter for stdp_dopamine_synapse_lbl
vol = nest.Create('volume_transmitter', 3)
nest.SetDefaults('stdp_dopamine_synapse', {'vt': vol[0]})
nest.SetDefaults('stdp_dopamine_synapse_lbl', {'vt': vol[1]})
nest.SetDefaults('stdp_dopamine_synapse_hpc', {'vt': vol[2]})
# create neurons that accept all synapse connections (especially gap
# junctions)... hh_psc_alpha_gap is only available with GSL, hence the
# skipIf above
return nest.Create("hh_psc_alpha_gap", 5)
def test_SetLabelToSynapseOnConnect(self):
"""Set a label to a labeled synapse on connect."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set a label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
c = nest.GetConnections(a, a)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToSynapseSetStatus(self):
"""Set a label to a labeled synapse on SetStatus."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set no label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
c = nest.GetConnections(a, a)
# still unlabeled
self.assertTrue(
all([
status['synapse_label'] == -1
for status in nest.GetStatus(c)
])
)
# set a label
nest.SetStatus(c, {'synapse_label': 123})
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToSynapseSetDefaults(self):
"""Set a label to a labeled synapse on SetDefaults."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# set a label during SetDefaults
nest.SetDefaults(syn, {'synapse_label': 123})
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
c = nest.GetConnections(a, a)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_GetLabeledSynapses(self):
"""Get labeled synapses with GetConnections."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# some more connections
nest.Connect(a, a, {"rule": "one_to_one"},
{"model": "static_synapse"})
# set a label during connection
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
c = nest.GetConnections(a, a, synapse_label=123)
self.assertTrue(
all([
status['synapse_label'] == 123
for status in nest.GetStatus(c)
])
)
def test_SetLabelToNotLabeledSynapse(self):
"""Try set a label to an 'un-label-able' synapse."""
labeled_synapse_models = [s for s in nest.Models(
mtype='synapses') if not s.endswith("_lbl")]
for syn in labeled_synapse_models:
a = self.default_network()
# try set a label during SetDefaults
with self.assertRaises(nest.NESTError):
nest.SetDefaults(syn, {'synapse_label': 123})
            # try to set a label on connect
with self.assertRaises(nest.NESTError):
nest.Connect(a, a, {"rule": "one_to_one"}, {
"model": syn, "synapse_label": 123})
# plain connection
nest.Connect(a, a, {"rule": "one_to_one"}, {"model": syn})
            # try to set a label via SetStatus
c = nest.GetConnections(a, a)
with self.assertRaises(nest.NESTError):
nest.SetStatus(c, {'synapse_label': 123})
def suite():
suite = unittest.makeSuite(LabeledSynapsesTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 | 7,329,957,250,267,788,000 | 33.878613 | 78 | 0.558336 | false | 3.977587 | true | false | false |
lapets/bu-gsubmit-grading | gradefiles-push.py | 1 | 2477 | #####################################################################
##
## gradefiles-push.py
##
## Script to post grade files to the gsubmit directories of
## enrolled students; the input grade file names should correspond
## to the user names of the students.
##
##
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## ASCII escape sequence macros for color output on the terminal.
##
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def printred(s): print(bcolors.RED + s + bcolors.ENDC)
def printblue(s): print(bcolors.BLUE + s + bcolors.ENDC)
def printyellow(s): print(bcolors.YELLOW + s + bcolors.ENDC)
def printpurple(s): print(bcolors.PURPLE + s + bcolors.ENDC)
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 5\
and int(sys.argv[1]) in range(100,1000)\
and sys.argv[2] in ['Fall', 'Spring']\
and int(sys.argv[3]) in range(2000,2100):
courseNumber = sys.argv[1]
season = sys.argv[2]
year = sys.argv[3]
    task = sys.argv[4]
course = 'cs' + courseNumber + '/' + season + '-' + year
else:
print('\n Usage:\n\n % python gradefiles-push.py <###> <Fall|Spring> <YYYY> <task>\n')
exit()
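## Example invocation (hypothetical course and task values):
##
##   % python gradefiles-push.py 591 Fall 2016 ps2
##
## With those arguments, each ./data/<username>.txt file is posted as
## grade.ps2.txt under /cs/course/cs591/Fall-2016/homework/spool/<username>/.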
#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
print('No folder "data" containing grade files found. Exiting.')
exit()
#####################################################################
## Post the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
for file in files:
        # join with curdir so grade files in subfolders of ./data also resolve
        txt = open(os.path.join(curdir, file), 'r').read()
name = file.split('.')[0]
path = '/cs/course/' + course + '/homework/spool/'+name
target = path+'/grade.' + task + '.txt'
if os.path.exists(path):
open(target, 'w').write(txt)
print('Wrote file: ' + target)
else:
printred('Path '+path+' does not exist!')
#####################################################################
## Adjust grade file permissions so that students can read them.
##
os.system('chmod 0664 /cs/course/' + course + '/homework/spool/*/grade.' + task + '.txt')
#eof | mit | -784,425,278,218,611,800 | 30.367089 | 94 | 0.505046 | false | 3.616058 | false | false | false |
jimberlage/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py | 26 | 3723 | # -*- coding: utf-8 -*-
"""
Client Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In product code you will want to use an actual
HTTP/1.1 client if possible.
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 80))
def send_initial_request(connection, settings):
"""
For the sake of this upgrade demonstration, we're going to issue a GET
request against the root of the site. In principle the best request to
issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
poorly supported and can break in weird ways.
"""
# Craft our initial request per RFC 7540 Section 3.2. This requires two
    # special header fields: the Upgrade header and the HTTP2-Settings header.
# The value of the HTTP2-Settings header field comes from h2.
request = (
b"GET / HTTP/1.1\r\n" +
b"Host: localhost\r\n" +
b"Upgrade: h2c\r\n" +
b"HTTP2-Settings: " + settings + "\r\n"
b"\r\n"
)
connection.sendall(request)
def get_upgrade_response(connection):
"""
This function reads from the socket until the HTTP/1.1 end-of-headers
sequence (CRLFCRLF) is received. It then checks what the status code of the
response is.
This is not a substitute for proper HTTP/1.1 parsing, but it's good enough
for example purposes.
"""
data = b''
while b'\r\n\r\n' not in data:
data += connection.recv(8192)
headers, rest = data.split(b'\r\n\r\n', 1)
    # An upgrade response begins with HTTP/1.1 101 Switching Protocols. Look for the
# code. In production code you should also check that the upgrade is to
# h2c, but here we know we only offered one upgrade so there's only one
# possible upgrade in use.
split_headers = headers.split()
if split_headers[1] != b'101':
raise RuntimeError("Not upgrading!")
# We don't care about the HTTP/1.1 data anymore, but we do care about
# any other data we read from the socket: this is going to be HTTP/2 data
# that must be passed to the H2Connection.
return rest
def main():
"""
The client upgrade flow.
"""
    # Step 1: Establish the TCP connection.
connection = establish_tcp_connection()
# Step 2: Create H2 Connection object, put it in upgrade mode, and get the
# value of the HTTP2-Settings header we want to use.
h2_connection = h2.connection.H2Connection()
settings_header_value = h2_connection.initiate_upgrade_connection()
# Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
send_initial_request(connection, settings_header_value)
# Step 4: Read the HTTP/1.1 response, look for 101 response.
extra_data = get_upgrade_response(connection)
# Step 5: Immediately send the pending HTTP/2 data.
connection.sendall(h2_connection.data_to_send())
    # Step 6: Feed the remaining HTTP/2 data to the H2Connection object.
    events = h2_connection.receive_data(extra_data)
# Now you can enter your main loop, beginning by processing the first set
# of events above. These events may include ResponseReceived, which will
# contain the response to the request we made in Step 3.
main_loop(events)
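# The fragment above hands off to main_loop() without ever defining it. The
# sketch below is an illustrative stand-in rather than part of the upstream
# example: it only reports the events produced in Step 6, whereas a real loop
# would also keep reading from the socket, feeding the bytes to
# h2_connection.receive_data(), and flushing h2_connection.data_to_send().
import h2.events
def main_loop(events):
    """Minimal, assumption-laden handler for the first batch of H2 events."""
    for event in events:
        if isinstance(event, h2.events.ResponseReceived):
            # Headers of the response to the GET issued in Step 3.
            print(event.headers)
        elif isinstance(event, h2.events.DataReceived):
            print(event.data)
        elif isinstance(event, h2.events.StreamEnded):
            print("Response complete")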
| mpl-2.0 | 8,718,685,781,124,614,000 | 35.145631 | 79 | 0.689229 | false | 3.858031 | false | false | false |