repo_name (stringlengths 6 to 61) | path (stringlengths 4 to 230) | copies (stringlengths 1 to 3) | size (stringlengths 4 to 6) | text (stringlengths 1.01k to 850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6 to 96.6) | line_max (int64, 32 to 939) | alpha_frac (float64, 0.26 to 0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62 to 6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
111t8e/h2o-2 | py/testdir_multi_jvm/test_rf_parity_cmp.py | 9 | 3669 |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_parity_cmp(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [50000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
def doBoth():
h2o.verboseprint("Trial", trial)
start = time.time()
# make sure ntrees and max_depth are the same for both
rfView = h2o_cmd.runRF(parseResult=parseResult, ntrees=ntrees, max_depth=40, response=response,
timeoutSecs=600, retryDelaySecs=3)
elapsed1 = time.time() - start
(totalError1, classErrorPctList1, totalScores2) = h2o_rf.simpleCheckRFView(rfv=rfView)
rfView = h2o_cmd.runSpeeDRF(parseResult=parseResult, ntrees=ntrees, max_depth=40, response=response,
timeoutSecs=600, retryDelaySecs=3)
elapsed2 = time.time() - start
(totalError2, classErrorPctList2, totalScores2) = h2o_rf.simpleCheckRFView(rfv=rfView)
print "Checking that results are similar (within 20%)"
print "DRF2 then SpeeDRF"
print "per-class variance is large..basically we can't check very well for this dataset"
for i, (j,k) in enumerate(zip(classErrorPctList1, classErrorPctList2)):
print "classErrorPctList[%s]:i %s %s" % (i, j, k)
# self.assertAlmostEqual(classErrorPctList1[i], classErrorPctList2[i],
# delta=1 * classErrorPctList2[i], msg="Comparing RF class %s errors for DRF2 and SpeeDRF" % i)
print "totalError: %s %s" % (totalError1, totalError2)
self.assertAlmostEqual(totalError1, totalError2, delta=.2 * totalError2, msg="Comparing RF total error for DRF2 and SpeeDRF")
print "elapsed: %s %s" % (elapsed1, elapsed2)
self.assertAlmostEqual(elapsed1, elapsed2, delta=.5 * elapsed2, msg="Comparing RF times for DRF2 and SpeeDRF")
# always match the gen above!
for trial in range (1):
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30, doSummary=False)
inspect = h2o_cmd.runInspect(key=hex_key)
numCols = inspect['numCols']
numRows = inspect['numRows']
response = "C" + str(numCols)
ntrees = 30
doBoth()
print "*****************************"
print "end # %s RF compare" % trial,
print "*****************************"
print "Now change all cols to enums"
for e in range(numCols):
enumResult = h2o.nodes[0].to_enum(src_key=hex_key, column_index=(e+1))
doBoth()
print "*********************************"
print "end # %s RF compare, with enums #" % trial,
print "*********************************"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -1,359,742,464,387,223,300 | 43.204819 | 146 | 0.56773 | false | 3.438613 | false | false | false |
viswimmer1/PythonGenerator | data/python_files/31241242/paragraph.py | 1 | 5151 |
import html
import inline
import re
def forge_line(modifiers, line):
for modifier in modifiers:
line = modifier(line)
return line
class Paragraph:
lines = []
def __init__(self, lines):
self.lines = lines
def build(self):
return self.tail(
reduce(lambda r, line: r + [forge_line(self.modifiers(), line)],
self.lines, self.head()))
def head(self):
return []
def tail(self, result):
return result
def modifiers(self):
from nijiconf import BR
return [html.forge, inline.forge, lambda x: x + BR]
def length(self):
return reduce(lambda length, line: length + len(line), self.lines, 0)
class Table(Paragraph):
def __init__(self, lines):
Paragraph.__init__(self, lines)
def head(self):
from nijiconf import TABLE_BEGIN
return [TABLE_BEGIN]
def tail(self, result):
from nijiconf import TABLE_END
result.append(TABLE_END)
return result
def modifiers(self):
from table import row_extract
from nijiconf import ROW_BEGIN, ROW_END
return [html.forge, inline.forge,
lambda text: ROW_BEGIN + row_extract(text) + ROW_END]
class CodeBlock(Paragraph):
def __init__(self, lines):
Paragraph.__init__(self, lines)
def head(self):
from nijiconf import MONO_BLOCK_BEGIN
return [MONO_BLOCK_BEGIN]
def tail(self, result):
from nijiconf import MONO_BLOCK_END
result.append(MONO_BLOCK_END)
return result
def modifiers(self):
from nijiconf import BR
return [html.forge, inline.forge, lambda x: x + BR]
class AsciiArt(Paragraph):
def __init__(self, lines):
Paragraph.__init__(self, lines)
def head(self):
from nijiconf import AA_BEGIN
return [AA_BEGIN]
def tail(self, result):
from nijiconf import AA_END
result.append(AA_END)
return result
def modifiers(self):
from nijiconf import BR
return [html.forge, lambda x: x[2: ] + BR]
class Bullets(Paragraph):
def __init__(self, lines):
Paragraph.__init__(self, lines)
def head(self):
from nijiconf import UL_BEGIN
return [UL_BEGIN]
def tail(self, result):
from nijiconf import UL_END
result.append(UL_END)
return result
def modifiers(self):
from nijiconf import LI_BEGIN, LI_END
return [html.forge, inline.forge,
lambda text: LI_BEGIN + text[2: len(text)] + LI_END]
import nijiconf
LEVEL_2_STR = (
(nijiconf.H1_BEGIN, nijiconf.H1_END),
(nijiconf.H2_BEGIN, nijiconf.H2_END),
(nijiconf.H3_BEGIN, nijiconf.H3_END),
)
class Head(Paragraph):
level = 0
def __init__(self, lines, level):
Paragraph.__init__(self, lines)
self.level = level
def modifiers(self):
from nijiconf import LI_BEGIN, LI_END
return [html.forge, inline.forge,
lambda text: LEVEL_2_STR[self.level][0] +
text[self.level + 2: len(text)] +
LEVEL_2_STR[self.level][1]]
class Head1(Head):
def __init__(self, lines):
Head.__init__(self, lines, 0)
class Head2(Head):
def __init__(self, lines):
Head.__init__(self, lines, 1)
class Head3(Head):
def __init__(self, lines):
Head.__init__(self, lines, 2)
LINE_PATTERNS = (
('{{{', '}}}', CodeBlock, True),
('\[\[\[', ']]]', Table, True),
('[*][ ]', '(?![*][ ])', Bullets, False),
('(: |:$)', '(?!(: |:$))', AsciiArt, False),
('=[ ]', '', Head1, False),
('==[ ]', '', Head2, False),
('===[ ]', '', Head3, False),
)
def pattern_begin(pattern):
return pattern[0]
def pattern_end(pattern):
return pattern[1]
def pattern_ctor(pattern):
return pattern[2]
def pattern_excluded(pattern):
return pattern[3]
def search_for_para(document, begin, paragraphs):
pattern = match_pattern_begin(document[begin])
begin += 1 if pattern_excluded(pattern) else 0
end = begin + 1
while end < len(document) and not re.match(pattern_end(pattern),
document[end]):
end += 1
paragraphs.append(pattern_ctor(pattern)(document[begin: end]))
return end + (1 if pattern_excluded(pattern) else 0)
def normal_text_from(document, begin, paragraphs):
if match_pattern_begin(document[begin]):
return begin
end = begin
while end < len(document) and not match_pattern_begin(document[end]):
end += 1
paragraphs.append(Paragraph(document[begin: end]))
return end
def match_pattern_begin(line):
for pattern in LINE_PATTERNS:
if re.match(pattern_begin(pattern), line):
return pattern
return None
def split_document(document):
paragraphs = []
cursor = 0
while cursor < len(document):
cursor = normal_text_from(document, cursor, paragraphs)
if cursor < len(document):
cursor = search_for_para(document, cursor, paragraphs)
return paragraphs
| gpl-2.0 | -4,387,755,949,561,151,500 | 25.689119 | 80 | 0.584741 | false | 3.542641 | false | false | false |
ver228/tierpsy-tracker | tierpsy/analysis/food_cnt/getFoodContour.py | 1 | 2474 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017
@author: ajaver
"""
import os
import tables
import numpy as np
import warnings
from .getFoodContourNN import get_food_contour_nn
from .getFoodContourMorph import get_food_contour_morph
from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name
def calculate_food_cnt(mask_file, use_nn_food_cnt, model_path, _is_debug=False, solidity_th=0.98):
if use_nn_food_cnt:
if not os.path.exists(model_path):
warnings.warn('The model to obtain the food contour was not found. Nothing to do here...\n If you dont have a valid model. You could try to set `food_method=MORPH` to use a different algorithm.')
return
food_cnt, food_prob,cnt_solidity = get_food_contour_nn(mask_file, model_path, _is_debug=_is_debug)
if cnt_solidity < solidity_th:
food_cnt = np.zeros(0)
else:
food_cnt = get_food_contour_morph(mask_file, _is_debug=_is_debug)
return food_cnt
def getFoodContour(mask_file,
skeletons_file,
use_nn_food_cnt,
model_path,
solidity_th=0.98,
_is_debug = False
):
base_name = get_base_name(mask_file)
progress_timer = TimeCounter('')
print_flush("{} Calculating food contour {}".format(base_name, progress_timer.get_time_str()))
food_cnt = calculate_food_cnt(mask_file,
use_nn_food_cnt = use_nn_food_cnt,
model_path = model_path,
solidity_th= solidity_th,
_is_debug = _is_debug)
#store contour coordinates into the skeletons file and mask_file the contour file
for fname in [skeletons_file, mask_file]:
with tables.File(fname, 'r+') as fid:
if '/food_cnt_coord' in fid:
fid.remove_node('/food_cnt_coord')
#if it is a valid contour save it
if food_cnt is not None and \
food_cnt.size >= 2 and \
food_cnt.ndim == 2 and \
food_cnt.shape[1] == 2:
tab = fid.create_array('/',
'food_cnt_coord',
obj=food_cnt)
tab._v_attrs['use_nn_food_cnt'] = int(use_nn_food_cnt)
| mit | 7,047,628,497,492,391,000 | 33.84507 | 205 | 0.545675 | false | 3.564841 | false | false | false |
maxalbert/bokeh | bokeh/sphinxext/collapsible_code_block.py | 2 | 3129 |
""" Display code blocks in collapsible sections when outputting
to HTML.
Usage
-----
This directive takes a heading to use for the collapsible code block::
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
Options
-------
This directive is identical to the standard ``code-block`` directive
that Sphinx supplies, with the addition of one new option:
heading : string
A heading to put for the collapsible block. Clicking the heading
expands or collapses the block
Examples
--------
The inline example code above produces the following output:
----
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
"""
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from os.path import basename
import jinja2
from sphinx.directives.code import CodeBlock
PROLOGUE_TEMPLATE = jinja2.Template(u"""
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="heading-{{ id }}">
<h4 class="panel-title">
<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href="#collapse-{{ id }}" aria-expanded="false" aria-controls="collapse-{{ id }}">
{{ heading }}
</a>
</h4>
</div>
<div id="collapse-{{ id }}" class="panel-collapse collapse" role="tabpanel" aria-labelledby="heading-{{ id }}">
<div class="panel-body">
""")
EPILOGUE_TEMPLATE = jinja2.Template(u"""
</div>
</div>
</div>
</div>
""")
class collapsible_code_block(nodes.General, nodes.Element):
pass
class CollapsibleCodeBlock(CodeBlock):
option_spec = CodeBlock.option_spec
option_spec.update(heading=unchanged)
def run(self):
env = self.state.document.settings.env
rst_source = self.state_machine.node.document['source']
rst_filename = basename(rst_source)
target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
target_id = target_id.replace(".", "-")
target_node = nodes.target('', '', ids=[target_id])
node = collapsible_code_block()
node['target_id'] = target_id
node['heading'] = self.options.get('heading', "Code")
cb = CodeBlock.run(self)
node.setup_child(cb[0])
node.children.append(cb[0])
return [target_node, node]
def html_visit_collapsible_code_block(self, node):
self.body.append(
PROLOGUE_TEMPLATE.render(
id=node['target_id'],
heading=node['heading']
)
)
def html_depart_collapsible_code_block(self, node):
self.body.append(EPILOGUE_TEMPLATE.render())
def setup(app):
app.add_node(
collapsible_code_block,
html=(
html_visit_collapsible_code_block,
html_depart_collapsible_code_block
)
)
app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
| bsd-3-clause | 8,304,154,637,717,243,000 | 23.445313 | 159 | 0.643976 | false | 3.551646 | false | false | false |
yslib/SGMMCluster | SGMM/TrainSGMM.py | 1 | 11875 |
import numpy as np
from multiprocessing import Process
from sklearn import mixture
import time
import os
import sys
# volume data order: width > depth > height
# block order: width > depth > height
# version 1.0 only for int raw data
# by gxchen
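# Index convention used throughout this file: for a voxel at (x, y, z) in the
# full volume, the flat index into the raw byte array is
#
#     index = z * width * depth + y * width + x
#
# i.e. width varies fastest, then depth, then height, matching the
# "width > depth > height" ordering stated above (see read_block below).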
# single gauss component
class Gauss:
def __init__(self, weight, mean, covariance):
self.weight_ = weight
self.mean_ = mean # 3D array
self.covariance_ = covariance # 9D array
# single bin in histogram
class Bin:
def __init__(self, probability, gauss_count , sgmm):
self.probability_ = probability
self.gauss_count_ = gauss_count
self.gausses_ = []
def add_gauss(self, gauss):
self.gausses_.append(gauss)
# single block
class Block:
def __init__(self):
self.bin_num_ = 0
self.bins_ = []
self.bin_indexs_ = []
def add_bin(self, bin):
self.bins_.append(bin)
# data_source = 'Combustion'
# width = 480
# depth = 720
# height = 112
# process_num = 4
# disk_address = 'c:/train/'
#
# src_raw_name = disk_address+data_source+'.raw'
# side = 16
# zero_block_threshold = 0.003
# block_size = side * side * side
# width_num = width / side
# depth_num = depth / side
# height_num = height / side
#
# total_num = width_num * depth_num * height_num
# max_bin_num = 128
# ubg = 4 # max component number
# restore_raw = bytearray(width * depth * height)
# np.random.seed(1)
# stride = total_num / process_num
#
# f_all_data = open(src_raw_name, 'rb')
# f_all_data.seek(0, 0)
# all_data = bytearray(f_all_data.read())
# all_hit = [0] * width * depth * height
# read the index-th block of data
def read_block(index,all_data, width, depth, width_num, depth_num, block_size,side):
height_index = index / (width_num * depth_num)
depth_index = (index - height_index * width_num * depth_num) / width_num
width_index = index - height_index * width_num * depth_num - depth_index * width_num
result_data = [0] * block_size
for z in range(0, side): # width
for y in range(0, side): # depth
for x in range(0, side): # height
final_index_z = height_index * side + z
final_index_y = depth_index * side + y
final_index_x = width_index * side + x
final_index = final_index_z * width * depth + final_index_y * width + final_index_x
result_data[z * side * side + y * side + x] = all_data[final_index]
return result_data
# train on the index-th block of data
def train_single_block(block_index,
block_data,
block_size,
max_bin_num,
side,
ubg):
block = Block()
count = [0] * max_bin_num
train_data = [] * max_bin_num
for i in range(0, max_bin_num):
train_data.append([])
non_zero_count = 0
for z in range(0, side):
for y in range(0, side):
for x in range(0, side):
final_index = z * side * side + y * side + x
index = block_data[final_index] / 2
count[index] += 1 # map to value-distribution
train_data[index].append([x, y, z])
if block_data[final_index] != 0:
non_zero_count += 1
# train SGMM
block.bin_num_ = 0
if non_zero_count > int(side * side * side * 0.3): # make sure it's not an empty block
for bin_index in range(0, max_bin_num):
if count[bin_index] > 0:
block.bin_indexs_.append(bin_index)
block.bin_num_ += 1
for bin_count in range(0, block.bin_num_):
real_index = block.bin_indexs_[bin_count]
# if train_data[i] is empty or very small, skip it
if len(train_data[real_index]) <= 0: # not happen when equal 0, you can make it larger to speed up
continue
g = mixture.GaussianMixture(n_components=1, tol=1e-5, max_iter=5000)
g.fit(train_data[real_index])
max_bic = g.bic(np.array(train_data[real_index]))
final_g = g
final_component_num = 1
max_num = min(ubg, len(train_data[real_index]))
for component_num in range(2, max_num+1):
g = mixture.GaussianMixture(n_components=component_num, tol=1e-5, max_iter=5000)
g.fit(train_data[real_index])
bic_temp = g.bic(np.array(train_data[real_index]))
if block_index == 456:
print component_num,bic_temp
if bic_temp < max_bic:
final_g = g
final_component_num = component_num
max_bic = bic_temp
# already got final SGMM for bin i
bin = Bin(1.0 * count[real_index]/block_size, final_component_num, final_g)
for i in range(0, final_component_num):
gauss = Gauss(final_g.weights_[i], final_g.means_[i], final_g.covariances_[i])
bin.add_gauss(gauss)
block.add_bin(bin)
print("training block index " + str(block_index)+" done, bin_num_ = "+str(block.bin_num_))
return block
# make sure the value is not too small, else it will result in wrong input to the C++ program
def check_value(value_in):
if value_in < 1.0e-40:
return 1.0e-40
else:
return value_in
# train a part of original data
# and save sgmm arguments into a txt file
def train_blocks(disk_address,
data_source,
block_num,
index,
stride,
src_raw_name,
all_data,
width,
depth,
width_num,
depth_num,
max_bin_num,
block_size,
side,
ubg):
block_sgmm = [Block()] * stride
end_block = (index+1)*stride
end_index = stride
with open(src_raw_name, 'rb') as f_src:
for i in range(0, stride):
if index*stride + i >= block_num:
end_block = index*stride+i
end_index = i
break
block_data = read_block(index * stride + i,all_data,width, depth, width_num, depth_num, block_size,side)
block_sgmm[i] = train_single_block(index * stride + i, block_data, block_size, max_bin_num, side, ubg)
sgmm_output = disk_address + data_source + '_SGMM_Result_'+str(index)+'.txt' # only sgmm arguments
# restore block_sgmm into txt file
with open(sgmm_output, "w") as f_out:
for i in range(0, end_index):
# f_out.write(str(index * stride + i) + '###\n') # test only
idx = index*stride+i
if idx == 20 or idx == 13 or idx == 6 or idx == 0:
print("block_index:"+str(idx)+" bin num:"+str(block_sgmm[i].bin_num_))
f_out.write(str(block_sgmm[i].bin_num_) + '\n')
for bin_count in range(0, block_sgmm[i].bin_num_):
real_bin_index = block_sgmm[i].bin_indexs_[bin_count]
f_out.write(str(real_bin_index)+' ' + str(check_value(block_sgmm[i].bins_[bin_count].probability_))+' '+str(block_sgmm[i].bins_[bin_count].gauss_count_)+'\n')
for k in range(0, block_sgmm[i].bins_[bin_count].gauss_count_):
f_out.write(str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].weight_))+'\n')
f_out.write(str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].mean_[0]))+' '+str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].mean_[1]))+' '+str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].mean_[2]))+'\n')
f_out.write(str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[0][0]))+' '+str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[0][1]))+' '+str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[0][2]))+'\n')
f_out.write(str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[1][1]))+' '+str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[1][2]))+'\n')
f_out.write(str(check_value(block_sgmm[i].bins_[bin_count].gausses_[k].covariance_[2][2]))+'\n')
print("----------IN FILE:"+str(index)+" training and saving blocks from "+str(index*stride)+" to "+str(end_block)+" done")
# train all block, parallel computing, assign into 4 cpu kernel
if __name__ == '__main__':
disk_address =""
data_source = ""
width = 0
depth = 0
height = 0
process_num = 0
side =0
if len(sys.argv) == 1:
disk_address = raw_input("input disk address:")
data_source = raw_input('input the data name:')
width = int(raw_input('weight:'))
depth = int(raw_input('depth:'))
height = int(raw_input('height:'))
side = int(raw_input('side:'))
process_num = int(raw_input('input the process num (must be the divisor of the block number):'))
else:
disk_address = sys.argv[1]
data_source=sys.argv[2]
width = int(sys.argv[3])
depth = int(sys.argv[4])
height = int(sys.argv[5])
side = int(sys.argv[6])
process_num = int(sys.argv[7])
if not os.path.exists(disk_address+data_source+".raw"):
print('file doesn\'t exists')
exit(0)
print("disk address:"+disk_address)
print("data name:"+data_source)
print("width:"+str(width)+" depth:"+str(depth)+" height:"+str(height)+" side:"+str(side))
print("process num (file num):"+str(process_num))
src_raw_name = disk_address+data_source+'.raw'
zero_block_threshold = 0.003
block_size = side * side * side
width_num = width / side
depth_num = depth / side
height_num = height / side
total_num = width_num * depth_num * height_num
max_bin_num = 128
ubg = 4 # max component number
restore_raw = bytearray(width * depth * height)
np.random.seed(1)
stride = (total_num+process_num-1) / process_num
f_all_data = open(src_raw_name, 'rb')
f_all_data.seek(0, 0)
all_data = bytearray(f_all_data.read())
all_hit = [0] * width * depth * height
begin_time = time.localtime(time.time())
cpu_time_begin = time.clock()
proc_record = []
for i in range(0, process_num): # a block / 3 seconds
p = Process(target=train_blocks, args=(disk_address,
data_source,
total_num,
i,
stride,
src_raw_name,
all_data,
width,
depth,
width_num,
depth_num,
max_bin_num,
block_size,
side,
ubg))
p.start()
proc_record.append(p)
for p in proc_record:
p.join()
print("training SGMM done.")
cpu_time_end = time.clock();
print time.strftime('Training began at %Y-%m-%d %H:%M:%S', begin_time)
print time.strftime('Training finished at %Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print("cpu time cost in python :"+str(cpu_time_end-cpu_time_begin)+"s.")
# with open(src_raw_name, "rb") as f_src:
# single_block_data = read_block(3500)
# train_single_block(3500, single_block_data)
| mit | -5,475,334,288,070,634,000 | 36.225705 | 282 | 0.530105 | false | 3.486494 | false | false | false |
lorcan/CSVJoin | csvjoin/csvfilter.py | 1 | 1491 |
"""
csvfilter.py
Copyright (c) 2013 Lorcan Coyle, http://lorcancoyle.org
License: MIT License
Documentation: https://github.com/lorcan/CSVJoin
"""
import argparse
import csv
import sys
parser = argparse.ArgumentParser(description='Takes a CSV file, a column header, and a value, and generates a new file that does not contain any rows with that column value')
parser.add_argument("inputfile", help="This is the CSV file to be processed.")
parser.add_argument("columnName", help="This is the name of the header to be filtered")
parser.add_argument("columnValue", help="This is the value used for filtering")
parser.add_argument("outputfile", help="This is the name of the file where the output is to be put.")
args = parser.parse_args()
outputfile = csv.writer(open(args.outputfile, 'w'))
filterCount = 0
with open(args.inputfile, 'r') as csvfile:
reader = csv.reader(csvfile)
first = True
filterColumnNumber = -1
for row in reader:
if first:
first = False
if args.columnName not in row:
print "There is no column called " + args.columnName + " in the input files's header " + str(row) + ". Unable to filter. Exiting."
sys.exit()
filterColumnNumber = row.index(args.columnName)
outputfile.writerow(row)
else:
if(args.columnValue == row[filterColumnNumber]):
filterCount = filterCount + 1
# Do nothing
else:
outputfile.writerow(row)
print "Filtered " + str(filterCount) + " records."
| mit | 1,164,402,537,131,164,200 | 34.5 | 170 | 0.697518 | false | 3.736842 | false | false | false |
luzfcb/documentos | src/core/fields.py | 1 | 31981 |
import weakref
import django
from django.db import models
from django.db.models import F, Q
from django.db.models.signals import post_init, m2m_changed, post_delete, post_save
from django.utils import six
try:
from django.db.models.expressions import Combinable
QueryExpressionType = Combinable
except ImportError:
from django.db.models.expressions import ExpressionNode
QueryExpressionType = ExpressionNode
class CounterField(models.IntegerField):
"""A field that provides atomic counter updating and smart initialization.
The CounterField makes it easy to atomically update an integer,
incrementing or decrementing it, without raise conditions or conflicts.
It can update a single instance at a time, or a batch of objects at once.
CounterField is useful for storing counts of objects, reducing the number
of queries performed. This requires that the calling code properly
increments or decrements at all the right times, of course.
This takes an optional ``initializer`` parameter that, if provided, can
be used to auto-populate the field the first time the model instance is
loaded, perhaps based on querying a number of related objects. The value
passed to ``initializer`` must be a function taking the model instance
as a parameter, and must return an integer or None. If it returns None,
the counter will not be updated or saved.
The model instance will gain four new functions:
* ``increment_{field_name}`` - Atomically increment by one.
* ``decrement_{field_name}`` - Atomically decrement by one.
* ``reload_{field_name}`` - Reload the value in this instance from the
database.
* ``reinit_{field_name}`` - Re-initializes the stored field using the
initializer function.
The field on the class (not the instance) provides two functions for
batch-updating models:
* ``increment`` - Takes a queryset and increments this field for
each object.
* ``decrement`` - Takes a queryset and decrements this field for
each object.
"""
@classmethod
def increment_many(cls, model_instance, values, reload_object=True):
"""Increments several fields on a model instance at once.
This takes a model instance and dictionary of fields to values,
and will increment each of those fields by that value.
If reload_object is True, then the fields on the instance will
be reloaded to reflect the current values.
"""
cls._update_values(model_instance, values, reload_object, 1)
@classmethod
def decrement_many(cls, model_instance, values, reload_object=True):
"""Decrements several fields on a model instance at once.
This takes a model instance and dictionary of fields to values,
and will decrement each of those fields by that value.
If reload_object is True, then the fields on the instance will
be reloaded to reflect the current values.
"""
cls._update_values(model_instance, values, reload_object, -1)
@classmethod
def _update_values(cls, model_instance, values, reload_object, multiplier):
update_values = {}
for attname, value in six.iteritems(values):
if value != 0:
update_values[attname] = F(attname) + value * multiplier
cls._set_values(model_instance, update_values, reload_object)
@classmethod
def _set_values(cls, model_instance, values, reload_object=True):
if values:
queryset = model_instance.__class__.objects.filter(
pk=model_instance.pk)
queryset.update(**values)
if reload_object:
cls._reload_model_instance(model_instance,
six.iterkeys(values))
@classmethod
def _reload_model_instance(cls, model_instance, attnames):
"""Reloads the value in this instance from the database."""
q = model_instance.__class__.objects.filter(pk=model_instance.pk)
values = q.values(*attnames)[0]
for attname, value in six.iteritems(values):
setattr(model_instance, attname, value)
def __init__(self, verbose_name=None, name=None,
initializer=None, default=None, **kwargs):
kwargs.update({
'blank': True,
'null': True,
})
super(CounterField, self).__init__(verbose_name, name, default=default,
**kwargs)
self._initializer = initializer
self._locks = {}
def increment(self, queryset, increment_by=1):
"""Increments this field on every object in the provided queryset."""
queryset.update(**{self.attname: F(self.attname) + increment_by})
def decrement(self, queryset, decrement_by=1):
"""Decrements this field on every object in the provided queryset."""
queryset.update(**{self.attname: F(self.attname) - decrement_by})
def contribute_to_class(self, cls, name):
def _increment(model_instance, *args, **kwargs):
self._increment(model_instance, *args, **kwargs)
def _decrement(model_instance, *args, **kwargs):
self._decrement(model_instance, *args, **kwargs)
def _reload(model_instance):
self._reload(model_instance)
def _reinit(model_instance):
self._reinit(model_instance)
super(CounterField, self).contribute_to_class(cls, name)
setattr(cls, 'increment_%s' % self.name, _increment)
setattr(cls, 'decrement_%s' % self.name, _decrement)
setattr(cls, 'reload_%s' % self.name, _reload)
setattr(cls, 'reinit_%s' % self.name, _reinit)
setattr(cls, self.attname, self)
post_init.connect(self._post_init, sender=cls)
def _increment(self, model_instance, reload_object=True, increment_by=1):
"""Increments this field by one."""
if increment_by != 0:
cls = model_instance.__class__
self.increment(cls.objects.filter(pk=model_instance.pk),
increment_by)
if reload_object:
self._reload(model_instance)
def _decrement(self, model_instance, reload_object=True, decrement_by=1):
"""Decrements this field by one."""
if decrement_by != 0:
cls = model_instance.__class__
self.decrement(cls.objects.filter(pk=model_instance.pk),
decrement_by)
if reload_object:
self._reload(model_instance)
def _reload(self, model_instance):
"""Reloads the value in this instance from the database."""
self._reload_model_instance(model_instance, [self.attname])
def _reinit(self, model_instance):
"""Re-initializes the value in the database from the initializer."""
if not (model_instance.pk or self._initializer or
six.callable(self._initializer)):
# We don't want to end up defaulting this to 0 if creating a
# new instance unless an initializer is provided. Instead,
# we'll want to handle this the next time the object is
# accessed.
return
value = 0
if self._initializer:
if isinstance(self._initializer, QueryExpressionType):
value = self._initializer
elif six.callable(self._initializer):
model_instance_id = id(model_instance)
self._locks[model_instance_id] = 1
value = self._initializer(model_instance)
del self._locks[model_instance_id]
if value is not None:
is_expr = isinstance(value, QueryExpressionType)
if is_expr and not model_instance.pk:
value = 0
is_expr = False
if is_expr:
cls = model_instance.__class__
cls.objects.filter(pk=model_instance.pk).update(**{
self.attname: value,
})
self._reload_model_instance(model_instance, [self.attname])
else:
setattr(model_instance, self.attname, value)
if model_instance.pk:
model_instance.save(update_fields=[self.attname])
def _post_init(self, instance=None, **kwargs):
# Prevent the possibility of recursive lookups where this
# same CounterField on this same instance tries to initialize
# more than once. In this case, this will have the updated
# value shortly.
if instance:
instance_id = id(instance)
if instance_id not in self._locks:
self._do_post_init(instance)
def _do_post_init(self, instance):
value = self.value_from_object(instance)
if value is None:
reinit = getattr(instance, 'reinit_%s' % self.name)
reinit()
class RelationCounterField(CounterField):
"""A field that provides an atomic count of a relation.
RelationCounterField is a specialization of CounterField that tracks
how many objects there are on the other side of a ManyToManyField or
ForeignKey relation.
RelationCounterField takes the name of a relation (either a field name,
for a forward ManyToManyField relation, or the "related_name" for
the reverse relation of another model's ForeignKey or ManyToManyField).
(Note that using a forward ForeignKey relation is considered invalid,
as the count can only be 1 or 0.)
The counter will be initialized with the number of objects on the
other side of the relation, and this will be kept updated so long as
all updates to the table are made using standard create/save/delete
operations on models.
Note that updating a relation outside of a model's regular API (such as
through raw SQL or something like an update() call) will cause the
counters to get out of sync. They would then need to be reset using
``reinit_{field_name}``.
"""
# Stores state across all instances of a RelationCounterField.
#
# Django doesn't make it easy to track updates to the other side of a
# relation, meaning we have to do it ourselves. This dictionary will
# weakly track InstanceState objects (which are tied to the lifecycle of
# a particular model instance). These objects are used to look up model
# instances and their RelationCounterFields, given a model name, model
# instance ID, and a relation name.
_instance_states = weakref.WeakValueDictionary()
# Stores instances we're tracking that haven't yet been saved.
#
# An unsaved instance may never be saved. We want to keep tabs on it
# so we can disconnect any signal handlers if it ever falls out of
# scope.
#
# Note that we're using a plain dictionary here, since we need to
# control the weak references ourselves.
_unsaved_instances = {}
# Most of the hard work really lives in RelationTracker below. Here, we
# store all registered instances of RelationTracker. There will be one
# per model_cls/relation_name pair.
_relation_trackers = {}
class InstanceState(object):
"""Tracks state for a RelationCounterField assocation.
State instances are bound to the lifecycle of a model instance.
They keep track of the model instance (using a weak reference) and
all RelationCounterFields tied to the relation name provided.
These are used for looking up the proper instance and
RelationCounterFields on the other end of a reverse relation, given
a model, relation name, and IDs, through the _instance_states
dictionary.
"""
def __init__(self, model_instance, fields):
self.model_instance_ref = weakref.ref(model_instance)
self.fields = fields
self.to_clear = set()
@property
def model_instance(self):
return self.model_instance_ref()
def reinit_fields(self):
"""Reinitializes all associated fields' counters."""
model_instance = self.model_instance
for field in self.fields:
field._reinit(model_instance)
def increment_fields(self, by=1):
"""Increments all associated fields' counters."""
RelationCounterField.increment_many(
self.model_instance,
dict([(field.attname, by) for field in self.fields]))
def decrement_fields(self, by=1):
"""Decrements all associated fields' counters."""
RelationCounterField.decrement_many(
self.model_instance,
dict([(field.attname, by) for field in self.fields]))
def zero_fields(self):
"""Zeros out all associated fields' counters."""
RelationCounterField._set_values(
self.model_instance,
dict([(field.attname, 0) for field in self.fields]))
def reload_fields(self):
"""Reloads all associated fields' counters."""
RelationCounterField._reload_model_instance(
self.model_instance,
[field.attname for field in self.fields])
def __repr__(self):
return '<RelationCounterField.InstanceState for %s.pk=%s>' % (
self.model_instance.__class__.__name__,
self.model_instance.pk)
class RelationTracker(object):
"""Tracks relations and updates state for all affected CounterFields.
This class is responsible for all the hard work of updating
RelationCounterFields referring to a relation, based on updates
to that relation. It's really the meat of RelationCounterField.
Each RelationTracker is responsible for a given model/relation name
pairing, across all instances of a model and across all
RelationCounterFields following that relation name.
The main reason the code lives here instead of in each
RelationCounterField is to keep state better in sync and to ensure
we're only ever dealing with one set of queries per relation name.
We're also simplifying signal registration, helping to make things
less error-prone.
"""
def __init__(self, model_cls, rel_field_name):
self._rel_field_name = rel_field_name
if django.VERSION >= (1, 7):
# Django >= 1.7
self._rel_field = model_cls._meta.get_field(rel_field_name)
rel_model = self._rel_field.model
is_rel_direct = (not self._rel_field.auto_created or
self._rel_field.concrete)
is_m2m = self._rel_field.many_to_many
else:
# Django < 1.7
self._rel_field, rel_model, is_rel_direct, is_m2m = \
model_cls._meta.get_field_by_name(rel_field_name)
self._is_rel_reverse = not is_rel_direct
if not is_m2m and is_rel_direct:
# This combination doesn't make any sense. There's only ever
# one item on this side, so no point in counting. Let's just
# complain about it.
raise ValueError(
"RelationCounterField cannot work with the forward end of "
"a ForeignKey ('%s')"
% rel_field_name)
dispatch_uid = '%s-%s.%s-related-save' % (
id(self),
self.__class__.__module__,
self.__class__.__name__)
if is_m2m:
# This is going to be one end or the other of a ManyToManyField
# relation.
if is_rel_direct:
# This is a ManyToManyField, and we can get the 'rel'
# attribute through it.
m2m_field = self._rel_field
self._related_name = m2m_field.rel.related_name
else:
# This is a RelatedObject. We need to get the field through
# this.
m2m_field = self._rel_field.field
self._related_name = m2m_field.attname
# Listen for all M2M updates on the through table for this
# ManyToManyField. Unfortunately, we can't look at a
# particular instance, but we'll use state tracking to do the
# necessary lookups and updates in the handler.
m2m_changed.connect(
self._on_m2m_changed,
weak=False,
sender=m2m_field.rel.through,
dispatch_uid=dispatch_uid)
else:
# This is a ForeignKey or similar. It must be the reverse end.
assert not is_rel_direct
model = self._get_rel_field_related_model(self._rel_field)
self._related_name = self._rel_field.field.attname
# Listen for deletions and saves on that model type. In the
# handler, we'll look up state for the other end of the
# relation (the side owning this RelationCounterField), so that
# we can update the counts.
#
# Unfortunately, we can't listen on the particular instance, so
# we use the state tracking.
post_delete.connect(
self._on_related_delete,
weak=False,
sender=model,
dispatch_uid=dispatch_uid)
post_save.connect(
self._on_related_save,
weak=False,
sender=model,
dispatch_uid=dispatch_uid)
def _on_m2m_changed(self, instance, action, reverse, model, pk_set,
**kwargs):
"""Handler for when a M2M relation has been updated.
This will figure out the necessary operations that may need to be
performed, given the update.
For post_add/post_remove operations, it's pretty simple. We see
if there are any instances (by way of stored state) for any of the
affected IDs, and we re-initialize them.
For clear operations, it's more tricky. We have to fetch all
instances on the other side of the relation before any database
changes are made, cache them in the InstanceState, and then update
them all in post_clear.
"""
if reverse != self._is_rel_reverse:
# This doesn't match the direction we're paying attention to.
# Ignore it.
return
is_post_clear = (action == 'post_clear')
is_post_add = (action == 'post_add')
is_post_remove = (action == 'post_remove')
if is_post_clear or is_post_add or is_post_remove:
state = RelationCounterField._get_state(
instance.__class__, instance.pk, self._rel_field_name)
if state:
if is_post_add:
state.increment_fields(by=len(pk_set))
elif is_post_remove:
state.decrement_fields(by=len(pk_set))
elif is_post_clear:
state.zero_fields()
if not pk_set and is_post_clear:
# See the note below for 'pre_clear' for an explanation
# of why we're doing this.
pk_set = state.to_clear
state.to_clear = set()
if pk_set:
# If any of the models have their own
# RelationCounterFields, make sure they've been updated to
# handle this side of things.
if is_post_add:
update_by = 1
else:
update_by = -1
# Update all RelationCounterFields on the other side of the
# relation that are referencing this relation.
self._update_counts(model, pk_set, '_related_name',
update_by)
for pk in pk_set:
state = RelationCounterField._get_state(
model, pk, self._related_name)
if state:
state.reload_fields()
elif action == 'pre_clear':
# m2m_changed doesn't provide any information on affected IDs
# for clear events (pre or post). We can, however, look up
# these IDs ourselves, and if they match any existing
# instances, we can re-initialize their counters in post_clear
# above.
#
# We do this by fetching the IDs (without instantiating new
# models) and storing it in the associated InstanceState. We'll
# use those IDs above in the post_clear handler.
state = RelationCounterField._get_state(
instance.__class__, instance.pk, self._rel_field_name)
if state:
mgr = getattr(instance, self._rel_field_name)
state.to_clear.update(mgr.values_list('pk', flat=True))
def _on_related_delete(self, instance, **kwargs):
"""Handler for when a ForeignKey relation is deleted.
This will check if a model entry that has a ForeignKey relation
to this field's parent model entry has been deleted from the
database. If so, any associated counter fields on this end will be
decremented.
"""
state = self._get_reverse_foreign_key_state(instance)
if state:
state.decrement_fields()
else:
self._update_unloaded_fkey_rel_counts(instance, -1)
def _on_related_save(self, instance=None, created=False, raw=False,
**kwargs):
"""Handler for when a ForeignKey relation is created.
This will check if a model entry has been created that has a
ForeignKey relation to this field's parent model entry. If so, any
associated counter fields on this end will be decremented.
"""
if raw or not created:
return
state = self._get_reverse_foreign_key_state(instance)
if state:
state.increment_fields()
else:
self._update_unloaded_fkey_rel_counts(instance, 1)
def _update_unloaded_fkey_rel_counts(self, instance, by):
"""Updates unloaded model entry counters for a ForeignKey relation.
This will get the ID of the model being referenced by the
matching ForeignKey in the provided instance. If set, it will
update all RelationCounterFields on that model that are tracking
the ForeignKey.
"""
rel_pk = getattr(instance, self._rel_field.field.attname)
if rel_pk is not None:
self._update_counts(
self._get_rel_field_parent_model(self._rel_field),
[rel_pk], '_rel_field_name', by)
def _update_counts(self, model_cls, pks, rel_attname, update_by):
"""Updates counts on all model entries matching the given criteria.
This will update counts on all RelationCounterFields on all entries
of the given model in the database that are tracking the given
relation.
"""
values = dict([
(field.attname, F(field.attname) + update_by)
for field in model_cls._meta.local_fields
if (isinstance(field, RelationCounterField) and
(getattr(field._relation_tracker, rel_attname) ==
self._rel_field_name))
])
if values:
if len(pks) == 1:
q = Q(pk=list(pks)[0])
else:
q = Q(pk__in=pks)
model_cls.objects.filter(q).update(**values)
def _get_reverse_foreign_key_state(self, instance):
"""Return an InstanceState for the other end of a ForeignKey.
This is used when listening to changes on models that establish a
ForeignKey to this counter field's parent model. Given the instance
on that end, we can get the state for this end.
"""
return RelationCounterField._get_state(
self._get_rel_field_parent_model(self._rel_field),
getattr(instance, self._rel_field.field.attname),
self._rel_field_name)
def _get_rel_field_parent_model(self, rel_field):
"""Return the model owning a relation field.
This provides compatibility across different versions of Django.
"""
if hasattr(rel_field, 'parent_model'):
# Django < 1.7
return rel_field.parent_model
else:
# Django >= 1.7
return rel_field.model
def _get_rel_field_related_model(self, rel_field):
"""Return the model on the other side of a relation field.
This provides compatibility across different versions of Django.
"""
if hasattr(rel_field, 'related_model'):
# Django >= 1.7
return rel_field.related_model
else:
# Django < 1.7
return rel_field.model
@classmethod
def _reset_state(cls, instance):
"""Resets state for an instance.
This will clear away any state tied to a particular instance ID. It's
used to ensure that any old, removed entries (say, from a previous
unit test) are cleared away before storing new state.
"""
for key, state in list(six.iteritems(cls._instance_states)):
if (state.model_instance.__class__ is instance.__class__ and
state.model_instance.pk == instance.pk):
del cls._instance_states[key]
@classmethod
def _store_state(cls, instance, field):
"""Stores state for a model instance and field.
This constructs an InstanceState instance for the given model instance
and RelationCounterField. It then associates it with the model instance
and stores a weak reference to it in _instance_states.
"""
assert instance.pk is not None
key = (instance.__class__, instance.pk, field._rel_field_name)
if key in cls._instance_states:
cls._instance_states[key].fields.append(field)
else:
state = cls.InstanceState(instance, [field])
setattr(instance, '_%s_state' % field.attname, state)
cls._instance_states[key] = state
@classmethod
def _get_state(cls, model_cls, instance_id, rel_field_name):
"""Returns an InstanceState instance for the given parameters.
If no InstanceState instance can be found that matches the
parameters, None will be returned.
"""
return cls._instance_states.get(
(model_cls, instance_id, rel_field_name))
def __init__(self, rel_field_name=None, *args, **kwargs):
def _initializer(model_instance):
if model_instance.pk:
return getattr(model_instance, rel_field_name).count()
else:
return 0
kwargs['initializer'] = _initializer
super(RelationCounterField, self).__init__(*args, **kwargs)
self._rel_field_name = rel_field_name
self._relation_tracker = None
def _do_post_init(self, instance):
"""Handles initialization of an instance of the parent model.
This will begin the process of storing state about the model
instance and listening to signals coming from the model on the
other end of the relation.
"""
super(RelationCounterField, self)._do_post_init(instance)
cls = instance.__class__
# We may not have a ID yet on the instance (as it may be a
# newly-created instance not yet saved to the database). In this case,
# we need to listen for the first save before storing the state.
if instance.pk is None:
instance_id = id(instance)
dispatch_uid = '%s-%s.%s-first-save' % (
instance_id,
self.__class__.__module__,
self.__class__.__name__)
post_save.connect(
lambda **kwargs: self._on_first_save(
instance_id, dispatch_uid=dispatch_uid, **kwargs),
weak=False,
sender=cls,
dispatch_uid=dispatch_uid)
self._unsaved_instances[instance_id] = weakref.ref(
instance,
lambda *args, **kwargs: self._on_unsaved_instance_destroyed(
cls, instance_id, dispatch_uid))
else:
RelationCounterField._store_state(instance, self)
if not self._relation_tracker:
key = (cls, self._rel_field_name)
self._relation_tracker = \
RelationCounterField._relation_trackers.get(key)
if not self._relation_tracker:
self._relation_tracker = \
self.RelationTracker(cls, self._rel_field_name)
RelationCounterField._relation_trackers[key] = \
self._relation_tracker
def _on_first_save(self, expected_instance_id, instance, dispatch_uid,
created=False, **kwargs):
"""Handler for the first save on a newly created instance.
This will disconnect the signal and store the state on the instance.
"""
if id(instance) == expected_instance_id:
assert created
# Stop listening immediately for any new signals here.
# The Signal stuff deals with thread locks, so we shouldn't
# have to worry about reaching any of this twice.
post_save.disconnect(sender=instance.__class__,
dispatch_uid=dispatch_uid)
cls = self.__class__
# This is a new row in the database (that is, the model instance
# has been saved for the very first time), we need to flush any
# existing state.
#
# The reason is that we may be running in a unit test situation, or
# are dealing with code that deleted an entry and then saved a new
# one with the old entry's PK explicitly assigned. Using the old
# state will just cause problems.
cls._reset_state(instance)
# Now we can register each RelationCounterField on here.
for field in instance.__class__._meta.local_fields:
if isinstance(field, cls):
cls._store_state(instance, field)
def _on_unsaved_instance_destroyed(self, cls, instance_id, dispatch_uid):
"""Handler for when an unsaved instance is destroyed.
An unsaved instance would still have a signal connection set.
We need to disconnect it to keep that connection from staying in
memory indefinitely.
"""
post_save.disconnect(sender=cls, dispatch_uid=dispatch_uid)
del self._unsaved_instances[instance_id]
| mpl-2.0 | 6,099,096,065,673,854,000 | 42.991747 | 83 | 0.585222 | false | 4.634256 | false | false | false
etienne-gauvin/music-player-core | tests/test_ffmpeg_memleak.py | 2 | 1471 |
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import better_exchook
better_exchook.install()
import sys, os, gc
def step():
gc.collect()
os.system("ps up %i" % os.getpid())
#print "\npress enter to continue"
#sys.stdin.readline()
def progr():
sys.stdout.write(".")
sys.stdout.flush()
def getFileList(n):
from RandomFileQueue import RandomFileQueue
fileQueue = RandomFileQueue(
rootdir=os.path.expanduser("~/Music"),
fileexts=["mp3", "ogg", "flac", "wma"])
return [fileQueue.getNextFile() for i in xrange(n)]
N = 10
files = getFileList(N)
from pprint import pprint
pprint(files)
import musicplayer
print "imported"
step()
for i in xrange(N):
musicplayer.createPlayer()
print "after createPlayer"
step()
class Song:
def __init__(self, fn):
self.url = fn
self.f = open(fn)
def readPacket(self, bufSize):
s = self.f.read(bufSize)
return s
def seekRaw(self, offset, whence):
r = self.f.seek(offset, whence)
return self.f.tell()
for f in files:
musicplayer.getMetadata(Song(f))
progr()
print "after getMetadata"
step()
for f in files:
musicplayer.calcAcoustIdFingerprint(Song(f))
progr()
print "after calcAcoustIdFingerprint"
step()
for f in files:
musicplayer.calcBitmapThumbnail(Song(f))
progr()
print "after calcBitmapThumbnail"
step()
| bsd-2-clause | 6,221,640,735,279,427,000 | 18.355263 | 101 | 0.717199 | false | 2.75985 | false | false | false |
YeEmrick/learning | stanford-tensorflow/2017/examples/03_linear_regression_starter.py | 1 | 1802 |
""" Simple linear regression example in TensorFlow
This program tries to predict the number of thefts from
the number of fire in the city of Chicago
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
import utils
DATA_FILE = 'data/fire_theft.xls'
# Phase 1: Assemble the graph
# Step 1: read in data from the .xls file
book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)
# Both have the type float32
# Step 3: create weight and bias, initialized to 0
# name your variables w and b
# Step 4: predict Y (number of theft) from the number of fire
# name your variable Y_predicted
# Step 5: use the square error as the loss function
# name your variable loss
# Step 6: using gradient descent with learning rate of 0.01 to minimize loss
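# One possible way to fill in steps 2-6 (a sketch using the TF 1.x API; kept
# commented out so this stays a starter file):
#
#     X = tf.placeholder(tf.float32, name='X')
#     Y = tf.placeholder(tf.float32, name='Y')
#     w = tf.Variable(0.0, name='w')
#     b = tf.Variable(0.0, name='b')
#     Y_predicted = X * w + b
#     loss = tf.square(Y - Y_predicted, name='loss')
#     optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)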
# Phase 2: Train our model
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
# TO - DO
# Step 8: train the model
for i in range(50): # run 50 epochs
total_loss = 0
for x, y in data:
# Session runs optimizer to minimize loss and fetch the value of loss. Name the received value as l
# TO DO: write sess.run()
total_loss += l
print("Epoch {0}: {1}".format(i, total_loss/n_samples))
# plot the results
# X, Y = data.T[0], data.T[1]
# plt.plot(X, Y, 'bo', label='Real data')
# plt.plot(X, X * w + b, 'r', label='Predicted data')
# plt.legend()
# plt.show()
| apache-2.0 | 5,057,828,337,622,505,000 | 26.318182 | 102 | 0.707547 | false | 3.189381 | false | false | false
benjamindeleener/scad | scripts/scad_validation.py | 1 | 10654 |
#!/usr/bin/env python
#########################################################################################
#
# Validation script for SCAD (Spinal Cord Automatic Detection)
#
# Brainhack MTL 2015: Algorithms for automatic spinal cord detection on MR images
#
# This repository is intended to develop and test new algorithms for automatically detecting the spinal cord on various
# contrasts of MR volumes.
# The developed algorithms must satisfy the following criteria:
# - they can be coded in Python or C++
# - they must read a nifti image as input image (.nii or .nii.gz): "-i" (input file name) option
# - they have to output a binary image with the same format and orientation as input image, containing the location
# or the centerline of the spinal cord: "-o" (output file name) option
# - they have to be **fast**
#
# To validate a new algorithm, it must go through the validation pipeline using the following command:
#
# scad_validation.py "algo_name"
#
# The validation pipeline tests your algorithm against a testing dataset containing many images of the spinal cord
# with various contrasts and fields of view, along with their manual segmentation.
# It tests several criteria:
# 1. if your detection is inside the spinal cord
# 2. if your detection is near the spinal cord centerline (at least near the manual centerline)
# 3. if the length of the centerline your algorithm extracted correspond with the manual centerline
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-07-22
#
# About the license: see the file LICENSE
#########################################################################################
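# A minimal sketch (not part of this script) of the command-line contract a
# candidate algorithm is expected to expose, with the -i/-o/-v options
# described above; the names below are illustrative assumptions:
#
#     import argparse
#     parser = argparse.ArgumentParser(description='my spinal cord detector')
#     parser.add_argument('-i', dest='fname_in', required=True,
#                         help='input NIfTI image (.nii or .nii.gz)')
#     parser.add_argument('-o', dest='fname_out', required=True,
#                         help='output binary image with the cord location/centerline')
#     parser.add_argument('-v', dest='verbose', action='store_true',
#                         help='enable verbose output')
#     args = parser.parse_args()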
import sys
import os
import nibabel as nib
from msct_image import Image
from scad import SCAD
import numpy as np
import scad
def scadMRValidation(algorithm, isPython=False, verbose=True):
if not isinstance(algorithm, str) or not algorithm:
print 'ERROR: You must provide the name of your algorithm as a string.'
usage()
import time
import sct_utils as sct
# creating a new folder with the experiment
path_experiment = 'scad-experiment.'+algorithm+'.'+time.strftime("%y%m%d%H%M%S")
#status, output = sct.run('mkdir '+path_experiment, verbose)
# copying images from "data" folder into experiment folder
sct.copyDirectory('data', path_experiment)
# Starting validation
os.chdir(path_experiment)
# t1
os.chdir('t1/')
for subject_dir in os.listdir('./'):
if os.path.isdir(subject_dir):
os.chdir(subject_dir)
# creating list of images and corresponding manual segmentation
list_images = dict()
for file_name in os.listdir('./'):
if not 'manual_segmentation' in file_name:
for file_name_corr in os.listdir('./'):
if 'manual_segmentation' in file_name_corr and sct.extract_fname(file_name)[1] in file_name_corr:
list_images[file_name] = file_name_corr
# running the proposed algorithm on images in the folder and analyzing the results
for image, image_manual_seg in list_images.items():
print image
path_in, file_in, ext_in = sct.extract_fname(image)
image_output = file_in+'_centerline'+ext_in
if isPython:
try:
eval(algorithm+'("'+image+'", "t1", verbose='+str(verbose)+')')
except Exception as e:
print 'Error during spinal cord detection on line {}:'.format(sys.exc_info()[-1].tb_lineno)
print 'Subject: t1/'+subject_dir+'/'+image
print e
sys.exit(2)
else:
cmd = algorithm+' -i '+image+' -t t1'
if verbose:
cmd += ' -v'
status, output = sct.run(cmd, verbose=verbose)
if status != 0:
print 'Error during spinal cord detection on Subject: t1/'+subject_dir+'/'+image
print output
sys.exit(2)
# analyzing the resulting centerline
from msct_image import Image
manual_segmentation_image = Image(image_manual_seg)
manual_segmentation_image.change_orientation()
centerline_image = Image(image_output)
centerline_image.change_orientation()
from msct_types import Coordinate
# coord_manseg = manual_segmentation_image.getNonZeroCoordinates()
coord_centerline = centerline_image.getNonZeroCoordinates()
# check if centerline is in manual segmentation
result_centerline_in = True
for coord in coord_centerline:
if manual_segmentation_image.data[coord.x, coord.y, coord.z] == 0:
result_centerline_in = False
print 'failed on slice #' + str(coord.z)
break
if result_centerline_in:
print 'OK: Centerline is inside manual segmentation.'
else:
print 'FAIL: Centerline is outside manual segmentation.'
# check the length of centerline compared to manual segmentation
# import sct_process_segmentation as sct_seg
# length_manseg = sct_seg.compute_length(image_manual_seg)
# length_centerline = sct_seg.compute_length(image_output)
# if length_manseg*0.9 <= length_centerline <= length_manseg*1.1:
# print 'OK: Length of centerline correspond to length of manual segmentation.'
# else:
# print 'FAIL: Length of centerline does not correspond to length of manual segmentation.'
os.chdir('..')
# t2
# t2*
# dmri
# gre
def validate_scad(folder_input):
"""
Expecting folder to have the following structure :
errsm_01:
- t2
-- errsm_01.nii.gz or t2.nii.gz
:param folder_input:
:return:
"""
current_folder = os.getcwd()
os.chdir(folder_input)
try:
patients = next(os.walk('.'))[1]
for i in patients:
if i != "errsm_01" and i !="errsm_02":
directory = i + "/t2"
os.chdir(directory)
try:
if os.path.isfile(i+"_t2.nii.gz"):
raw_image = Image(i+"_t2.nii.gz")
elif os.path.isfile("t2.nii.gz"):
raw_image = Image("t2.nii.gz")
else:
raise Exception("t2.nii.gz or "+i+"_t2.nii.gz file is not found")
raw_orientation = raw_image.change_orientation()
SCAD(raw_image, contrast="t2", rm_tmp_file=1, verbose=1).test_debug()
manual_seg = Image(i+"_t2_manual_segmentation.nii.gz")
manual_orientation = manual_seg.change_orientation()
from scipy.ndimage.measurements import center_of_mass
# find COM
iterator = range(manual_seg.data.shape[2])
com_x = [0 for ix in iterator]
com_y = [0 for iy in iterator]
for iz in iterator:
com_x[iz], com_y[iz] = center_of_mass(manual_seg.data[:, :, iz])
#raw_image.change_orientation(raw_orientation)
#manual_seg.change_orientation(manual_orientation)
centerline_scad = Image(i+"_t2_centerline.nii.gz")
os.remove(i+"_t2_centerline.nii.gz")
centerline_scad.change_orientation()
distance = []
for iz in range(centerline_scad.data.shape[2]):
ind1 = np.argmax(centerline_scad.data[:, :, iz])
                        X, Y = scad.ind2sub(centerline_scad.data[:, :, iz].shape, ind1)
com_phys = centerline_scad.transfo_pix2phys([[com_x[iz], com_y[iz], iz]])
scad_phys = centerline_scad.transfo_pix2phys([[X, Y, iz]])
distance_magnitude = np.linalg.norm(com_phys-scad_phys)
distance.append(distance_magnitude)
os.chdir(folder_input)
except Exception, e:
print e.message
pass
except Exception, e:
print e.message
def usage():
print """
""" + os.path.basename(__file__) + """
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Brainhack MTL 2015
DESCRIPTION
Validation script for SCAD (Spinal Cord Automatic Detection)
USAGE
""" + os.path.basename(__file__) + """ <algorithm_name>
MANDATORY ARGUMENTS
<algorithm_name> name of the script you want to validate. The script must have -i, -o and -v options enabled.
OPTIONAL ARGUMENTS
ispython Switch to python validation. It means that the algorithm will be called as a python method.
verbose Disable display. Default: display on.
-h help. Show this message
"""
sys.exit(1)
# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
# reading the name of algorithm from arguments
script_arguments = sys.argv[1:]
if "-h" in script_arguments:
usage()
### Start of not good code
if "-scad" in script_arguments:
folder = script_arguments[script_arguments.index("-i") + 1]
if folder != "" or folder is not None:
validate_scad(folder)
# elif len(script_arguments) > 3:
# print 'ERROR: this script only accepts three arguments: the name of your algorithm, if it is a python script or' \
# 'not and the verbose option.'
# usage()
#
# algorithm = script_arguments[0]
# verbose = True
# ispython = False
# if len(script_arguments) >= 2:
# if 'verbose' in script_arguments[1:]:
# verbose = False
# if 'ispython' in script_arguments[1:]:
# ispython = True
#
# scadMRValidation(algorithm=algorithm, isPython=ispython, verbose=verbose)
| mit | 5,965,855,604,423,026,000 | 40.617188 | 124 | 0.548433 | false | 4.254792 | false | false | false |
Mirantis/disk_perf_test_tool | wally/utils.py | 1 | 3311 | import os
import sys
import uuid
import logging
import datetime
import contextlib
from typing import Any, Tuple, Iterator, Iterable
try:
from petname import Generate as pet_generate
except ImportError:
def pet_generate(_1: str, _2: str) -> str:
return str(uuid.uuid4())
from cephlib.common import run_locally, sec_to_str
logger = logging.getLogger("wally")
STORAGE_ROLES = ['ceph-osd']
class StopTestError(RuntimeError):
pass
class LogError:
def __init__(self, message: str, exc_logger: logging.Logger = None) -> None:
self.message = message
self.exc_logger = exc_logger
def __enter__(self) -> 'LogError':
return self
def __exit__(self, tp: type, value: Exception, traceback: Any) -> bool:
if value is None or isinstance(value, StopTestError):
return False
if self.exc_logger is None:
exc_logger = sys._getframe(1).f_globals.get('logger', logger)
else:
exc_logger = self.exc_logger
exc_logger.exception(self.message, exc_info=(tp, value, traceback))
raise StopTestError(self.message) from value
class TaskFinished(Exception):
pass
def log_block(message: str, exc_logger: logging.Logger = None) -> LogError:
logger.debug("Starts : " + message)
return LogError(message, exc_logger)
def check_input_param(is_ok: bool, message: str) -> None:
if not is_ok:
logger.error(message)
raise StopTestError(message)
def yamable(data: Any) -> Any:
if isinstance(data, (tuple, list)):
        return [yamable(item) for item in data]
if isinstance(data, dict):
res = {}
for k, v in data.items():
res[yamable(k)] = yamable(v)
return res
return data
def get_creds_openrc(path: str) -> Tuple[str, str, str, str, bool]:
fc = open(path).read()
echo = 'echo "$OS_INSECURE:$OS_TENANT_NAME:$OS_USERNAME:$OS_PASSWORD@$OS_AUTH_URL"'
msg = "Failed to get creads from openrc file"
with LogError(msg):
data = run_locally(['/bin/bash'], input_data=(fc + "\n" + echo).encode('utf8')).decode("utf8")
msg = "Failed to get creads from openrc file: " + data
with LogError(msg):
data = data.strip()
insecure_str, user, tenant, passwd_auth_url = data.split(':', 3)
insecure = (insecure_str in ('1', 'True', 'true'))
passwd, auth_url = passwd_auth_url.rsplit("@", 1)
assert (auth_url.startswith("https://") or
auth_url.startswith("http://"))
return user, passwd, tenant, auth_url, insecure
@contextlib.contextmanager
def empty_ctx(val: Any = None) -> Iterator[Any]:
yield val
def get_uniq_path_uuid(path: str, max_iter: int = 10) -> Tuple[str, str]:
for i in range(max_iter):
run_uuid = pet_generate(2, "_")
results_dir = os.path.join(path, run_uuid)
if not os.path.exists(results_dir):
break
else:
run_uuid = str(uuid.uuid4())
results_dir = os.path.join(path, run_uuid)
return results_dir, run_uuid
def get_time_interval_printable_info(seconds: int) -> Tuple[str, str]:
exec_time_s = sec_to_str(seconds)
now_dt = datetime.datetime.now()
end_dt = now_dt + datetime.timedelta(0, seconds)
return exec_time_s, "{:%H:%M:%S}".format(end_dt)
| apache-2.0 | -8,652,072,735,847,001,000 | 25.918699 | 102 | 0.621867 | false | 3.385481 | false | false | false |
gstiebler/odemis | src/odemis/gui/comp/text.py | 1 | 36004 | # -*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2012 Rinze de Laat, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
Content:
This module contains classes describing various customized text fields used
throughout Odemis.
"""
from __future__ import division
import locale
import logging
import math
import os
import re
import string
import sys
import wx
import wx.lib.mixins.listctrl as listmix
from odemis.gui import FG_COLOUR_DIS, FG_COLOUR_EDIT
from odemis.util import units
from odemis.util.units import decompose_si_prefix, si_scale_val
# Locale is needed for correct string sorting
locale.setlocale(locale.LC_ALL, "")
# The SuggestTextCtrl and ChoiceListCtrl class are adaptations of the
# TextCtrlAutoComplete class found at
# http://wiki.wxpython.org/index.cgi/TextCtrlAutoComplete
#
# Adaptation for Delmic by R. de Laat
#
# wxPython Custom Widget Collection 20060207
# Written By: Edward Flick (eddy -=at=- cdf-imaging -=dot=- com)
# Michele Petrazzo (michele -=dot=- petrazzo -=at=- unipex =dot= it)
# Will Sadkin (wsadkin-=at=- nameconnector -=dot=- com)
# Copyright 2006 (c) CDF Inc. ( http://www.cdf-imaging.com )
# Contributed to the wxPython project under the wxPython project's license.
#
class ChoiceListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
""" Choice list used by the SuggestTextCtrl class """
def __init__(self, *args, **kwargs):
wx.ListCtrl.__init__(self, *args, **kwargs)
listmix.ListCtrlAutoWidthMixin.__init__(self)
class SuggestTextCtrl(wx.TextCtrl, listmix.ColumnSorterMixin):
def __init__(self, parent, choices=None, drop_down_click=True,
col_fetch=-1, col_search=0, hide_on_no_match=True,
select_callback=None, entry_callback=None, match_function=None,
**text_kwargs):
"""
Constructor works just like wx.TextCtrl except you can pass in a
list of choices. You can also change the choice list at any time
by calling SetChoices.
When a choice is picked, or the user has finished typing, a
EVT_COMMAND_ENTER is sent.
"""
text_kwargs['style'] = wx.TE_PROCESS_ENTER | wx.BORDER_NONE | text_kwargs.get('style', 0)
super(SuggestTextCtrl, self).__init__(parent, **text_kwargs)
# Some variables
self._drop_down_click = drop_down_click
self._choices = choices
self._lastinsertionpoint = 0
self._hide_on_no_match = hide_on_no_match
self._select_callback = select_callback
self._entry_callback = entry_callback
self._match_function = match_function
self._screenheight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
# sort variable needed by listmix
self.itemDataMap = dict()
# Load and sort data
if not self._choices:
self._choices = []
# raise ValueError, "Pass me at least one of multiChoices OR choices"
# widgets
self.dropdown = wx.PopupWindow(self)
# Control the style
flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_SORT_ASCENDING
flags = flags | wx.LC_NO_HEADER
# Create the list and bind the events
self.dropdownlistbox = ChoiceListCtrl(self.dropdown, style=flags, pos=wx.Point(0, 0))
ln = 1
# else: ln = len(choices)
listmix.ColumnSorterMixin.__init__(self, ln)
# load the data
# self.SetChoices(choices)
gp = self
while gp is not None:
gp.Bind(wx.EVT_MOVE, self.onControlChanged, gp)
gp.Bind(wx.EVT_SIZE, self.onControlChanged, gp)
gp = gp.GetParent()
self.Bind(wx.EVT_KILL_FOCUS, self.onControlChanged, self)
self.Bind(wx.EVT_TEXT, self.onEnteredText, self)
self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown, self)
# If need drop down on left click
if drop_down_click:
self.Bind(wx.EVT_LEFT_DOWN, self.onClickToggleDown, self)
self.Bind(wx.EVT_LEFT_UP, self.onClickToggleUp, self)
self.dropdown.Bind(wx.EVT_LISTBOX, self.onListItemSelected, self.dropdownlistbox)
self.dropdownlistbox.Bind(wx.EVT_LEFT_DOWN, self.onListClick)
self.dropdownlistbox.Bind(wx.EVT_LEFT_DCLICK, self.onListDClick)
# This causes the text the user is typing to directly auto-fill with
# the closest possibility.
# self.dropdown.Bind(wx.EVT_LIST_ITEM_SELECTED, self.onListDClick)
self.dropdownlistbox.Bind(wx.EVT_LIST_COL_CLICK, self.onListColClick)
# TODO: needed?
self.il = wx.ImageList(16, 16)
self.dropdownlistbox.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self._ascending = True
def _send_change_event(self):
"""
Sends an event EVT_COMMAND_ENTER to notify that the value has changed
"""
changeEvent = wx.CommandEvent(wx.wxEVT_COMMAND_ENTER, self.Id)
wx.PostEvent(self, changeEvent)
def GetListCtrl(self):
return self.dropdownlistbox
# -- event methods
def onListClick(self, evt):
toSel, dummy = self.dropdownlistbox.HitTest(evt.GetPosition())
#no values on position, return
if toSel == -1:
return
self.dropdownlistbox.Select(toSel)
def onListDClick(self, evt):
self._setValueFromSelected()
def onListColClick(self, evt):
col = evt.GetColumn()
#reverse the sort
if col == self._col_search:
self._ascending = not self._ascending
self.SortListItems(evt.GetColumn(), ascending=self._ascending)
self._col_search = evt.GetColumn()
evt.Skip()
def onEnteredText(self, event):
text = event.GetString()
if self._entry_callback:
self._entry_callback()
if not text:
# control is empty; hide dropdown if shown:
if self.dropdown.IsShown():
self._showDropDown(False)
event.Skip()
return
found = False
choices = self._choices
for numCh, choice in enumerate(choices):
if self._match_function and self._match_function(text, choice):
found = True
elif choice.lower().startswith(text.lower()):
found = True
if found:
self._showDropDown(True)
item = self.dropdownlistbox.GetItem(numCh)
toSel = item.GetId()
self.dropdownlistbox.Select(toSel)
break
if not found:
self.dropdownlistbox.Select(self.dropdownlistbox.GetFirstSelected(), False)
if self._hide_on_no_match:
self._showDropDown(False)
self._listItemVisible()
event.Skip()
def onKeyDown(self, event):
""" Do some work when the user press on the keys:
up and down: move the cursor
left and right: move the search
"""
sel = self.dropdownlistbox.GetFirstSelected()
KC = event.GetKeyCode()
if KC == wx.WXK_DOWN:
if sel < self.dropdownlistbox.GetItemCount() - 1:
self.dropdownlistbox.Select(sel + 1)
self._listItemVisible()
self._showDropDown()
elif KC == wx.WXK_UP:
if sel > 0:
self.dropdownlistbox.Select(sel - 1)
self._listItemVisible()
self._showDropDown()
elif KC == wx.WXK_RETURN or KC == wx.WXK_NUMPAD_ENTER:
visible = self.dropdown.IsShown()
if visible:
self._setValueFromSelected()
else:
self._send_change_event()
elif KC == wx.WXK_ESCAPE:
self._showDropDown(False)
else:
event.Skip()
def onListItemSelected(self, event):
self._setValueFromSelected()
event.Skip()
def onClickToggleDown(self, event):
self._lastinsertionpoint = self.GetInsertionPoint()
event.Skip()
def onClickToggleUp(self, event):
if self.GetInsertionPoint() == self._lastinsertionpoint:
self._showDropDown(not self.dropdown.IsShown())
event.Skip()
def onControlChanged(self, event):
if self and self.IsShown():
self._showDropDown(False)
if isinstance(event, wx.FocusEvent):
# KILL_FOCUS => that means the user is happy with the current value
self._send_change_event()
event.Skip()
def SetChoices(self, choices):
"""
Sets the choices available in the popup wx.ListBox.
The items will be sorted case insensitively.
"""
self._choices = choices
flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | \
wx.LC_SORT_ASCENDING | wx.LC_NO_HEADER
self.dropdownlistbox.SetWindowStyleFlag(flags)
if not isinstance(choices, list):
self._choices = list(choices)
self._choices.sort(cmp=locale.strcoll)
self._updateDataList(self._choices)
self.dropdownlistbox.InsertColumn(0, "")
for num, colVal in enumerate(self._choices):
index = self.dropdownlistbox.InsertImageStringItem(sys.maxint,
colVal, -1)
self.dropdownlistbox.SetStringItem(index, 0, colVal)
self.dropdownlistbox.SetItemData(index, num)
self._setListSize()
# there is only one choice for both search and fetch if setting a
# single column:
self._col_search = 0
self._col_fetch = -1
def GetChoices(self):
return self._choices
def Setselect_callback(self, cb=None):
self._select_callback = cb
def Setentry_callback(self, cb=None):
self._entry_callback = cb
def Setmatch_function(self, mf=None):
self._match_function = mf
#-- Internal methods
def _setValueFromSelected(self):
""" Sets the wx.TextCtrl value from the selected wx.ListCtrl item.
Will do nothing if no item is selected in the wx.ListCtrl.
"""
sel = self.dropdownlistbox.GetFirstSelected()
if sel > -1:
if self._col_fetch != -1:
col = self._col_fetch
else:
col = self._col_search
itemtext = self.dropdownlistbox.GetItem(sel, col).GetText()
if self._select_callback:
dd = self.dropdownlistbox
values = [dd.GetItem(sel, x).GetText()
for x in xrange(dd.GetColumnCount())]
self._select_callback(values)
self.SetValue(itemtext)
self.SetToolTip(wx.ToolTip(itemtext))
self.SetInsertionPointEnd()
self.SetSelection(-1, -1)
self._showDropDown(False)
self._send_change_event()
def _showDropDown(self, show=True):
"""
Either display the drop down list (show = True) or hide it (show = False).
"""
if show:
size = self.dropdown.GetSize()
            width, height = self.GetSizeTuple()
x, y = self.ClientToScreenXY(0, height)
if size.GetWidth() != width:
size.SetWidth(width)
self.dropdown.SetSize(size)
self.dropdownlistbox.SetSize(self.dropdown.GetClientSize())
if y + size.GetHeight() < self._screenheight:
self.dropdown.SetPosition(wx.Point(x, y))
else:
self.dropdown.SetPosition(
wx.Point(x, y - height - size.GetHeight()))
self.dropdown.Show(show)
def _listItemVisible(self):
"""
Moves the selected item to the top of the list ensuring it is always visible.
"""
toSel = self.dropdownlistbox.GetFirstSelected()
if toSel == -1:
return
self.dropdownlistbox.EnsureVisible(toSel)
def _updateDataList(self, choices):
#delete, if need, all the previous data
if self.dropdownlistbox.GetColumnCount() != 0:
self.dropdownlistbox.DeleteAllColumns()
self.dropdownlistbox.DeleteAllItems()
#and update the dict
if choices:
for numVal, data in enumerate(choices):
self.itemDataMap[numVal] = data
else:
numVal = 0
self.SetColumnCount(numVal)
def _setListSize(self):
choices = self._choices
longest = 0
for choice in choices:
longest = max(len(choice), longest)
longest += 3
itemcount = min(len(choices), 7) + 2
charheight = self.dropdownlistbox.GetCharHeight()
charwidth = self.dropdownlistbox.GetCharWidth()
self.popupsize = wx.Size(charwidth * longest, charheight * itemcount)
self.dropdownlistbox.SetSize(self.popupsize)
self.dropdown.SetClientSize(self.popupsize)
class _NumberValidator(wx.PyValidator):
""" Base class used for number validation
Note::
        In wxPython 3.0 Phoenix, wx.PyValidator will be replaced with wx.Validator. When you try
and replace wx.PyValidator with wx.Validator in wxPython 3.0 Classic, however, validators
will not be assigned correctly to TextCtrls (most notably the one in the Slider class)
No clear reason was found for this and no attempt to change the super class should be made
as long as Odemis uses the Classic version of wxPython.
"""
def __init__(self, min_val=None, max_val=None, choices=None, unit=None):
""" Constructor """
super(_NumberValidator, self).__init__()
self.Bind(wx.EVT_CHAR, self.on_char)
# this is a kludge because default value in XRC is 0:
if min_val == 0 and max_val == 0:
min_val = None
max_val = None
# Minimum and maximum allowed values
self.min_val = min_val
self.max_val = max_val
self.choices = choices
self.unit = unit
if None not in (min_val, max_val) and min_val > max_val:
raise ValueError("Min value is bigger than max value: %r > %r" % (min_val, max_val))
self._validate_choices()
# Build a regular expression pattern against which we can match the data that is being
# entered
reg_data = {
'negative_sign': '',
'unit': u"[ ]*[GMkmµunp]?(%s)?" % unit if unit else ''
}
if (
(min_val is None or min_val < 0) or
(max_val is not None and max_val < 0) or
(choices and min(choices) < 0)
):
reg_data['negative_sign'] = '-'
# Update the regular expression with the variables we've discovered
self.entry_regex = self.entry_regex.format(**reg_data)
# Compile the regex pattern what will be used for validation
self.entry_pattern = re.compile(self.entry_regex)
def set_value_range(self, min_val, max_val):
# TODO: check values and recompute .legal as in init
self.min_val = min_val
self.max_val = max_val
def GetRange(self):
return self.min_val, self.max_val
def _validate_choices(self):
""" Validate all the choice values, if choice values are defined """
if self.choices:
            for c in self.choices:
                if not self._is_valid_value(c):
                    raise ValueError("Illegal value (%s) found in choices" % c)
def _is_valid_value(self, val):
""" Validate the given value
Args:
val (str):
Returns:
(boolean): True if the given string is valid
"""
# Don't fail on empty string
if val is False or val is None:
return False
try:
num = self._cast(val)
except ValueError:
return False
if self.choices and num not in self.choices:
return False
        if self.min_val is not None and num < self.min_val:
            return False
        if self.max_val is not None and num > self.max_val:
            return False
return True
def _get_str_value(self):
""" Return the string value of the wx.Window to which this validator belongs """
        # Special trick for the (very likely) case where we are validating a NumberTextCtrl, whose
        # default 'GetValue' method is replaced with one that returns number instances
fld = self.GetWindow()
if hasattr(fld, "get_value_str"):
val = fld.get_value_str()
else:
val = fld.GetValue()
return val
def Clone(self):
raise NotImplementedError
def on_char(self, event):
""" This method prevents the entry of illegal characters """
key = event.GetKeyCode()
# Allow control keys to propagate
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
event.Skip()
return
field_val = unicode(self._get_str_value())
start, end = self.GetWindow().GetSelection()
field_val = field_val[:start] + chr(key) + field_val[end:]
if not field_val or self.entry_pattern.match(field_val):
# logging.debug("Field value %s accepted using %s", "field_val", self.entry_regex)
event.Skip()
else:
logging.debug("Field value %s NOT accepted using %s", field_val, self.entry_regex)
def Validate(self, win=None):
""" This method is called when the 'Validate()' method is called on the
parent of the TextCtrl to which this validator belongs. It can also
be called as a standalone validation method.
returns (boolean)
"""
is_valid = self._is_valid_value(self._get_str_value())
# logging.debug("Value '%s' is %s valid", self._get_str_value(), "" if is_valid else "not")
return is_valid
def get_validated_number(self, str_val):
""" Return a validated number represented by the string value provided
If choices is set, it will pick the closest matching value available.
If min_val or max_val are set, it will always return a value within bounds.
Args:
str_val (string): a string representing a number
Returns:
(None or number of the right type): the most meaningful value that would fit the
validator for the given string or None if the string is empty.
"""
if not str_val:
return None
# Aggressively try to cast the string to a legal value by removing characters
while len(str_val):
try:
num = self._cast(str_val)
break
except ValueError:
pass
str_val = str_val[:-1]
if not str_val:
return None
# Find the closest value in choices
if self.choices:
num = min(self.choices, key=lambda x: abs(x - num))
# bound the value by min/max
msg = "Truncating out of range [{}, {}] value {}"
if self.min_val is not None and num < self.min_val:
logging.debug(msg.format(self.min_val, self.max_val, num))
num = self.min_val
if self.max_val is not None and num > self.max_val:
logging.debug(msg.format(self.min_val, self.max_val, num))
num = self.max_val
return num
def _cast(self, str_val):
""" Cast the value string to the desired type
Args:
str_val (str): Value to cast
Returns:
number: Scaled and correctly typed number value
"""
raise NotImplementedError
def _step_from_range(min_val, max_val):
""" Dynamically create step size based on range """
try:
step = (max_val - min_val) / 255
# To keep the inc/dec values 'clean', set the step
# value to the nearest power of 10
step = 10 ** round(math.log10(step))
return step
except ValueError:
msg = "Error calculating step size for range [%s..%s]" % (min_val, max_val)
logging.exception(msg)
class _NumberTextCtrl(wx.TextCtrl):
""" A base text control specifically tailored to contain numerical data
Use .GetValue() and .SetValue()/.ChangeValue() to get/set the raw value
    (number). In this control, SetValue() simply calls ChangeValue(): both update
    the stored number without generating a change event.
To get the string that is displayed by the control, use .get_value_str() and .SetValueStr().
Generates a wxEVT_COMMAND_ENTER whenever a new number is set by the user.
    This typically happens when the control loses focus or when the [Enter] key is pressed.
"""
def __init__(self, *args, **kwargs):
"""
Args:
validator (Validator): Validator that checks the value entered by the user
key_step (number or None): by how much the value should be changed on key up/down
accuracy (None or int): how many significant digits to keep when cleanly displayed. If
None, it is never truncated.
"""
# Make sure that a validator is provided
if "validator" not in kwargs:
raise ValueError("Validator required!")
# The step size for when the up and down keys are pressed
self.key_step = kwargs.pop('key_step', None)
self.accuracy = kwargs.pop('accuracy', None)
# For the wx.EVT_TEXT_ENTER event to work, the TE_PROCESS_ENTER style needs to be set, but
# setting it in XRC throws an error. A possible workaround is to include the style by hand
kwargs['style'] = kwargs.get('style', 0) | wx.TE_PROCESS_ENTER | wx.BORDER_NONE
if len(args) > 2:
val = args[2]
args = args[:2]
else:
val = kwargs.pop('value', None)
        # The raw numerical value of the control (None when empty)
self._number_value = val
wx.TextCtrl.__init__(self, *args, **kwargs)
self.SetBackgroundColour(self.Parent.BackgroundColour)
self.SetForegroundColour(FG_COLOUR_EDIT)
# Set the value so it will be validated to be a valid number
if val is not None:
self.SetValue(self._number_value)
if self.key_step is not None:
self.Bind(wx.EVT_CHAR, self.on_char)
self.Bind(wx.EVT_KILL_FOCUS, self.on_kill_focus)
self.Bind(wx.EVT_SET_FOCUS, self.on_focus)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter)
def _display_pretty(self):
if self._number_value is None:
str_val = u""
else:
str_val = units.readable_str(self._number_value, sig=self.accuracy)
wx.TextCtrl.ChangeValue(self, str_val)
def Disable(self):
self.Enable(False)
def Enable(self, enable=True):
# TODO: Find a better way to deal with this hack that was put in place because under
# MS Windows the background colour cannot (at all?) be set when a control is disabled
if os.name == 'nt':
self.SetEditable(enable)
if enable:
self.SetForegroundColour(FG_COLOUR_EDIT)
else:
self.SetForegroundColour(FG_COLOUR_DIS)
else:
super(_NumberTextCtrl, self).Enable(enable)
def SetValue(self, val):
""" Set the numerical value of the text field
Args:
val (numerical type): The value to set the field to
"""
self.ChangeValue(val)
def GetValue(self):
""" Return the numerical value of the text field or None if no (valid) value is present
Warning: we return the last validated value, not the current value in the text field
"""
return self._number_value
def ChangeValue(self, val):
""" Set the value of the text field
No checks are done on the value to be correct. If this is needed, use the validator.
Args:
val (numerical type): The value to set the field to
"""
self._number_value = val
# logging.debug("Setting value to '%s' for %s", val, self.__class__.__name__)
self._display_pretty()
def get_value_str(self):
""" Return the value of the control as a string """
return wx.TextCtrl.GetValue(self)
def set_value_str(self, val):
wx.TextCtrl.SetValue(self, val)
def change_value_str(self, val):
""" Set the value of the field, without generating a change event """
wx.TextCtrl.ChangeValue(self, val)
def SetValueRange(self, minv, maxv):
""" Same as SetRange of a slider """
self.Validator.set_value_range(minv, maxv)
def GetValueRange(self):
return self.GetValidator().GetRange()
def _set_number_value(self, str_number):
""" Parse the given number string and set the internal number value
This method is used when the enter key is pressed, or when the text field loses focus, i.e.
situations where we always need to leave a valid and well formatted value.
"""
prev_num = self._number_value
if str_number is None or str_number == "":
self._number_value = None
else:
# set new value even if not validated, so that we reach the boundaries
self._number_value = self.GetValidator().get_validated_number(str_number)
# TODO: turn the text red temporarily if not valid?
# if not validated:
# logging.debug("Converted '%s' into '%s'", str_number, self._number_value)
if prev_num != self._number_value:
self._send_change_event()
# Event handlers
def _send_change_event(self):
""" Create and send a change event (wxEVT_COMMAND_ENTER) """
changeEvent = wx.CommandEvent(wx.wxEVT_COMMAND_ENTER, self.Id)
wx.PostEvent(self, changeEvent)
def on_char(self, evt):
""" This event handler increases or decreases the integer value when
the up/down cursor keys are pressed.
The event is ignored otherwise.
"""
key = evt.GetKeyCode()
prev_num = self._number_value
num = self._number_value
if key == wx.WXK_UP and self.key_step and self.IsEditable():
num = (num or 0) + self.key_step
elif key == wx.WXK_DOWN and self.key_step and self.IsEditable():
num = (num or 0) - self.key_step
else:
# Skip the event, so it can be processed in the regular way
# (As in validate typed numbers etc.)
evt.Skip()
return
val = u"%r" % num # GetNumber needs a string
self._number_value = self.GetValidator().get_validated_number(val)
if prev_num != self._number_value:
self._display_pretty() # Update the GUI immediately
self._send_change_event()
def on_focus(self, evt):
""" Select the number part (minus any unit indication) of the data in the text field """
number_length = len(self.get_value_str().rstrip(string.ascii_letters + u" µ"))
wx.CallAfter(self.SetSelection, 0, number_length)
evt.Skip()
def on_kill_focus(self, evt):
""" Display the current number value as a formatted string when the focus is lost """
wx.CallAfter(self.SetSelection, 0, 0)
str_val = wx.TextCtrl.GetValue(self)
self._set_number_value(str_val)
self._display_pretty()
evt.Skip()
def on_text_enter(self, evt):
""" Process [enter] key presses """
logging.debug("New text entered in %s", self.__class__.__name__)
# almost the same as on_kill_focus, but still display raw
wx.CallAfter(self.SetSelection, 0, 0)
str_val = wx.TextCtrl.GetValue(self)
self._set_number_value(str_val)
self._display_pretty()
# END Event handlers
class UnitNumberCtrl(_NumberTextCtrl):
def __init__(self, *args, **kwargs):
"""
unit (None or string): if None then behave like NumberTextCtrl
"""
self.unit = kwargs.pop('unit', None)
_NumberTextCtrl.__init__(self, *args, **kwargs)
def _display_pretty(self):
if self._number_value is None:
str_val = u""
elif self._number_value == 0 and self.key_step and self.unit not in units.IGNORE_UNITS:
# Special case with 0: readable_str return just "0 unit", without
# prefix. This is technically correct, but quite inconvenient and
# a little strange when the typical value has a prefix (eg, nm, kV).
# => use prefix of key_step (as it's a "small value")
_, prefix = units.get_si_scale(self.key_step)
str_val = "0 %s%s" % (prefix, self.unit)
else:
str_val = units.readable_str(self._number_value, self.unit, self.accuracy)
# Get the length of the number (string length, minus the unit length)
number_length = len(str_val.rstrip(string.ascii_letters + u" µ"))
wx.TextCtrl.ChangeValue(self, str_val)
# Select the number value
wx.CallAfter(self.SetSelection, number_length, number_length)
#########################################
# Integer controls
#########################################
class IntegerValidator(_NumberValidator):
""" This validator can be used to make sure only valid characters are
entered into a control (digits and a minus symbol).
It can also validate if the value that is present is a valid integer.
"""
def __init__(self, min_val=None, max_val=None, choices=None, unit=None):
# The regular expression to check the validity of what is being typed, is a bit different
# from a regular expression that would validate an entire string, because we need to check
# validity as the user types
self.entry_regex = u"[+{negative_sign}]?[\d]*{unit}$"
_NumberValidator.__init__(self, min_val, max_val, choices, unit)
def Clone(self):
""" Required method """
return IntegerValidator(self.min_val, self.max_val, self.choices, self.unit)
def _cast(self, str_val):
""" Cast the string value to an integer and return it
Args:
str_val (str): A string representing a number value
Returns:
(int)
Raises:
ValueError: When the string cannot be parsed correctly
"""
if self.unit and str_val.endswith(self.unit):
# Help it to find the right unit (important for complicated ones like 'px')
str_val, si_prefix, unit = decompose_si_prefix(str_val, self.unit)
else:
str_val, si_prefix, unit = decompose_si_prefix(str_val)
return int(si_scale_val(float(str_val), si_prefix))
class IntegerTextCtrl(_NumberTextCtrl):
""" This class describes a text field that may only hold integer data.
The 'min_val' and 'max_val' keyword arguments may be used to set limits on
the value contained within the control.
When the 'key_inc' argument is set, the value can be altered by the up and
down cursor keys.
The 'choices' keyword argument can be used to pass an iterable containing
valid values
If the object is created with an invalid integer value a ValueError
exception will be raised.
"""
# TODO: should use the same parameter as NumberSlider: val_range instead
# of min_val/max_val
# TODO: refactor to have IntegerTextCtrl a UnitIntegerCtrl with unit=None?
def __init__(self, *args, **kwargs):
min_val = kwargs.pop('min_val', None)
max_val = kwargs.pop('max_val', None)
choices = kwargs.pop('choices', None)
kwargs['validator'] = IntegerValidator(min_val, max_val, choices)
kwargs['key_step'] = kwargs.get('key_step', 1)
_NumberTextCtrl.__init__(self, *args, **kwargs)
def SetValue(self, val):
_NumberTextCtrl.SetValue(self, int(val))
class UnitIntegerCtrl(UnitNumberCtrl):
""" This class represents a text control which is capable of formatting
    its content according to the unit it is set to: '<int value> <unit str>'
The value defaults to 0 if none is provided. The 'unit' argument is
mandatory.
When the value is set through the API, the units are shown.
When the control gets the focus, the value is shown without the units
When focus is lost, the units will be shown again.
"""
def __init__(self, *args, **kwargs):
min_val = kwargs.pop('min_val', None)
max_val = kwargs.pop('max_val', None)
choices = kwargs.pop('choices', None)
unit = kwargs.get('unit', None)
kwargs['validator'] = IntegerValidator(min_val, max_val, choices, unit)
if 'key_step' not in kwargs and (min_val != max_val):
kwargs['key_step'] = max(int(round(_step_from_range(min_val, max_val))), 1)
UnitNumberCtrl.__init__(self, *args, **kwargs)
def SetValue(self, val):
UnitNumberCtrl.SetValue(self, int(val))
#########################################
# Float controls
#########################################
class FloatValidator(_NumberValidator):
def __init__(self, min_val=None, max_val=None, choices=None, unit=None):
# The regular expression to check the validity of what is being typed, is a bit different
# from a regular expression that would validate an entire string, because we need to check
# validity as the user types
self.entry_regex = u"[+{negative_sign}]?[\d]*[.]?[\d]*[eE]?[+-]?[\d]*{unit}$"
_NumberValidator.__init__(self, min_val, max_val, choices, unit)
def Clone(self):
""" Required method """
return FloatValidator(self.min_val, self.max_val, self.choices, self.unit)
def _cast(self, str_val):
""" Cast the string value to a float and return it
Args:
str_val (str): A string representing a number value
Returns:
(float)
Raises:
ValueError: When the string cannot be parsed correctly
"""
if self.unit and str_val.endswith(self.unit):
# Help it to find the right unit (important for complicated ones like 'px')
str_val, si_prefix, unit = decompose_si_prefix(str_val, self.unit)
else:
str_val, si_prefix, unit = decompose_si_prefix(str_val)
return si_scale_val(float(str_val), si_prefix)
class FloatTextCtrl(_NumberTextCtrl):
def __init__(self, *args, **kwargs):
min_val = kwargs.pop('min_val', None)
max_val = kwargs.pop('max_val', None)
choices = kwargs.pop('choices', None)
kwargs['validator'] = FloatValidator(min_val, max_val, choices)
kwargs['key_step'] = kwargs.get('key_step', 0.1)
_NumberTextCtrl.__init__(self, *args, **kwargs)
class UnitFloatCtrl(UnitNumberCtrl):
def __init__(self, *args, **kwargs):
min_val = kwargs.pop('min_val', None)
max_val = kwargs.pop('max_val', None)
choices = kwargs.pop('choices', None)
unit = kwargs.get('unit', None)
kwargs['validator'] = FloatValidator(min_val, max_val, choices, unit)
if 'key_step' not in kwargs and (min_val != max_val):
kwargs['key_step'] = _step_from_range(min_val, max_val)
kwargs['accuracy'] = kwargs.get('accuracy', None)
UnitNumberCtrl.__init__(self, *args, **kwargs)
| gpl-2.0 | 7,774,501,785,606,925,000 | 34.39823 | 100 | 0.604056 | false | 3.972195 | false | false | false |
davidt/reviewboard | reviewboard/notifications/webhooks.py | 2 | 14135 | from __future__ import unicode_literals
import hashlib
import hmac
import logging
from django.contrib.sites.models import Site
from django.http.request import HttpRequest
from django.utils.six.moves.urllib.parse import urlencode
from django.utils.six.moves.urllib.request import Request, urlopen
from django.template import Context, Template
from django.template.base import Lexer, Parser
from djblets.siteconfig.models import SiteConfiguration
from djblets.webapi.encoders import (JSONEncoderAdapter, ResourceAPIEncoder,
XMLEncoderAdapter)
from reviewboard import get_package_version
from reviewboard.notifications.models import WebHookTarget
from reviewboard.reviews.models import Review, ReviewRequest
from reviewboard.reviews.signals import (review_request_closed,
review_request_published,
review_request_reopened,
review_published,
reply_published)
from reviewboard.webapi.resources import resources
class FakeHTTPRequest(HttpRequest):
"""A fake HttpRequest implementation.
The WebAPI serialization methods use HttpRequest.build_absolute_uri to
generate all the links, but none of the various signals that generate
webhook events have the request plumbed through. Since we don't actually
need a valid request, this impersonates it enough to get valid results from
build_absolute_uri.
"""
_is_secure = None
_host = None
def __init__(self, user, local_site_name=None):
"""Initialize a FakeHTTPRequest.
Args:
user (django.contrib.auth.models.User):
The user who initiated the request.
local_site_name (unicode, optional):
The local site name (if the request was carried out against a
local site).
"""
super(FakeHTTPRequest, self).__init__()
self.user = user
self._local_site_name = local_site_name
if self._is_secure is None:
siteconfig = SiteConfiguration.objects.get_current()
self._is_secure = siteconfig.get('site_domain_method') == 'https'
self._host = Site.objects.get_current().domain
def is_secure(self):
return self._is_secure
def get_host(self):
return self._host
class CustomPayloadParser(Parser):
"""A custom template parser that blocks certain tags.
This extends Django's Parser class for template parsing, and removes
    some built-in tags, in order to prevent malicious use.
"""
BLACKLISTED_TAGS = ('block', 'debug', 'extends', 'include', 'load', 'ssi')
def __init__(self, *args, **kwargs):
super(CustomPayloadParser, self).__init__(*args, **kwargs)
# Remove some built-in tags that we don't want to expose.
# There are no built-in filters we have to worry about.
for tag_name in self.BLACKLISTED_TAGS:
try:
del self.tags[tag_name]
except KeyError:
pass
def render_custom_content(body, context_data={}):
"""Renders custom content for the payload using Django templating.
This will take the custom payload content template provided by
the user and render it using a stripped down version of Django's
templating system.
In order to keep the payload safe, we use a limited Context along with a
custom Parser that blocks certain template tags. This gives us
tags like {% for %} and {% if %}, but blacklists tags like {% load %}
and {% include %}.
"""
lexer = Lexer(body, origin=None)
parser = CustomPayloadParser(lexer.tokenize())
template = Template('')
template.nodelist = parser.parse()
return template.render(Context(context_data))
def dispatch_webhook_event(request, webhook_targets, event, payload):
"""Dispatch the given event and payload to the given WebHook targets."""
encoder = ResourceAPIEncoder()
bodies = {}
for webhook_target in webhook_targets:
if webhook_target.use_custom_content:
try:
body = render_custom_content(webhook_target.custom_content,
payload)
body = body.encode('utf-8')
except Exception as e:
logging.exception('Could not render WebHook payload: %s', e)
continue
else:
encoding = webhook_target.encoding
if encoding not in bodies:
try:
if encoding == webhook_target.ENCODING_JSON:
adapter = JSONEncoderAdapter(encoder)
body = adapter.encode(payload, request=request)
body = body.encode('utf-8')
elif encoding == webhook_target.ENCODING_XML:
adapter = XMLEncoderAdapter(encoder)
body = adapter.encode(payload, request=request)
elif encoding == webhook_target.ENCODING_FORM_DATA:
adapter = JSONEncoderAdapter(encoder)
body = urlencode({
'payload': adapter.encode(payload,
request=request),
})
body = body.encode('utf-8')
else:
logging.error('Unexpected WebHookTarget encoding "%s" '
'for ID %s',
encoding, webhook_target.pk)
continue
except Exception as e:
logging.exception('Could not encode WebHook payload: %s',
e)
continue
bodies[encoding] = body
else:
body = bodies[encoding]
headers = {
b'X-ReviewBoard-Event': event.encode('utf-8'),
b'Content-Type': webhook_target.encoding.encode('utf-8'),
b'Content-Length': len(body),
b'User-Agent':
('ReviewBoard-WebHook/%s' % get_package_version())
.encode('utf-8'),
}
if webhook_target.secret:
signer = hmac.new(webhook_target.secret.encode('utf-8'), body,
hashlib.sha1)
headers[b'X-Hub-Signature'] = \
('sha1=%s' % signer.hexdigest()).encode('utf-8')
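            # A receiving endpoint can verify this signature by recomputing
            #   hmac.new(shared_secret, raw_request_body, hashlib.sha1).hexdigest()
            # on its side and comparing it to the value after the "sha1=" prefix
            # (variable names here are illustrative).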
logging.info('Dispatching webhook for event %s to %s',
event, webhook_target.url)
try:
url = webhook_target.url.encode('utf-8')
urlopen(Request(url, body, headers))
except Exception as e:
logging.exception('Could not dispatch WebHook to %s: %s',
webhook_target.url, e)
def _serialize_review(review, request):
return {
'review_request': resources.review_request.serialize_object(
review.review_request, request=request),
'review': resources.review.serialize_object(
review, request=request),
'diff_comments': [
resources.filediff_comment.serialize_object(
comment, request=request)
for comment in review.comments.all()
],
'file_attachment_comments': [
resources.file_attachment_comment.serialize_object(
comment, request=request)
for comment in review.file_attachment_comments.all()
],
'screenshot_comments': [
resources.screenshot_comment.serialize_object(
comment, request=request)
for comment in review.screenshot_comments.all()
],
'general_comments': [
resources.review_general_comment.serialize_object(
comment, request=request)
for comment in review.general_comments.all()
],
}
def _serialize_reply(reply, request):
return {
'review_request': resources.review_request.serialize_object(
reply.review_request, request=request),
'reply': resources.review_reply.serialize_object(
reply, request=request),
'diff_comments': [
resources.review_reply_diff_comment.serialize_object(
comment, request=request)
for comment in reply.comments.all()
],
'file_attachment_comments': [
resources.review_reply_file_attachment_comment.serialize_object(
comment, request=request)
for comment in reply.file_attachment_comments.all()
],
'screenshot_comments': [
resources.review_reply_screenshot_comment.serialize_object(
comment, request=request)
for comment in reply.screenshot_comments.all()
],
'general_comments': [
resources.review_reply_general_comment.serialize_object(
comment, request=request)
for comment in reply.general_comments.all()
],
}
def review_request_closed_cb(user, review_request, type, **kwargs):
event = 'review_request_closed'
webhook_targets = WebHookTarget.objects.for_event(
event, review_request.local_site_id, review_request.repository_id)
if review_request.local_site_id:
local_site_name = review_request.local_site.name
else:
local_site_name = None
if webhook_targets:
if type == review_request.SUBMITTED:
close_type = 'submitted'
elif type == review_request.DISCARDED:
close_type = 'discarded'
else:
logging.error('Unexpected close type %s for review request %s '
'when dispatching webhook.',
type, review_request.pk)
return
if not user:
user = review_request.submitter
request = FakeHTTPRequest(user, local_site_name=local_site_name)
payload = {
'event': event,
'closed_by': resources.user.serialize_object(
user, request=request),
'close_type': close_type,
'review_request': resources.review_request.serialize_object(
review_request, request=request),
}
dispatch_webhook_event(request, webhook_targets, event, payload)
def review_request_published_cb(user, review_request, changedesc,
**kwargs):
event = 'review_request_published'
webhook_targets = WebHookTarget.objects.for_event(
event, review_request.local_site_id, review_request.repository_id)
if review_request.local_site_id:
local_site_name = review_request.local_site.name
else:
local_site_name = None
if webhook_targets:
request = FakeHTTPRequest(user, local_site_name=local_site_name)
payload = {
'event': event,
'is_new': changedesc is None,
'review_request': resources.review_request.serialize_object(
review_request, request=request),
}
if changedesc:
            payload['change'] = resources.change.serialize_object(
                changedesc, request=request)
dispatch_webhook_event(request, webhook_targets, event, payload)
def review_request_reopened_cb(user, review_request, **kwargs):
event = 'review_request_reopened'
webhook_targets = WebHookTarget.objects.for_event(
event, review_request.local_site_id, review_request.repository_id)
if review_request.local_site_id:
local_site_name = review_request.local_site.name
else:
local_site_name = None
if webhook_targets:
if not user:
user = review_request.submitter
request = FakeHTTPRequest(user, local_site_name=local_site_name)
payload = {
'event': event,
'reopened_by': resources.user.serialize_object(
user, request=request),
'review_request': resources.review_request.serialize_object(
review_request, request=request),
}
dispatch_webhook_event(request, webhook_targets, event, payload)
def review_published_cb(user, review, **kwargs):
event = 'review_published'
review_request = review.review_request
webhook_targets = WebHookTarget.objects.for_event(
event, review_request.local_site_id, review_request.repository_id)
if review_request.local_site_id:
local_site_name = review_request.local_site.name
else:
local_site_name = None
if webhook_targets:
request = FakeHTTPRequest(user, local_site_name=local_site_name)
payload = _serialize_review(review, request)
payload['event'] = event
dispatch_webhook_event(request, webhook_targets, event, payload)
def reply_published_cb(user, reply, **kwargs):
event = 'reply_published'
review_request = reply.review_request
webhook_targets = WebHookTarget.objects.for_event(
event, review_request.local_site_id, review_request.repository_id)
if review_request.local_site_id:
local_site_name = review_request.local_site.name
else:
local_site_name = None
if webhook_targets:
request = FakeHTTPRequest(user, local_site_name=local_site_name)
payload = _serialize_reply(reply, request)
payload['event'] = event
dispatch_webhook_event(request, webhook_targets, event, payload)
def connect_signals():
review_request_closed.connect(review_request_closed_cb,
sender=ReviewRequest)
review_request_published.connect(review_request_published_cb,
sender=ReviewRequest)
review_request_reopened.connect(review_request_reopened_cb,
sender=ReviewRequest)
review_published.connect(review_published_cb, sender=Review)
reply_published.connect(reply_published_cb, sender=Review)
| mit | 1,640,804,939,275,645,000 | 36.693333 | 79 | 0.598444 | false | 4.504461 | false | false | false |
robertnishihara/ray | python/ray/tests/test_tempfile.py | 1 | 4486 | import os
import shutil
import sys
import time
import pytest
import ray
from ray.test_utils import check_call_ray
def unix_socket_create_path(name):
unix = sys.platform != "win32"
return os.path.join(ray.utils.get_user_temp_dir(), name) if unix else None
def unix_socket_verify(unix_socket):
if sys.platform != "win32":
assert os.path.exists(unix_socket), "Socket not found: " + unix_socket
def unix_socket_delete(unix_socket):
unix = sys.platform != "win32"
return os.remove(unix_socket) if unix else None
def test_tempdir(shutdown_only):
shutil.rmtree(ray.utils.get_ray_temp_dir(), ignore_errors=True)
ray.init(
_temp_dir=os.path.join(ray.utils.get_user_temp_dir(),
"i_am_a_temp_dir"))
assert os.path.exists(
os.path.join(ray.utils.get_user_temp_dir(),
"i_am_a_temp_dir")), "Specified temp dir not found."
assert not os.path.exists(
ray.utils.get_ray_temp_dir()), ("Default temp dir should not exist.")
shutil.rmtree(
os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir"),
ignore_errors=True)
def test_tempdir_commandline():
shutil.rmtree(ray.utils.get_ray_temp_dir(), ignore_errors=True)
check_call_ray([
"start", "--head", "--temp-dir=" + os.path.join(
ray.utils.get_user_temp_dir(), "i_am_a_temp_dir2")
])
assert os.path.exists(
os.path.join(ray.utils.get_user_temp_dir(),
"i_am_a_temp_dir2")), "Specified temp dir not found."
assert not os.path.exists(
ray.utils.get_ray_temp_dir()), "Default temp dir should not exist."
check_call_ray(["stop"])
shutil.rmtree(
os.path.join(ray.utils.get_user_temp_dir(), "i_am_a_temp_dir2"),
ignore_errors=True)
def test_tempdir_long_path():
if sys.platform != "win32":
# Test AF_UNIX limits for sockaddr_un->sun_path on POSIX OSes
maxlen = 104 if sys.platform.startswith("darwin") else 108
temp_dir = os.path.join(ray.utils.get_user_temp_dir(), "z" * maxlen)
with pytest.raises(OSError):
ray.init(_temp_dir=temp_dir) # path should be too long
def test_raylet_tempfiles(shutdown_only):
expected_socket_files = ({"plasma_store", "raylet"}
if sys.platform != "win32" else set())
ray.init(num_cpus=0)
node = ray.worker._global_node
top_levels = set(os.listdir(node.get_session_dir_path()))
assert top_levels.issuperset({"sockets", "logs"})
log_files = set(os.listdir(node.get_logs_dir_path()))
log_files_expected = {
"log_monitor.out", "log_monitor.err", "plasma_store.out",
"plasma_store.err", "monitor.out", "monitor.err", "redis-shard_0.out",
"redis-shard_0.err", "redis.out", "redis.err", "raylet.out",
"raylet.err", "gcs_server.out", "gcs_server.err"
}
for expected in log_files_expected:
assert expected in log_files
assert log_files_expected.issubset(log_files)
assert log_files.issuperset(log_files_expected)
socket_files = set(os.listdir(node.get_sockets_dir_path()))
assert socket_files == expected_socket_files
ray.shutdown()
ray.init(num_cpus=2)
node = ray.worker._global_node
top_levels = set(os.listdir(node.get_session_dir_path()))
assert top_levels.issuperset({"sockets", "logs"})
time.sleep(3) # wait workers to start
log_files = set(os.listdir(node.get_logs_dir_path()))
assert log_files.issuperset(log_files_expected)
# Check numbers of worker log file.
assert sum(
1 for filename in log_files if filename.startswith("worker")) == 4
socket_files = set(os.listdir(node.get_sockets_dir_path()))
assert socket_files == expected_socket_files
def test_tempdir_privilege(shutdown_only):
os.chmod(ray.utils.get_ray_temp_dir(), 0o000)
ray.init(num_cpus=1)
session_dir = ray.worker._global_node.get_session_dir_path()
assert os.path.exists(session_dir), "Specified socket path not found."
def test_session_dir_uniqueness():
session_dirs = set()
for i in range(2):
ray.init(num_cpus=1)
        session_dirs.add(ray.worker._global_node.get_session_dir_path())
ray.shutdown()
assert len(session_dirs) == 2
if __name__ == "__main__":
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main(["-v", __file__]))
| apache-2.0 | 3,022,115,868,531,882,000 | 33.775194 | 78 | 0.633304 | false | 3.18608 | true | false | false |
yasharmaster/scancode-toolkit | tests/textcode/test_pdf.py | 4 | 3476 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from commoncode.testcase import FileBasedTesting
from textcode import pdf
import os
class TestPdf(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_get_text_lines(self):
test_file = self.get_test_loc('pdf/pdf.pdf')
result = pdf.get_text_lines(test_file)
expected = u'''pdf
"""
Extracts text from a pdf file.
"""
import contextlib
from StringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter
def get_text(location):
rs_mgr = PDFResourceManager()
extracted_text = StringIO()
with contextlib.closing(TextConverter(rs_mgr, extracted_text)) as extractor:
with open(location, \'rb\') as pdf_file:
interpreter = PDFPageInterpreter(rs_mgr, extractor)
pages = PDFPage.get_pages(pdf_file, check_extractable=True)
for page in pages:
interpreter.process_page(page)
return extracted_text
Page 1
\x0c'''.splitlines(True)
assert expected == result
def test_pdfminer_cant_parse_faulty_broadcom_doc(self):
# test for https://github.com/euske/pdfminer/issues/118
test_file = self.get_test_loc('pdf/pdfminer_bug_118/faulty.pdf')
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfdocument import PDFEncryptionError
with open(test_file, 'rb') as inputfile:
parser = PDFParser(inputfile)
try:
PDFDocument(parser)
except PDFEncryptionError:
# this should not fail of course, and will when upstream is fixed
pass
def test_get_text_lines_skip_parse_faulty_broadcom_doc(self):
test_file = self.get_test_loc('pdf/pdfminer_bug_118/faulty.pdf')
        try:
            pdf.get_text_lines(test_file)
        except Exception:
            pass
        else:
            self.fail('Exception should be thrown on faulty PDF')
| apache-2.0 | -2,979,898,856,624,776,000 | 38.5 | 82 | 0.709724 | false | 3.832415 | true | false | false |
niklaskorz/pyglet | contrib/wydget/wydget/dragndrop.py | 29 | 1090 |
from pyglet.window import mouse
import event
def DragHandler(rule, buttons=mouse.LEFT):
class _DragHandler(object):
original_position = None
mouse_buttons = buttons
@event.select(rule)
def on_drag(self, widget, x, y, dx, dy, buttons, modifiers):
if not buttons & self.mouse_buttons:
return event.EVENT_UNHANDLED
if self.original_position is None:
self.original_position = (widget.x, widget.y, widget.z)
widget.z += 1
widget.x += dx; widget.y += dy
return event.EVENT_HANDLED
@event.select(rule)
def on_drag_complete(self, widget, x, y, buttons, modifiers, ok):
if ok:
widget.z = self.original_position[2]
self.original_position = None
else:
if self.original_position is None: return
widget.x, widget.y, widget.z = self.original_position
self.original_position = None
return event.EVENT_HANDLED
return _DragHandler()
| bsd-3-clause | -1,381,028,845,524,798,200 | 33.0625 | 73 | 0.569725 | false | 4.160305 | false | false | false |
palletorsson/random_project | project/models.py | 1 | 2628 | #-*-coding:utf-8-*-
from django.db import models
from django.contrib.auth.models import User
from filebrowser.fields import FileBrowseField
import datetime
from posts.models import Post
TYPE_OF_PROJECT = (
('Cases', 'Case'),
('Projects', 'Project'),
('Workshops', 'Workshops'),
('Installation', 'Installation'),
)
PROCESS = (
('Initiated', 'Initiated'),
('Announced', 'Announced'),
('Program', 'Program'),
('Completed', 'Completed'),
('Documented', 'Documented'),
('Reported', 'Reported'),
('Post_process', 'Post_process'),
)
class Project(models.Model):
title = models.CharField(max_length=255, help_text="Title if the project. Can be anything up to 255 characters.")
slug = models.SlugField()
announce = models.CharField(max_length=255, help_text="Can be anything up to 255 characters.")
summery = models.TextField()
image = FileBrowseField("Image", max_length=200, directory="images/", extensions=[".jpg"], blank=True, null=True)
author = models.ForeignKey(User, help_text="who is posting.")
datetime_created = models.DateTimeField(auto_now_add=True)
datetime_modified = models.DateTimeField(auto_now=True)
    publish_at = models.DateTimeField(default=datetime.datetime.now,
help_text="Date and time post should become visible")
realtime_started = models.DateTimeField(blank=True, null=True)
realtime_ended = models.DateTimeField(blank=True, null=True)
active = models.BooleanField(default=False,
help_text="Controls whether or not this item is visable on the site.")
posts = models.ManyToManyField(Post, help_text="What blog does the post belong to?", related_name="posts")
type = models.CharField(max_length=40, choices=TYPE_OF_PROJECT)
process = models.CharField(max_length=12, choices=PROCESS)
credits = models.TextField(blank=True, null=True)
class Meta:
ordering = ['-publish_at',]
verbose_name_plural = 'projects'
def __unicode__(self):
return u'%s' %self.title
def process_procent(self):
if (self.process == 'Initiated'):
procent = 10
elif (self.process == 'Announced'):
procent = 20
elif (self.process == 'Program'):
procent = 40
elif (self.process == 'Completed'):
procent = 50
elif (self.process == 'Documented'):
procent = 70
elif (self.process == 'Reported'):
procent = 95
elif (self.process == 'Postprocess'):
procent = 100
else:
procent = 0
return procent
| gpl-2.0 | -263,856,529,863,515,460 | 34.513514 | 117 | 0.635845 | false | 3.870398 | false | false | false |
srault95/netcall | netcall/base_client.py | 1 | 6680 | # vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
Base RPC client class
Authors:
* Brian Granger
* Alexander Glyzov
* Axel Voitier
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov,
# Axel Voitier
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from sys import exc_info
from random import randint
from logging import getLogger
import zmq
from zmq.utils import jsonapi
from .base import RPCBase
from .errors import RemoteRPCError, RPCError
from .utils import RemoteMethod
#-----------------------------------------------------------------------------
# RPC Client base
#-----------------------------------------------------------------------------
class RPCClientBase(RPCBase):
"""An RPC Client (base class)"""
logger = getLogger('netcall.client')
def _create_socket(self):
super(RPCClientBase, self)._create_socket()
self.socket = self.context.socket(zmq.DEALER)
self.socket.setsockopt(zmq.IDENTITY, self.identity)
def _build_request(self, method, args, kwargs, ignore=False, req_id=None):
req_id = req_id or b'%x' % randint(0, 0xFFFFFFFF)
method = bytes(method)
msg_list = [b'|', req_id, method]
data_list = self._serializer.serialize_args_kwargs(args, kwargs)
msg_list.extend(data_list)
msg_list.append(bytes(int(ignore)))
return req_id, msg_list
def _send_request(self, request):
self.logger.debug('sending %r', request)
self.socket.send_multipart(request)
def _parse_reply(self, msg_list):
"""
Parse a reply from service
(should not raise an exception)
The reply is received as a multipart message:
[b'|', req_id, type, payload ...]
Returns either None or a dict {
'type' : <message_type:bytes> # ACK | OK | YIELD | FAIL
'req_id' : <id:bytes>, # unique message id
'srv_id' : <service_id:bytes> | None # only for ACK messages
'result' : <object>
}
"""
logger = self.logger
if len(msg_list) < 4 or msg_list[0] != b'|':
logger.error('bad reply %r', msg_list)
return None
msg_type = msg_list[2]
data = msg_list[3:]
result = None
srv_id = None
if msg_type == b'ACK':
srv_id = data[0]
elif msg_type in (b'OK', b'YIELD'):
try:
result = self._serializer.deserialize_result(data)
except Exception, e:
msg_type = b'FAIL'
result = e
elif msg_type == b'FAIL':
try:
error = jsonapi.loads(msg_list[3])
if error['ename'] == 'StopIteration':
result = StopIteration()
elif error['ename'] == 'GeneratorExit':
result = GeneratorExit()
else:
result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])
except Exception, e:
logger.error('unexpected error while decoding FAIL', exc_info=True)
result = RPCError('unexpected error while decoding FAIL: %s' % e)
else:
result = RPCError('bad message type: %r' % msg_type)
return dict(
type = msg_type,
req_id = msg_list[1],
srv_id = srv_id,
result = result,
)
def _generator(self, req_id, get_val_exc):
""" Mirrors a service generator on a client side
"""
#logger = self.logger
def _send_cmd(cmd, args):
_, msg_list = self._build_request(
cmd, args, None, ignore=False, req_id=req_id
)
self._send_request(msg_list)
_send_cmd('_SEND', None)
while True:
val, exc = get_val_exc()
if exc is not None:
raise exc
try:
res = yield val
except GeneratorExit:
_send_cmd('_CLOSE', None)
except:
etype, evalue, _ = exc_info()
_send_cmd('_THROW', [etype.__name__, evalue])
else:
_send_cmd('_SEND', res)
def __getattr__(self, name):
return RemoteMethod(self, name)
def call(self, proc_name, args=[], kwargs={}, result='sync', timeout=None):
"""
Call the remote method with *args and **kwargs
(may raise an exception)
Parameters
----------
proc_name : <bytes> name of the remote procedure to call
args : <tuple> positional arguments of the remote procedure
kwargs : <dict> keyword arguments of the remote procedure
result : 'sync' | 'async' | 'ignore'
timeout : <float> | None
Number of seconds to wait for a reply.
RPCTimeoutError is raised in case of timeout.
Set to None, 0 or a negative number to disable.
Returns
-------
<result:object> if result is 'sync'
<Future> if result is 'async'
None if result is 'ignore'
If remote call fails:
- raises <RemoteRPCError> if result is 'sync'
- sets <RemoteRPCError> into the <Future> if result is 'async'
"""
assert result in ('sync', 'async', 'ignore'), \
'expected any of "sync", "async", "ignore" -- got %r' % result
if not (timeout is None or isinstance(timeout, (int, float))):
raise TypeError("timeout param: <float> or None expected, got %r" % timeout)
if not self._ready:
raise RuntimeError('bind or connect must be called first')
ignore = result == 'ignore'
req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore)
self._send_request(msg_list)
if ignore:
return None
future = self._tools.Future()
self._futures[req_id] = future
if result == 'sync':
# block waiting for a reply passed by _reader
return future.result(timeout=timeout)
else:
# async
return future
| bsd-3-clause | 3,439,111,206,406,363,600 | 31.745098 | 96 | 0.504641 | false | 4.260204 | false | false | false |
edquist/autopyfactory | misc/test-pandamon.py | 1 | 10541 | #!/usr/bin/env python
#
# urllib.urlencode({'abc':'d f', 'def': '-!2'})
# 'abc=d+f&def=-%212'
# urllib.quote_plus()
#
# r = Request(url='http://www.mysite.com')
# r.add_header('User-Agent', 'awesome fetcher')
# r.add_data(urllib.urlencode({'foo': 'bar'})
# response = urlopen(r)
#
# datetime.datetime(2000,1,1)
#
# socket.gethostbyname(platform.node())
# socket.gethostbyaddr("69.59.196.211")
#
# (h, a, n )= socket.gethostbyaddr( socket.gethostbyname(platform.node()) )
# h = host, a = short alias list, n= ip address list
#
import os
import platform
import pwd
import random
import socket
import sys
import urllib
import urllib2
import datetime
'''
curl --connect-timeout 20 --max-time 180 -sS
'http://panda.cern.ch:25980/server/pandamon/query?
autopilot=updateservicelist&
status=running&
name=Job+scheduler&
grp=TestPilot&
type=tpmon&
pid=12345&
userid=sm&
doaction=&
host=gridui10.usatlas.bnl.gov&
tstart=2012-08-14+10%3A17%3A14.900791&
tstop=2000-01-01+00%3A00%3A00&
message=&
lastmod=2012-08-14+10%3A17%3A14.900791&
config=pilotScheduler.py+--queue%3DANALY_NET2-pbs+--pandasite%3DANALY_NET2+--pilot%3DatlasOfficial2&
description=TestPilot+service'
curl --connect-timeout 20 --max-time 180 -sS
'http://panda.cern.ch:25980/server/pandamon/query?
autopilot=updatepilot&
status=active&
queueid=ANALY_NET2&
tsubmit=2012-08-14+10%3A21%3A20.295097&
workernode=unassigned&
tpid=tp_gridui10_88777_9999999-102119_13&
url=http%3A%2F%2Fgridui10.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui10_88888_9999999%2Ftp_gridui10_28847_20120814-102119_13&
nickname=ANALY_NET2-pbs&
tcheck=2012-08-14+10%3A21%3A20.295375&
system=osg&jobid=3333333.0&
tenter=2012-08-14+10%3A21%3A19.521314&
host=gridui10.usatlas.bnl.gov&
state=submitted&
submithost=gridui10&
user=sm&
schedd_name=gridui10.usatlas.bnl.gov&
type=atlasOfficial2&
tstate=2012-08-14+10%3A21%3A20.295097&
errinfo=+'
works:
curl --connect-timeout 20 --max-time 180 -sS
'http://panda.cern.ch:25980/server/pandamon/query?
autopilot=updatepilot&
status=active&
queueid=BNL_CLOUD&
tsubmit=2012-08-15+22%3A58%3A57.528556&
workernode=unassigned&
tpid=9999999.3&
url=http%3A%2F%2Fgridui08.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui12_23036_20120815%2Ftp_gridui12_23036_20120815-225856_624&
nickname=BNL_CLOUD&
tcheck=2012-08-15+22%3A58%3A57.528842&
system=osg&jobid=9999999.0&
tenter=2012-08-15+22%3A58%3A56.771376&
host=gridui08.usatlas.bnl.gov&
state=submitted&
submithost=gridui08&
user=jhover&
schedd_name=gridui08.usatlas.bnl.gov&
type=atlasOfficial2&
tstate=2012-08-15+22%3A58%3A57.528556&
errinfo=+'
NOT working:
http://panda.cern.ch:25980/server/pandamon/query?
autopilot=updatepilot&
status=active&
queueid=BNL_CLOUD&
tsubmit=2012-08-16+18%3A16%3A20.803098&
workernode=unassigned&
tpid=95219.1&
url=http%3A%2F%2Fgridtest03.racf.bnl.gov%3A25880%2F2012-08-16%2FBNL_CLOUD&
type=atlasOfficial2&
tcheck=2012-08-16+18%3A16%3A20.803163&
system=osg&
jobid=14147.1&
tenter=2012-08-16+18%3A16%3A20.803170&
state=submitted&
submithost=gridui08&
user=jhover&
host=gridui08.usatlas.bnl.gov&
schedd_name=gridui08.usatlas.bnl.gov&
nickname=BNL_CLOUD&
tstate=2012-08-16+18%3A16%3A20.803172&
errinfo=
Job status sequence:
[root@gridui12 scheduler]# cat service_gridui12.usatlas.bnl.gov_sm_21388 service_gridui12.usatlas.bnl.gov_sm_707 service_gridui12.usatlas.bnl.gov_sm_1300 | grep tp_gridui12_23036_20120815-225856_624 | grep messageDB
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&queueid=BU_ATLAS_Tier2o&tsubmit=2012-08-15+22%3A58%3A57.528556&workernode=unassigned&tpid=tp_gridui12_23036_20120815-225856_624&url=http%3A%2F%2Fgridui12.usatlas.bnl.gov%3A25880%2Fschedlogs%2Ftp_gridui12_23036_20120815%2Ftp_gridui12_23036_20120815-225856_624&nickname=BU_ATLAS_Tier2o-pbs&tcheck=2012-08-15+22%3A58%3A57.528842&system=osg&jobid=39949228.0&tenter=2012-08-15+22%3A58%3A56.771376&host=gridui12.usatlas.bnl.gov&state=submitted&submithost=gridui12&user=sm&schedd_name=gridui12.usatlas.bnl.gov&type=atlasOfficial2&tstate=2012-08-15+22%3A58%3A57.528556&errinfo=+'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tschedule=2012-08-15+23%3A06%3A18.985095&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-15+23%3A06%3A18.985130&jobid=39949228.0&state=scheduled&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-15+23%3A06%3A18.985095'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tschedule=2012-08-15+23%3A09%3A15.139811&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-15+23%3A09%3A15.139847&jobid=39949228.0&state=scheduled&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-15+23%3A09%3A15.139811'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=active&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A42%3A22.543749&jobid=39949228.0&state=running&tstart=2012-08-16+05%3A42%3A22.543696&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A42%3A22.543696'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A43%3A55.515658&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A43%3A55.515617&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A43%3A55.515617&errinfo=Job+successfully+completed'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A45%3A30.689477&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A45%3A30.689436&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A45%3A30.689436&errinfo=Job+successfully+completed'
Schedulerutils.messageDB(): cmd= curl --connect-timeout 20 --max-time 180 -sS 'http://panda.cern.ch:25980/server/pandamon/query?autopilot=updatepilot&status=finished&PandaID=1577156287&workernode=atlas-cm2.bu.edu&tpid=tp_gridui12_23036_20120815-225856_624&tcheck=2012-08-16+05%3A47%3A07.572424&host=atlas-cm2.bu.edu&jobid=39949228.0&tdone=2012-08-16+05%3A47%3A07.572383&state=done&errcode=0&message=straggling_pilot_not_on_queue_but_in_DB&nickname=BU_ATLAS_Tier2o-pbs&tstate=2012-08-16+05%3A47%3A07.572383&errinfo=Job+successfully+completed'
'''
SERVER='panda.cern.ch'
PORT='25980'
SVCPATH='/server/pandamon/query?'
def runtest1():
print("Running service update...")
(h, a, n )= socket.gethostbyaddr( socket.gethostbyname(platform.node()) )
#h = host, a = short alias list, n= ip address list
tnow = datetime.datetime.utcnow()
am = { 'status' : 'running',
'name' : 'Job scheduler',
'grp' : 'TestPilot',
'type' : 'tpmon',
'pid' : os.getpid(),
'userid' : pwd.getpwuid(os.getuid()).pw_name,
'doaction' : '',
'host' : h,
'tstart' : datetime.datetime.utcnow(),
'lastmod' : datetime.datetime.utcnow(),
'message' : '',
'config' : 'BNL-CLOUD-condor',
# config=pilotScheduler.py+--queue%3DANALY_NET2-pbs+--pandasite%3DANALY_NET2+--pilot%3DatlasOfficial2&
'description': 'TestPilot service',
'cyclesec' : '360'
}
sendQuery(am)
def runtest2():
print("Running job update test...")
(host, alias, n )= socket.gethostbyaddr( socket.gethostbyname(platform.node()) )
#h = host, a = short alias list, n= ip address list
jobid= "%d.1" % (random.random() * 100000 )
am = {
'status' : 'active', # active, or finished
'state' : 'submitted', # or scheduled, running this is equivialent to globus PENDING, ACTIVE
'queueid' : 'BNL_CLOUD',
'tsubmit' : datetime.datetime.utcnow(),
'workernode' : 'unassigned',
'host' : 'unassigned',
'tpid' : jobid,
'nickname' : 'BNL_CLOUD' , # actually panda queuename, i.e. with -condor, etc.
'url' : 'http://gridtest03.racf.bnl.gov:25880/2012-08-16/BNL_CLOUD',
'user' : pwd.getpwuid(os.getuid()).pw_name,
'tcheck' : datetime.datetime.utcnow(),
'system' : 'osg',
'jobid' : jobid,
'submithost' : alias[0],
'tenter' : datetime.datetime.utcnow(),
'schedd_name' : host,
'type' : 'AutoPyFactory',
'tstate' : datetime.datetime.utcnow(),
'errinfo' : ' ', ## MUST HAVE space, or won't work!!!
}
sendQuery(am, 'updatepilot')
def sendQuery(attributemap, querytype='updateservicelist'):
'''
querytype: updateservicelist | updatepilot | currentlyqueued
'''
q = ''
for k in attributemap.keys():
q += "&%s=%s" % (k, urllib.quote_plus(str(attributemap[k])) )
qurl='http://%s:%s%s%s%s' % ( SERVER,
PORT,
SVCPATH,
'autopilot=%s' % querytype ,
q
)
print("%s" % qurl)
r = urllib2.Request(url=qurl)
#r.add_header('User-Agent', 'awesome fetcher')
#r.add_data(urllib.urlencode({'foo': 'bar'})
response = urllib2.urlopen(r)
print(response.read())
if __name__ == '__main__':
#runtest1()
#runtest2()
usage = '''test-pandamon.py <jobid> <state>
jobid, e.g. 9999999.2
state submitted | scheduled | done
'''
print("sys.argv = %s" % sys.argv)
    if len(sys.argv) < 3:
print(usage)
| gpl-3.0 | 5,064,762,909,898,847,000 | 43.104603 | 736 | 0.676501 | false | 2.533285 | true | false | false |
alanwill/aws-tailor | sam/functions/talr-director/handler.py | 1 | 7337 | # coding: utf-8
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import boto3
import os
import sys
import time
# Path to modules needed to package local lambda function for upload
currentdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(currentdir, "./vendored"))
# Modules downloaded into the vendored directory
# Logging for Serverless
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# Initializing AWS services
dynamodb = boto3.resource('dynamodb')
sts = boto3.client('sts')
sns = boto3.client('sns')
def handler(event, context):
log.debug("Received event {}".format(json.dumps(event)))
accountInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_ACCOUNTINFO'])
taskStatus = dynamodb.Table(os.environ['TAILOR_TABLENAME_TASKSTATUS'])
cbInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_CBINFO'])
dispatchRequestArn = os.environ['TAILOR_SNSARN_DISPATCH_REQUEST']
incomingMessage = json.loads(event['Records'][0]['Sns']['Message'])
try:
if incomingMessage['info'] == "LinkedAccountCreationStarted":
getAccountInfo = accountInfo.get_item(
Key={
'accountEmailAddress': incomingMessage['email']
}
)
requestId = getAccountInfo['Item']['requestId']
# Update task start status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "start",
"taskName": "CLA_CREATION",
"function": "talr-director",
"message": incomingMessage
}
)
return
except KeyError:
pass
# Look up email address and other account fields in accountInfo table
accountEmailAddress = incomingMessage['linkedAccountEmail']
getAccountInfo = accountInfo.get_item(
Key={
'accountEmailAddress': accountEmailAddress
}
)
requestId = getAccountInfo['Item']['requestId']
accountTagShortProjectName = getAccountInfo['Item']['accountTagShortProjectName']
accountTagEnvironment = getAccountInfo['Item']['accountTagEnvironment']
accountCbAlias = getAccountInfo['Item']['accountCbAlias']
# Look up account division
getCbInfo = cbInfo.get_item(
Key={
'accountCbAlias': accountCbAlias
}
)
accountDivision = getCbInfo['Item']['accountDivision'].lower()
accountCompanyCode = getCbInfo['Item']['accountCompanyCode']
accountCbId = getCbInfo['Item']['accountCbId']
if "linkedAccountId" in incomingMessage and getAccountInfo['Item']['accountEmailAddress'] == accountEmailAddress:
# Update task end status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "end",
"taskName": "CLA_CREATION",
"function": "talr-director",
"message": incomingMessage
}
)
laAccountId = incomingMessage['linkedAccountId']
print("New linked account: " + laAccountId)
updateAccountInfo = accountInfo.update_item(
Key={
'accountEmailAddress': accountEmailAddress
},
UpdateExpression='SET #accountId = :val1',
ExpressionAttributeNames={'#accountId': "accountId"},
ExpressionAttributeValues={':val1': incomingMessage['linkedAccountId']}
)
else:
# Update task failure status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "failed",
"taskName": "CLA_CREATION",
"function": "talr-director",
"message": incomingMessage
}
)
return {"code": "601", "requestId": requestId, "message": "ERROR: Linked account failed to create"}
# Start linked account validation
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "start",
"taskName": "CLA_VALIDATION",
"function": "talr-director",
"message": "Linked account: " + laAccountId
}
)
# Payer account credentials
payerAssumeRole = sts.assume_role(
RoleArn="arn:aws:iam::" + accountCbId + ":role/tailor",
RoleSessionName="talrDirectorPayerAssumeRole"
)
payerCredentials = payerAssumeRole['Credentials']
payer_aws_access_key_id = payerCredentials['AccessKeyId']
payer_aws_secret_access_key = payerCredentials['SecretAccessKey']
payer_aws_session_token = payerCredentials['SessionToken']
# Linked account credentials
laSts = boto3.client(
'sts',
aws_access_key_id=payer_aws_access_key_id,
aws_secret_access_key=payer_aws_secret_access_key,
aws_session_token=payer_aws_session_token,
)
laAssumeRole = laSts.assume_role(
RoleArn="arn:aws:iam::" + laAccountId + ":role/PayerAccountAccessRole",
RoleSessionName="talrDirectorLaAssumeRole"
)
laCredentials = laAssumeRole['Credentials']
la_aws_access_key_id = laCredentials['AccessKeyId']
la_aws_secret_access_key = laCredentials['SecretAccessKey']
la_aws_session_token = laCredentials['SessionToken']
# List roles in linked account to validate access
laIam = boto3.client(
'iam',
aws_access_key_id=la_aws_access_key_id,
aws_secret_access_key=la_aws_secret_access_key,
aws_session_token=la_aws_session_token,
)
laListRoles = laIam.list_roles()
print(laListRoles)
# Create IAM Account Alias in Linked Account
accountIamAlias = accountCompanyCode + "-" + accountDivision.lower() + "-" + \
accountTagShortProjectName + "-" + accountTagEnvironment
laCreateAccountIamAlias = laIam.create_account_alias(
AccountAlias=accountIamAlias
)
# Add account IAM alias to accountInfo table
updateAccountInfo = accountInfo.update_item(
Key={
'accountEmailAddress': accountEmailAddress
},
UpdateExpression='SET #accountIamAlias = :val1',
ExpressionAttributeNames={'#accountIamAlias': "accountIamAlias"},
ExpressionAttributeValues={':val1': accountIamAlias}
)
# Update task end status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "end",
"taskName": "CLA_VALIDATION",
"function": "talr-director",
"message": "Linked account: " + laAccountId
}
)
publishToTalrDispatchRequest = sns.publish(
TopicArn=dispatchRequestArn,
Message='{ "default" : { "requestId": "' + requestId + '", "accountEmailAddress": "' +
accountEmailAddress + '" }, "lambda" : { "requestId": "' + requestId +
'", "accountEmailAddress": "' + accountEmailAddress + '" }}'
)
| gpl-3.0 | -1,121,825,964,071,724,800 | 34.616505 | 117 | 0.615102 | false | 4.110364 | false | false | false |
jGaboardi/Facility_Location | LP_File_Creation/PCenter_CODE.py | 1 | 5634 | #p-Center Facility Location Problem
#This script creates a linear programming file to be read into an optimizer.
'''
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
'''
# Developed by: James D. Gaboardi, MSGIS
# 03/2015
# © James Gaboardi
# **Attention** **Adjust the following**
# 66 --> 'c##' needs to be changed depending on data and constraint number based on .lp file
# 66 --> '= ##\n' needs to be changed for the number of facilities to be sited
# 71 --> 'c##' needs to be changed depending on data and constraint number based on .lp file
# 83 --> 'c##' needs to be changed depending on data and constraint number based on .lp file
# Terminology & General Background for Facility Location and Summation Notation:
# * The objective of the p-center Facility Location Problem is to minimize the maximum cost
# of travel between service facilities and clients on a network.
# * [i] - a specific origin
# * [j] - a specifc destination
# * [n] - the set of origins
# * [m] - the set of destinations
# * [Cij] - travel costs between nodes
# * [W] - the maximum travel costs between service facilities and clients
# * [x#_#] - the decision variable in # row, # column position in the matrix
# * [y#] - service facility in the # row
# * [p] - the number of facilities to be sited
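# For reference, an illustrative sketch of the model that the functions below write out:
#     Minimize     W
#     Subject to   sum over rows of x[row]_[col] = 1              for every column   (assignment)
#                  sum of y[row] = p                                                 (facility count)
#                  -x[row]_[col] + y[row] >= 0                    for every row/col  (opening)
#                  sum over rows of Cij * x[row]_[col] - W <= 0   for every column   (maximum cost)
#                  with all x and y binary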
# 1. IMPORTS
# Other imports may be necessary for matrix creation and manipulation
import numpy as np
# 2. DEFINED FUNCTIONS
# Assignment Constraints
# This indicates a client can only be served by one facility.
# Each column in the matrix must equal 1.
def get_assignment_constraints(rows):
counter = 0
outtext = ''
for i in range(1,cols+1):
counter = counter + 1
temp = ' c' + str(counter) + ': '
for j in range(1,rows+1):
temp += 'x' + str(j) + '_' + str(i) + ' + '
outtext += temp[:-2] + '= 1\n'
return outtext
# Facility Constraint
# *** '= 1\n' indicates 1 facility
def get_p_facilities(rows):
outtext = ''
for i in range(1, rows+1):
temp = ''
temp += 'y' + str(i)
outtext += temp + ' + '
outtext = ' c##: ' + outtext[:-2] + '= #\n'
return outtext
# Opening Constraints
def get_opening_constraints_p_center(Cij):
counter = 151
outtext = ''
for i in range(1, rows+1):
for j in range(1, cols+1):
counter = counter + 1
outtext += ' c' + str(counter) + ': - x' + str(i) + '_' + str(j) + ' + ' + 'y' + str(i) + ' >= 0\n'
return outtext
# Maximum Cost Constraints
# This indicates that the maximum travel cost from any client to service facility is greater than the travel cost from client to client.
# This code chunk works by summing the columns not rows
def get_max_cost(rows):
counter = 1501
outtext = ''
for j in range(cols):
counter = counter + 1
temp = ' c' + str(counter) + ': '
for i in range(rows):
temp += str(Cij[i,j]) + ' x' + str(i+1) + '_' + str(j+1) + ' + '
outtext += temp[:-2] + '- W <= 0\n'
return outtext
# Declaration of Bounds
def get_bounds_allocation(Cij):
outtext = ''
for i in range(rows):
temp = ''
for j in range(cols):
temp += ' 0 <= x' + str(i+1) + '_' + str(j+1) + ' <= 1\n'
outtext += temp
return outtext
def get_bounds_facility(Cij):
outtext = ''
for i in range(rows):
outtext += ' 0 <= y' + str(i+1) + ' <= 1\n'
return outtext
# Declaration of Decision Variable (form can be: Binary, Integer, etc.)
# In this case decision variables are binary.
# *** 0 for no sited facility, 1 for a sited facilty
def get_decision_variables_p_center(Cij):
outtext = ' '
for i in range(1, rows+1):
temp = ''
for j in range(1, cols+1):
temp += 'x' + str(i) + '_' + str(j) + ' '
outtext += temp
return outtext
def get_facility_decision_variables_p_center(rows):
outtext = ''
for i in range (1, rows+1):
outtext += 'y' + str(i) + ' '
#outtext += temp
return outtext
# 3. DATA READS & VARIABLE DECLARATION
'''
########## Cost Matrix
########## Cij --> [ 0, 13, 8, 15,
########## 13, 0, 12, 11,
########## 8, 12, 0, 10,
########## 15, 11, 10, 0]
########## Read Cij in as a vector text file.
'''
Cij = np.fromfile('path/Cij.txt', dtype=float, sep='\n')
Cij = Cij.reshape(#, #)
rows,cols = Cij.shape
# 4. START TEXT FOR .lp FILE
# Declaration of Objective Function
text = "p-Center Facility Location Problem\n"
text += "'''\n"
text += 'Minimize\n'
text += ' obj: W\n'
# Declaration of Constraints
text += 'Subject To\n'
text += get_assignment_constraints(rows)
text += get_p_facilities(rows)
text += get_opening_constraints_p_center(Cij)
text += get_max_cost(rows)
# Declaration of Bounds
text += 'Bounds\n'
text += get_bounds_allocation(Cij)
text += get_bounds_facility(Cij)
# Declaration of Decision Variables form: Binaries
text += 'Binaries\n'
text += get_decision_variables_p_center(Cij)
text += get_facility_decision_variables_p_center(rows)
text += '\n'
text += 'End\n'
text += "'''\n"
text += "© James Gaboardi, 2015"
# 5. CREATE & WRITE .lp FILE TO DISK
# Fill path name -- File name must not have spaces.
outfile = open('path/name.lp', 'w')
outfile.write(text)
outfile.close() | lgpl-3.0 | 3,377,818,703,683,004,400 | 31.373563 | 136 | 0.594993 | false | 3.203641 | false | false | false |
ramnes/qtile | test/widgets/test_check_updates.py | 2 | 5942 | import libqtile.config
from libqtile.bar import Bar
from libqtile.widget.check_updates import CheckUpdates, Popen # noqa: F401
def no_op(*args, **kwargs):
pass
wrong_distro = "Barch"
good_distro = "Arch"
cmd_0_line = "export toto" # quick "monkeypatch" simulating 0 output, ie 0 update
cmd_1_line = "echo toto" # quick "monkeypatch" simulating 1 output, ie 1 update
cmd_error = "false"
nus = "No Update Avalaible"
def test_unknown_distro():
""" test an unknown distribution """
cu = CheckUpdates(distro=wrong_distro)
text = cu.poll()
assert text == "N/A"
def test_update_available(fake_qtile, fake_window):
""" test output with update (check number of updates and color) """
cu2 = CheckUpdates(distro=good_distro,
custom_command=cmd_1_line,
colour_have_updates="#123456"
)
fakebar = Bar([cu2], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu2._configure(fake_qtile, fakebar)
text = cu2.poll()
assert text == "Updates: 1"
assert cu2.layout.colour == cu2.colour_have_updates
def test_no_update_available_without_no_update_string(fake_qtile, fake_window):
""" test output with no update (without dedicated string nor color) """
cu3 = CheckUpdates(distro=good_distro, custom_command=cmd_0_line)
fakebar = Bar([cu3], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu3._configure(fake_qtile, fakebar)
text = cu3.poll()
assert text == ""
def test_no_update_available_with_no_update_string_and_color_no_updates(
fake_qtile, fake_window
):
""" test output with no update (with dedicated string and color) """
cu4 = CheckUpdates(distro=good_distro,
custom_command=cmd_0_line,
no_update_string=nus,
colour_no_updates="#654321"
)
fakebar = Bar([cu4], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu4._configure(fake_qtile, fakebar)
text = cu4.poll()
assert text == nus
assert cu4.layout.colour == cu4.colour_no_updates
def test_update_available_with_restart_indicator(monkeypatch, fake_qtile, fake_window):
""" test output with no indicator where restart needed """
cu5 = CheckUpdates(distro=good_distro,
custom_command=cmd_1_line,
restart_indicator="*",
)
monkeypatch.setattr("os.path.exists", lambda x: True)
fakebar = Bar([cu5], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu5._configure(fake_qtile, fakebar)
text = cu5.poll()
assert text == "Updates: 1*"
def test_update_available_with_execute(manager_nospawn, minimal_conf_noscreen, monkeypatch):
""" test polling after executing command """
# Use monkeypatching to patch both Popen (for execute command) and call_process
# This class returns None when first polled (to simulate that the task is still running)
# and then 0 on the second call.
class MockPopen:
def __init__(self, *args, **kwargs):
self.call_count = 0
def poll(self):
if self.call_count == 0:
self.call_count += 1
return None
return 0
# Bit of an ugly hack to replicate the above functionality but for a method.
class MockSpawn:
call_count = 0
@classmethod
def call_process(cls, *args, **kwargs):
if cls.call_count == 0:
cls.call_count += 1
return "Updates"
return ""
cu6 = CheckUpdates(distro=good_distro,
custom_command="dummy",
execute="dummy",
no_update_string=nus,
)
# Patch the necessary object
monkeypatch.setattr(cu6, "call_process", MockSpawn.call_process)
monkeypatch.setattr("libqtile.widget.check_updates.Popen", MockPopen)
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([cu6], 10)
)
]
manager_nospawn.start(config)
topbar = manager_nospawn.c.bar["top"]
assert topbar.info()["widgets"][0]["text"] == "Updates: 1"
# Clicking the widget triggers the execute command
topbar.fake_button_press(0, "top", 0, 0, button=1)
# The second time we poll the widget, the update process is complete
# and there are no more updates
_, result = manager_nospawn.c.widget["checkupdates"].eval("self.poll()")
assert result == nus
def test_update_process_error(fake_qtile, fake_window):
""" test output where update check gives error"""
cu7 = CheckUpdates(distro=good_distro,
custom_command=cmd_error,
no_update_string="ERROR",
)
fakebar = Bar([cu7], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu7._configure(fake_qtile, fakebar)
text = cu7.poll()
assert text == "ERROR"
def test_line_truncations(fake_qtile, monkeypatch, fake_window):
""" test update count is reduced"""
# Mock output to return 5 lines of text
def mock_process(*args, **kwargs):
return "1\n2\n3\n4\n5\n"
# Fedora is set up to remove 1 from line count
cu8 = CheckUpdates(distro="Fedora")
monkeypatch.setattr(cu8, "call_process", mock_process)
fakebar = Bar([cu8], 24)
fakebar.window = fake_window
fakebar.width = 10
fakebar.height = 10
fakebar.draw = no_op
cu8._configure(fake_qtile, fakebar)
text = cu8.poll()
# Should have 4 updates
assert text == "Updates: 4"
| mit | 970,441,983,041,094,500 | 30.606383 | 92 | 0.612083 | false | 3.640931 | true | false | false |
xq262144/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/messages/tests/urls.py | 119 | 2470 | from django.conf.urls import patterns, url
from django.contrib import messages
from django.core.urlresolvers import reverse
from django import forms
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext, Template
from django.template.response import TemplateResponse
from django.views.decorators.cache import never_cache
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import FormView
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
@never_cache
def add(request, message_type):
# don't default to False here, because we want to test that it defaults
# to False if unspecified
fail_silently = request.POST.get('fail_silently', None)
for msg in request.POST.getlist('messages'):
if fail_silently is not None:
getattr(messages, message_type)(request, msg,
fail_silently=fail_silently)
else:
getattr(messages, message_type)(request, msg)
show_url = reverse('django.contrib.messages.tests.urls.show')
return HttpResponseRedirect(show_url)
@never_cache
def add_template_response(request, message_type):
for msg in request.POST.getlist('messages'):
getattr(messages, message_type)(request, msg)
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
return HttpResponseRedirect(show_url)
@never_cache
def show(request):
t = Template(TEMPLATE)
return HttpResponse(t.render(RequestContext(request)))
@never_cache
def show_template_response(request):
return TemplateResponse(request, Template(TEMPLATE))
class ContactForm(forms.Form):
name = forms.CharField(required=True)
slug = forms.SlugField(required=True)
class ContactFormViewWithMsg(SuccessMessageMixin, FormView):
form_class = ContactForm
success_url = show
success_message = "%(name)s was created successfully"
urlpatterns = patterns('',
('^add/(debug|info|success|warning|error)/$', add),
url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'),
('^show/$', show),
('^template_response/add/(debug|info|success|warning|error)/$', add_template_response),
('^template_response/show/$', show_template_response),
)
| apache-2.0 | -2,582,673,930,275,093,500 | 32.835616 | 91 | 0.703239 | false | 4.04918 | false | false | false |
mfherbst/spack | var/spack/repos/builtin/packages/r-biostrings/package.py | 2 | 1937 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBiostrings(RPackage):
"""Memory efficient string containers, string matching algorithms, and
other utilities, for fast manipulation of large biological sequences
or sets of sequences."""
homepage = "https://bioconductor.org/packages/Biostrings/"
git = "https://git.bioconductor.org/packages/Biostrings.git"
version('2.44.2', commit='e4a2b320fb21c5cab3ece7b3c6fecaedfb1e5200')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@2.44.2')
| lgpl-2.1 | -6,643,904,999,085,127,000 | 45.119048 | 78 | 0.671141 | false | 3.828063 | false | false | false |
hasteur/hasteurbot_task_3 | families/gentoo_family.py | 4 | 2683 | # -*- coding: utf-8 -*-
import family
__version__ = '$Id$'
# An inofficial Gentoo wiki project.
# Ask for permission at http://gentoo-wiki.com/Help:Bots before running a bot.
# Be very careful, and set a long throttle: "until we see it is good one edit
# ever minute and one page fetch every 30 seconds, maybe a *bit* faster later".
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'gentoo'
self.languages_by_size = [
'en', 'ru', 'de', 'fr', 'tr', 'es', 'scratch', 'cs', 'nl', 'fi',
]
for l in self.languages_by_size:
self.langs[l] = '%s.gentoo-wiki.com' % l
# TODO: sort
# he: also uses the default 'Media'
self.namespaces[4] = {
'_default': u'Gentoo Linux Wiki',
}
self.namespaces[5] = {
'_default': u'Gentoo Linux Wiki talk',
'cs': u'Gentoo Linux Wiki diskuse',
'de': u'Gentoo Linux Wiki Diskussion',
'es': u'Gentoo Linux Wiki Discusión',
'fi': u'Keskustelu Gentoo Linux Wikistä',
'fr': u'Discussion Gentoo Linux Wiki',
'nl': u'Overleg Gentoo Linux Wiki',
'ru': u'Обсуждение Gentoo Linux Wiki',
'tr': u'Gentoo Linux Wiki tartışma',
}
self.namespaces[90] = {
'_default': u'Thread',
}
self.namespaces[91] = {
'_default': u'Thread talk',
}
self.namespaces[92] = {
'_default': u'Summary',
}
self.namespaces[93] = {
'_default': u'Summary talk',
}
self.namespaces[100] = {
'_default': u'Index',
'tr': u'Icerik',
}
self.namespaces[101] = {
'_default': u'Index Talk',
'tr': u'Icerik Talk',
}
self.namespaces[102] = {
'_default': u'Ebuild',
}
self.namespaces[103] = {
'_default': u'Ebuild Talk',
}
self.namespaces[104] = {
'_default': u'News',
'tr': u'Haberler',
}
self.namespaces[105] = {
'_default': u'News Talk',
'tr': u'Haberler Talk',
}
self.namespaces[106] = {
'_default': u'Man',
}
self.namespaces[107] = {
'_default': u'Man Talk',
}
self.namespaces[110] = {
'_default': u'Ucpt',
}
self.namespaces[111] = {
'_default': u'Ucpt talk',
}
self.known_families.pop('gentoo-wiki')
def version(self, code):
return "1.16alpha"
| gpl-2.0 | -8,546,192,166,647,687,000 | 27.698925 | 79 | 0.480704 | false | 3.493455 | false | false | false |
zeroSteiner/smoke-zephyr | smoke_zephyr/utilities.py | 1 | 27644 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# smoke_zephyr/utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import functools
import inspect
import ipaddress
import itertools
import logging
import os
import random
import re
import shutil
import string
import subprocess
import sys
import time
import unittest
import urllib.parse
import urllib.request
import weakref
EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,6}$', flags=re.IGNORECASE)
class AttributeDict(dict):
"""
This class allows dictionary keys to be accessed as attributes. For
example: ``ad = AttributeDict(test=1); ad['test'] == ad.test``
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class BruteforceGenerator(object):
"""
	This class allows iterating sequences for bruteforcing.
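	For example: ``list(BruteforceGenerator(1, 2, 'ab')) == ['a', 'b', 'aa', 'ab', 'ba', 'bb']``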
"""
	# requirements = itertools
def __init__(self, startlen, endlen=None, charset=None):
"""
:param int startlen: The minimum sequence size to generate.
:param int endlen: The maximum sequence size to generate.
:param charset: The characters to include in the resulting sequences.
"""
self.startlen = startlen
if endlen is None:
self.endlen = startlen
else:
self.endlen = endlen
if charset is None:
charset = list(map(chr, range(0, 256)))
elif isinstance(charset, str):
charset = list(charset)
elif isinstance(charset, bytes):
charset = list(map(chr, charset))
charset.sort()
self.charset = tuple(charset)
self.length = self.startlen
self._product = itertools.product(self.charset, repeat=self.length)
self._next = self.__next__
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
try:
value = next(self._product)
except StopIteration:
if self.length == self.endlen:
raise StopIteration
self.length += 1
self._product = itertools.product(self.charset, repeat=self.length)
value = next(self._product)
return ''.join(value)
_ArgSpec = collections.namedtuple('_ArgSpec', ('args', 'varargs', 'keywords', 'defaults'))
class Cache(object):
"""
This class provides a simple to use cache object which can be applied
as a decorator.
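	For example, ``@Cache('30s')`` placed above a function definition caches that
	function's return value for 30 seconds per unique set of arguments.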
"""
def __init__(self, timeout):
"""
:param timeout: The amount of time in seconds that a cached
result will be considered valid for.
:type timeout: int, str
"""
if isinstance(timeout, str):
timeout = parse_timespan(timeout)
self.cache_timeout = timeout
self._target_function = None
self._target_function_arg_spec = None
self.__cache = {}
self.__obj = None
def __get__(self, instance, _):
self.__obj = instance
return self
def __call__(self, *args, **kwargs):
if not getattr(self, '_target_function', False):
target_function = args[0]
if not inspect.isfunction(target_function) and not inspect.ismethod(target_function):
raise RuntimeError('the cached object must be a function or method')
arg_spec = inspect.getfullargspec(target_function) # pylint: disable=W1505
arg_spec = _ArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, keywords=arg_spec.kwonlyargs, defaults=arg_spec.defaults)
if arg_spec.varargs or arg_spec.keywords:
raise RuntimeError('the cached function can not use dynamic args or kwargs')
self._target_function = target_function
self._target_function_arg_spec = arg_spec
return functools.wraps(target_function)(self)
self.cache_clean()
if self.__obj is not None:
args = (self.__obj,) + args
self.__obj = None
is_method = True
else:
is_method = False
args = self._flatten_args(args, kwargs)
if is_method:
inst = args.popleft()
args = tuple(args)
ref = weakref.ref(inst, functools.partial(self._ref_callback, args))
cache_args = (ref,) + args
args = (inst,) + args
else:
cache_args = tuple(args)
args = tuple(args)
result, expiration = self.__cache.get(cache_args, (None, 0))
if expiration > time.time():
return result
result = self._target_function(*args)
self.__cache[cache_args] = (result, time.time() + self.cache_timeout)
return result
def __repr__(self):
return "<cached function {0} at 0x{1:x}>".format(self._target_function.__name__, id(self._target_function))
def _flatten_args(self, args, kwargs):
flattened_args = collections.deque(args)
arg_spec = self._target_function_arg_spec
arg_spec_defaults = (arg_spec.defaults or [])
default_args = tuple(arg_spec.args[:-len(arg_spec_defaults)])
default_kwargs = dict(zip(arg_spec.args[-len(arg_spec_defaults):], arg_spec_defaults))
for arg_id in range(len(args), len(arg_spec.args)):
arg_name = arg_spec.args[arg_id]
if arg_name in default_args:
if not arg_name in kwargs:
raise TypeError("{0}() missing required argument '{1}'".format(self._target_function.__name__, arg_name))
flattened_args.append(kwargs.pop(arg_name))
else:
flattened_args.append(kwargs.pop(arg_name, default_kwargs[arg_name]))
if kwargs:
unexpected_kwargs = tuple("'{0}'".format(a) for a in kwargs.keys())
raise TypeError("{0}() got an unexpected keyword argument{1} {2}".format(self._target_function.__name__, ('' if len(unexpected_kwargs) == 1 else 's'), ', '.join(unexpected_kwargs)))
return flattened_args
def _ref_callback(self, args, ref):
args = (ref,) + args
self.__cache.pop(args, None)
def cache_clean(self):
"""
Remove expired items from the cache.
"""
now = time.time()
keys_for_removal = collections.deque()
for key, (_, expiration) in self.__cache.items():
if expiration < now:
keys_for_removal.append(key)
for key in keys_for_removal:
del self.__cache[key]
def cache_clear(self):
"""
Remove all items from the cache.
"""
self.__cache = {}
class FileWalker(object):
"""
This class is used to easily iterate over files and subdirectories of a
specified parent directory.
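	For example, ``for path in FileWalker('.', skip_dirs=True): print(path)`` prints
	the path of every file beneath the current directory.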
"""
def __init__(self, filespath, absolute_path=False, skip_files=False, skip_dirs=False, filter_func=None, follow_links=False, max_depth=None):
"""
.. versionchanged:: 1.4.0
Added the *follow_links* and *max_depth* parameters.
:param str filespath: A path to either a file or a directory. If
a file is passed then that will be the only file returned
during the iteration. If a directory is passed, all files and
subdirectories will be recursively returned during the iteration.
:param bool absolute_path: Whether or not the absolute path or a
relative path should be returned.
:param bool skip_files: Whether or not to skip files.
:param bool skip_dirs: Whether or not to skip directories.
:param function filter_func: If defined, the filter_func function will
be called for each path (with the path as the one and only argument)
and if the function returns false the path will be skipped.
:param bool follow_links: Whether or not to follow directories pointed
to by symlinks.
:param max_depth: A maximum depth to recurse into.
"""
if not (os.path.isfile(filespath) or os.path.isdir(filespath)):
			raise Exception(filespath + ' is neither a file nor a directory')
if absolute_path:
self.filespath = os.path.abspath(filespath)
else:
self.filespath = os.path.relpath(filespath)
self.skip_files = skip_files
self.skip_dirs = skip_dirs
self.filter_func = filter_func
self.follow_links = follow_links
self.max_depth = float('inf') if max_depth is None else max_depth
if os.path.isdir(self.filespath):
self._walk = None
self._next = self._next_dir
elif os.path.isfile(self.filespath):
self._next = self._next_file
def __iter__(self):
return self._next()
def _skip(self, cur_file):
if self.skip_files and os.path.isfile(cur_file):
return True
if self.skip_dirs and os.path.isdir(cur_file):
return True
if self.filter_func is not None:
if not self.filter_func(cur_file):
return True
return False
def _next_dir(self):
for root, dirs, files in os.walk(self.filespath, followlinks=self.follow_links):
if root == self.filespath:
depth = 0
else:
depth = os.path.relpath(root, start=self.filespath).count(os.path.sep) + 1
if depth >= self.max_depth:
continue
for entry in itertools.chain(dirs, files):
current_path = os.path.join(root, entry)
if not self._skip(current_path):
yield current_path
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
def _next_file(self):
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
class SectionConfigParser(object):
"""
Proxy access to a section of a ConfigParser object.
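	For example, ``SectionConfigParser('server', parser).getint('port', 8080)`` reads the
	port option from the [server] section, falling back to 8080 (the section and option
	names here are purely illustrative).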
"""
__version__ = '0.2'
def __init__(self, section_name, config_parser):
"""
:param str section_name: Name of the section to proxy access for.
:param config_parser: ConfigParser object to proxy access for.
:type config_parse: :py:class:`ConfigParser.ConfigParser`
"""
self.section_name = section_name
self.config_parser = config_parser
def _get_raw(self, option, opt_type, default=None):
get_func = getattr(self.config_parser, 'get' + opt_type)
if default is None:
return get_func(self.section_name, option)
elif self.config_parser.has_option(self.section_name, option):
return get_func(self.section_name, option)
else:
return default
def get(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
"""
return self._get_raw(option, '', default)
def getint(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: int
"""
return self._get_raw(option, 'int', default)
def getfloat(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: float
"""
return self._get_raw(option, 'float', default)
def getboolean(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: bool
"""
return self._get_raw(option, 'boolean', default)
def has_option(self, option):
"""
Check that *option* exists in the configuration file.
:param str option: The name of the option to check.
:rtype: bool
"""
return self.config_parser.has_option(self.section_name, option)
def options(self):
"""
Get a list of all options that are present in the section of the
configuration.
:return: A list of all set options.
:rtype: list
"""
return self.config_parser.options(self.section_name)
def items(self):
"""
Return all options and their values in the form of a list of tuples.
:return: A list of all values and options.
:rtype: list
"""
return self.config_parser.items(self.section_name)
def set(self, option, value):
"""
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
"""
self.config_parser.set(self.section_name, option, value)
class TestCase(unittest.TestCase):
"""
This class provides additional functionality over the built in
:py:class:`unittest.TestCase` object, including better compatibility for
methods across Python 2.x and Python 3.x.
"""
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
if not hasattr(self, 'assertRegex') and hasattr(self, 'assertRegexpMatches'):
self.assertRegex = self.assertRegexpMatches
if not hasattr(self, 'assertNotRegex') and hasattr(self, 'assertNotRegexpMatches'):
self.assertNotRegex = self.assertNotRegexpMatches
if not hasattr(self, 'assertRaisesRegex') and hasattr(self, 'assertRaisesRegexp'):
self.assertRaisesRegex = self.assertRaisesRegexp
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
raise ValueError('invalid log level: ' + level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
def download(url, filename=None):
"""
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
"""
# requirements os, shutil, urllib.parse, urllib.request
if not filename:
url_parts = urllib.parse.urlparse(url)
filename = os.path.basename(url_parts.path)
url_h = urllib.request.urlopen(url)
with open(filename, 'wb') as file_h:
shutil.copyfileobj(url_h, file_h)
url_h.close()
return
def escape_single_quote(unescaped):
"""
Escape a string containing single quotes and backslashes with backslashes.
This is useful when a string is evaluated in some way.
:param str unescaped: The string to escape.
:return: The escaped string.
:rtype: str
"""
# requirements = re
return re.sub(r'(\'|\\)', r'\\\1', unescaped)
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
def grep(expression, file, flags=0, invert=False):
"""
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
"""
# requirements = re
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines
def is_valid_email_address(email_address):
"""
Check that the string specified appears to be a valid email address.
:param str email_address: The email address to validate.
:return: Whether the email address appears to be valid or not.
:rtype: bool
"""
# requirements = re
return EMAIL_REGEX.match(email_address) != None
def get_ip_list(ip_network, mask=None):
"""
Quickly convert an IPv4 or IPv6 network (CIDR or Subnet) to a list
of individual IPs in their string representation.
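	For example, ``get_ip_list('192.168.1.0/30')`` returns ``['192.168.1.1', '192.168.1.2']``.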
:param str ip_network:
:param int mask:
:return: list
"""
if mask and '/' not in ip_network:
net = ipaddress.ip_network("{0}/{1}".format(ip_network, mask))
elif '/' not in ip_network:
return [str(ipaddress.ip_address(ip_network))]
else:
net = ipaddress.ip_network(ip_network)
hosts = net.hosts()
if net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9):
# see: https://github.com/zeroSteiner/smoke-zephyr/issues/8
hosts = []
return [host.__str__() for host in hosts]
def sort_ipv4_list(ip_list, unique=True):
"""
	Sort a provided list of IPv4 addresses and optionally remove duplicate values.
	Supports IPv4 addresses with ports included (ex: [10.11.12.13:80, 10.11.12.13:8080]).
	:param list ip_list: The IPv4 addresses to sort.
	:param bool unique: Remove duplicate values when True.
	:return: The sorted list of IP addresses.
	:rtype: list
"""
if unique:
ip_list = list(set(ip_list))
ipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: (
int(ip.split(".")[0]),
int(ip.split(".")[1]),
int(ip.split(".")[2]),
int(ip.split(".")[3].split(':')[0]),
int(ip.split(":")[1]) if ":" in ip else 0
))
return ipv4_list
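# Illustrative example (not part of the original module):
#   sort_ipv4_list(['10.0.0.10', '10.0.0.2:8080', '10.0.0.2:80', '10.0.0.10'])
#   -> ['10.0.0.2:80', '10.0.0.2:8080', '10.0.0.10']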
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
"""
close_fds = True
startupinfo = None
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
uri = uri.replace('&', '^&')
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
elif which('gvfs-open'):
proc_args.append(which('gvfs-open'))
elif which('xdg-open'):
proc_args.append(which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)
return proc_h.wait() == 0
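# Illustrative usage (not part of the original module): opens the platform's default
# handler and returns True on success; raises RuntimeError when no handler is found.
#   open_uri('https://example.com')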
def parse_case_camel_to_snake(camel):
"""
Convert a string from CamelCase to snake_case.
:param str camel: The CamelCase string to convert.
:return: The snake_case version of string.
:rtype: str
"""
# requirements = re
return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', camel).lower()
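# Illustrative examples (not part of the original module):
#   parse_case_camel_to_snake('CamelCase')   -> 'camel_case'
#   parse_case_camel_to_snake('HTTPServer2') -> 'http_server2'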
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:])
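# Illustrative examples (not part of the original module):
#   parse_case_snake_to_camel('snake_case')        -> 'SnakeCase'
#   parse_case_snake_to_camel('snake_case', False) -> 'snakeCase'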
def parse_server(server, default_port):
"""
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
"""
server = server.rsplit(':', 1)
host = server[0]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
if len(server) == 1:
return (host, default_port)
port = server[1]
if not port:
port = default_port
else:
port = int(port)
return (host, port)
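# Illustrative examples (not part of the original module):
#   parse_server('www.google.com:443', 80) -> ('www.google.com', 443)
#   parse_server('www.google.com', 80)     -> ('www.google.com', 80)
#   parse_server('[::1]:8080', 80)         -> ('::1', 8080)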
def parse_timespan(timedef):
"""
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
"""
if isinstance(timedef, int):
return timedef
converter_order = ('w', 'd', 'h', 'm', 's')
converters = {
'w': 604800,
'd': 86400,
'h': 3600,
'm': 60,
's': 1
}
timedef = timedef.lower()
if timedef.isdigit():
return int(timedef)
elif len(timedef) == 0:
return 0
seconds = -1
for spec in converter_order:
timedef = timedef.split(spec)
if len(timedef) == 1:
timedef = timedef[0]
continue
elif len(timedef) > 2 or not timedef[0].isdigit():
seconds = -1
break
adjustment = converters[spec]
seconds = max(seconds, 0)
seconds += (int(timedef[0]) * adjustment)
timedef = timedef[1]
if not len(timedef):
break
if seconds < 0:
raise ValueError('invalid time format')
return seconds
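# Illustrative examples (not part of the original module):
#   parse_timespan('1m30s') -> 90
#   parse_timespan('2h')    -> 7200
#   parse_timespan('1w1d')  -> 691200
#   parse_timespan(45)      -> 45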
def parse_to_slug(words, maxlen=24):
"""
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
"""
slug = ''
maxlen = min(maxlen, len(words))
for c in words:
if len(slug) == maxlen:
break
c = ord(c)
if c == 0x27:
continue
elif c >= 0x30 and c <= 0x39:
slug += chr(c)
elif c >= 0x41 and c <= 0x5a:
slug += chr(c + 0x20)
elif c >= 0x61 and c <= 0x7a:
slug += chr(c)
elif len(slug) and slug[-1] != '-':
slug += '-'
if len(slug) and slug[-1] == '-':
slug = slug[:-1]
return slug
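# Illustrative example (not part of the original module):
#   parse_to_slug("Hello, World! It's a Test.") -> 'hello-world-its-a-test'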
def random_string_alphanumeric(size):
"""
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
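# Illustrative example (not part of the original module); the output is random:
#   random_string_alphanumeric(8)  # e.g. 'aZ3kP9qL'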
def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
def selection_collision(selections, poolsize):
"""
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
"""
	# requirements = sys
probability = 100.0
poolsize = float(poolsize)
for i in range(selections):
probability = probability * (poolsize - i) / poolsize
probability = (100.0 - probability)
return probability
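# Illustrative example (not part of the original module): the classic birthday problem,
# 23 selections from a pool of 365 values:
#   selection_collision(23, 365) -> approximately 50.7 (percent chance of a collision)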
def unescape_single_quote(escaped):
"""
Unescape a string which uses backslashes to escape single quotes.
:param str escaped: The string to unescape.
:return: The unescaped string.
:rtype: str
"""
escaped = escaped.replace('\\\\', '\\')
escaped = escaped.replace('\\\'', '\'')
return escaped
def unique(seq, key=None):
"""
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
"""
if key is None:
key = lambda x: x
preserved_type = type(seq)
if preserved_type not in (list, tuple):
raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__))
seen = []
result = []
for item in seq:
marker = key(item)
if marker in seen:
continue
seen.append(marker)
result.append(item)
return preserved_type(result)
def weighted_choice(choices, weight):
"""
Make a random selection from the specified choices. Apply the *weight*
function to each to return a positive integer representing shares of
	selection pool the choice should receive. The *weight* function is passed a
single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
	:param weight: The function used to gather weight information for the choices.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
"""
# requirements = random
weights = []
# get weight values for each of the choices
for choice in choices:
choice_weight = weight(choice)
if not (isinstance(choice_weight, int) and choice_weight > 0):
raise TypeError('weight results must be positive integers')
weights.append(choice_weight)
# make a selection within the acceptable range
selection = random.randint(0, sum(weights) - 1)
# find and return the corresponding choice
for idx, choice in enumerate(choices):
if selection < sum(weights[:idx + 1]):
return choice
raise RuntimeError('no selection could be made')
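# Illustrative example (not part of the original module): select 'b' roughly three
# times as often as 'a':
#   weighted_choice(['a', 'b'], lambda c: 1 if c == 'a' else 3)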
def which(program):
"""
Locate an executable binary's full path by its name.
	:param str program: The executable's name.
:return: The full path to the executable.
:rtype: str
"""
# requirements = os
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
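# Illustrative usage (not part of the original module); the result depends on the local PATH:
#   which('python')  # e.g. '/usr/bin/python', or None when not found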
def xfrange(start, stop=None, step=1):
"""
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
"""
if stop is None:
stop = start
start = 0.0
start = float(start)
while start < stop:
yield start
start += step
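# Illustrative example (not part of the original module):
#   list(xfrange(0, 1, 0.25)) -> [0.0, 0.25, 0.5, 0.75]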
| bsd-3-clause | -767,310,314,023,425,400 | 30.811277 | 184 | 0.704927 | false | 3.28743 | true | false | false |
dmlc/tvm | python/tvm/micro/artifact.py | 3 | 11630 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""""Defines abstractions around compiler artifacts produced in compiling micro TVM binaries."""
import hashlib
import io
import os
import json
import shutil
import tarfile
class ArtifactFileNotFoundError(Exception):
"""Raised when an artifact file cannot be found on disk."""
class ArtifactBadSymlinkError(Exception):
"""Raised when an artifact symlink points outside the base directory."""
class ArtifactBadArchiveError(Exception):
"""Raised when an artifact archive is malformed."""
class ImmobileArtifactError(Exception):
"""Raised when an artifact is declared immobile and thus cannot be archived."""
class ArchiveModifiedError(Exception):
"""Raised when the underlying files in a metadata-only archive were modified after archiving."""
def sha256_hexdigest(path):
with open(path, "rb") as path_fd:
h = hashlib.sha256()
chunk = path_fd.read(1 * 1024 * 1024)
while chunk:
h.update(chunk)
chunk = path_fd.read(1 * 1024 * 1024)
return h.hexdigest()
def _validate_metadata_only(metadata):
"""Validate that the files in a metadata-only archive have not changed."""
problems = []
for files in metadata["labelled_files"].values():
for f in files:
disk_path = os.path.join(metadata["base_dir"], f)
try:
sha = sha256_hexdigest(disk_path)
except FileNotFoundError:
problems.append(f"{f}: original file not found")
continue
expected_sha = metadata["file_digests"][f]
if sha != expected_sha:
problems.append(f"{f}: sha256 mismatch: expected {expected_sha}, got {sha}")
if problems:
raise ArchiveModifiedError(
"Files in metadata-only archive have been modified:\n"
+ "\n".join([f" * {p}" for p in problems])
)
class Artifact:
"""Describes a compiler artifact and defines common logic to archive it for transport."""
# A version number written to the archive.
ENCODING_VERSION = 2
# A unique string identifying the type of artifact in an archive. Subclasses must redefine this
# variable.
ARTIFACT_TYPE = None
@classmethod
def unarchive(cls, archive_path, base_dir):
"""Unarchive an artifact into base_dir.
Parameters
----------
archive_path : str
Path to the archive file.
base_dir : str
Path to a non-existent, empty directory under which the artifact will live. If working
with a metadata-only archive, this directory will just hold the metadata.json.
Returns
-------
Artifact :
The unarchived artifact.
"""
if os.path.exists(base_dir):
raise ValueError(f"base_dir exists: {base_dir}")
base_dir_parent, base_dir_name = os.path.split(base_dir)
temp_dir = os.path.join(base_dir_parent, f"__tvm__{base_dir_name}")
os.mkdir(temp_dir)
try:
with tarfile.open(archive_path) as tar_f:
tar_f.extractall(temp_dir)
temp_dir_contents = os.listdir(temp_dir)
if len(temp_dir_contents) != 1:
raise ArtifactBadArchiveError(
"Expected exactly 1 subdirectory at root of archive, got "
f"{temp_dir_contents!r}"
)
metadata_path = os.path.join(temp_dir, temp_dir_contents[0], "metadata.json")
            if not os.path.exists(metadata_path):
raise ArtifactBadArchiveError("No metadata.json found in archive")
with open(metadata_path) as metadata_f:
metadata = json.load(metadata_f)
version = metadata.get("version")
if version != cls.ENCODING_VERSION:
raise ArtifactBadArchiveError(
f"archive version: expect {cls.EXPECTED_VERSION}, found {version}"
)
metadata_only = metadata.get("metadata_only")
if metadata_only:
_validate_metadata_only(metadata)
os.rename(os.path.join(temp_dir, temp_dir_contents[0]), base_dir)
artifact_cls = cls
for sub_cls in cls.__subclasses__():
if sub_cls.ARTIFACT_TYPE is not None and sub_cls.ARTIFACT_TYPE == metadata.get(
"artifact_type"
):
artifact_cls = sub_cls
break
return artifact_cls.from_unarchived(
base_dir if not metadata_only else metadata["base_dir"],
metadata["labelled_files"],
metadata["metadata"],
immobile=metadata.get("immobile"),
)
finally:
shutil.rmtree(temp_dir)
@classmethod
def from_unarchived(cls, base_dir, labelled_files, metadata, immobile):
return cls(base_dir, labelled_files, metadata, immobile)
def __init__(self, base_dir, labelled_files, metadata, immobile=False):
"""Create a new artifact.
Parameters
----------
base_dir : str
The path to a directory on disk which contains all the files in this artifact.
labelled_files : Dict[str, str]
A dict mapping a file label to the relative paths of the files that carry that label.
metadata : Dict
            A dict containing arbitrary JSON-serializable key-value data describing the artifact.
immobile : bool
True when this artifact can't be used after being moved out of its current location on
disk. This can happen when artifacts contain absolute paths or when it's not feasible to
include enough files in the artifact to reliably re-run commands in arbitrary locations.
            Setting this flag will cause archive() to raise ImmobileArtifactError.
"""
self.base_dir = os.path.realpath(base_dir)
self.labelled_files = labelled_files
self.metadata = metadata
self.immobile = immobile
for label, files in labelled_files.items():
for f in files:
f_path = os.path.join(self.base_dir, f)
if not os.path.lexists(f_path):
raise ArtifactFileNotFoundError(f"{f} (label {label}): not found at {f_path}")
if os.path.islink(f_path):
                    link_path = os.readlink(f_path)
if os.path.isabs(link_path):
link_fullpath = link_path
else:
link_fullpath = os.path.join(os.path.dirname(f_path), link_path)
link_fullpath = os.path.realpath(link_fullpath)
if not link_fullpath.startswith(self.base_dir):
raise ArtifactBadSymlinkError(
f"{f} (label {label}): symlink points outside artifact tree"
)
def abspath(self, rel_path):
"""Return absolute path to the member with the given relative path."""
return os.path.join(self.base_dir, rel_path)
def label(self, label):
"""Return a list of relative paths to files with the given label."""
return self.labelled_files[label]
def label_abspath(self, label):
return [self.abspath(p) for p in self.labelled_files[label]]
def archive(self, archive_path, metadata_only=False):
"""Create a relocatable tar archive of the artifacts.
Parameters
----------
archive_path : str
Path to the tar file to create. Or, path to a directory, under which a tar file will be
created named {base_dir}.tar.
metadata_only : bool
If true, don't archive artifacts; instead, just archive metadata plus original
            base_dir. A metadata-only archive can be unarchived and used like a regular archive
provided none of the files have changed in their original locations on-disk.
Returns
-------
str :
            The value of archive_path, after potentially applying the computation described above.
Raises
------
        ImmobileArtifactError :
When immobile=True was passed to the constructor.
"""
if self.immobile and not metadata_only:
raise ImmobileArtifactError("This artifact can't be moved")
if os.path.isdir(archive_path):
archive_path = os.path.join(archive_path, f"{os.path.basename(self.base_dir)}.tar")
archive_name = os.path.splitext(os.path.basename(archive_path))[0]
with tarfile.open(archive_path, "w") as tar_f:
def _add_file(name, data, f_type):
tar_info = tarfile.TarInfo(name=name)
tar_info.type = f_type
data_bytes = bytes(data, "utf-8")
                tar_info.size = len(data_bytes)
tar_f.addfile(tar_info, io.BytesIO(data_bytes))
metadata = {
"version": self.ENCODING_VERSION,
"labelled_files": self.labelled_files,
"metadata": self.metadata,
"metadata_only": False,
}
if metadata_only:
metadata["metadata_only"] = True
metadata["base_dir"] = self.base_dir
metadata["immobile"] = self.immobile
metadata["file_digests"] = {}
for files in self.labelled_files.values():
for f in files:
metadata["file_digests"][f] = sha256_hexdigest(self.abspath(f))
_add_file(
f"{archive_name}/metadata.json",
json.dumps(metadata, indent=2, sort_keys=True),
tarfile.REGTYPE,
)
for dir_path, _, files in os.walk(self.base_dir):
for f in files:
file_path = os.path.join(dir_path, f)
archive_file_path = os.path.join(
archive_name, os.path.relpath(file_path, self.base_dir)
)
if not os.path.islink(file_path):
tar_f.add(file_path, archive_file_path, recursive=False)
continue
link_path = os.readlink(file_path)
if not os.path.isabs(link_path):
tar_f.add(file_path, archive_file_path, recursive=False)
continue
relpath = os.path.relpath(link_path, os.path.dirname(file_path))
_add_file(archive_file_path, relpath, tarfile.LNKTYPE)
return archive_path
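# Illustrative round-trip (not part of the original module); the paths, labels and
# metadata below are hypothetical and the labelled files must already exist on disk:
#   art = Artifact('/tmp/build', {'binaries': ['prog.bin']}, {'target': 'host'})
#   tar_path = art.archive('/tmp/out')                       # writes /tmp/out/build.tar
#   restored = Artifact.unarchive(tar_path, '/tmp/restored')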
| apache-2.0 | -3,022,239,664,703,990,300 | 38.423729 | 100 | 0.580567 | false | 4.38537 | false | false | false |
andrewyoung1991/supriya | supriya/tools/servertools/test/test_Group_append.py | 1 | 2248 | # -*- encoding: utf-8 -*-
import pytest
from abjad.tools import systemtools
from supriya import synthdefs
from supriya.tools import servertools
@pytest.fixture(scope='function')
def server(request):
def server_teardown():
server.quit()
server = servertools.Server().boot()
request.addfinalizer(server_teardown)
return server
def test_Group_append_01(server):
group_a = servertools.Group()
group_a.allocate(target_node=server)
group_b = servertools.Group()
group_b.allocate(target_node=server)
synthdef = synthdefs.test
assert not synthdef.is_allocated
synth_a = servertools.Synth(synthdef)
assert not synthdef.is_allocated
assert not synth_a.is_allocated
group_a.append(synth_a)
assert synthdef.is_allocated
assert synth_a.is_allocated
assert synth_a.parent is group_a
assert synth_a in group_a
assert synth_a not in group_b
server_state = str(server.query_remote_nodes())
assert systemtools.TestManager.compare(
server_state,
'''
NODE TREE 0 group
1 group
1001 group
1000 group
1002 test
''',
), server_state
group_b.append(synth_a)
assert synthdef.is_allocated
assert synth_a.is_allocated
assert synth_a.parent is group_b
assert synth_a in group_b
assert synth_a not in group_a
server_state = str(server.query_remote_nodes())
assert systemtools.TestManager.compare(
server_state,
'''
NODE TREE 0 group
1 group
1001 group
1002 test
1000 group
''',
), server_state
synth_b = servertools.Synth(synthdef)
assert not synth_b.is_allocated
assert synth_b.parent is None
group_b.append(synth_b)
assert synth_b.is_allocated
assert synth_b.parent is group_b
server_state = str(server.query_remote_nodes())
assert systemtools.TestManager.compare(
server_state,
'''
NODE TREE 0 group
1 group
1001 group
1002 test
1003 test
1000 group
''',
), server_state
| mit | 1,280,575,556,176,140,500 | 24.258427 | 51 | 0.604982 | false | 3.803723 | true | false | false |
petecummings/django-blog-zinnia | zinnia/tests/test_templatetags.py | 7 | 52239 | """Test cases for Zinnia's templatetags"""
from datetime import date
from django.test import TestCase
from django.utils import timezone
from django.template import Context
from django.template import Template
from django.template import TemplateSyntaxError
from django.db.models.signals import post_save
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from django.contrib.auth.tests.utils import skipIfCustomUser
import django_comments as comments
from django_comments.models import CommentFlag
from tagging.models import Tag
from zinnia.models.entry import Entry
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.managers import DRAFT
from zinnia.managers import PUBLISHED
from zinnia.flags import PINGBACK, TRACKBACK
from zinnia.tests.utils import datetime
from zinnia.tests.utils import urlEqual
from zinnia.signals import disconnect_entry_signals
from zinnia.signals import disconnect_discussion_signals
from zinnia.signals import flush_similar_cache_handler
from zinnia.templatetags.zinnia import widont
from zinnia.templatetags.zinnia import week_number
from zinnia.templatetags.zinnia import get_authors
from zinnia.templatetags.zinnia import get_gravatar
from zinnia.templatetags.zinnia import get_tag_cloud
from zinnia.templatetags.zinnia import get_categories
from zinnia.templatetags.zinnia import get_categories_tree
from zinnia.templatetags.zinnia import zinnia_pagination
from zinnia.templatetags.zinnia import zinnia_statistics
from zinnia.templatetags.zinnia import get_draft_entries
from zinnia.templatetags.zinnia import get_recent_entries
from zinnia.templatetags.zinnia import get_random_entries
from zinnia.templatetags.zinnia import zinnia_breadcrumbs
from zinnia.templatetags.zinnia import get_popular_entries
from zinnia.templatetags.zinnia import get_similar_entries
from zinnia.templatetags.zinnia import get_recent_comments
from zinnia.templatetags.zinnia import get_recent_linkbacks
from zinnia.templatetags.zinnia import get_featured_entries
from zinnia.templatetags.zinnia import get_calendar_entries
from zinnia.templatetags.zinnia import get_archives_entries
from zinnia.templatetags.zinnia import get_archives_entries_tree
from zinnia.templatetags.zinnia import user_admin_urlname
from zinnia.templatetags.zinnia import comment_admin_urlname
class TemplateTagsTestCase(TestCase):
"""Test cases for Template tags"""
def setUp(self):
disconnect_entry_signals()
disconnect_discussion_signals()
params = {'title': 'My entry',
'content': 'My content',
'tags': 'zinnia, test',
'publication_date': datetime(2010, 1, 1, 12),
'slug': 'my-entry'}
self.entry = Entry.objects.create(**params)
self.site = Site.objects.get_current()
def publish_entry(self):
self.entry.status = PUBLISHED
self.entry.featured = True
self.entry.sites.add(self.site)
self.entry.save()
def make_local(self, date_time):
"""
Convert aware datetime to local datetime.
"""
if timezone.is_aware(date_time):
return timezone.localtime(date_time)
return date_time
def test_get_categories(self):
source_context = Context()
with self.assertNumQueries(0):
context = get_categories(source_context)
self.assertEqual(len(context['categories']), 0)
self.assertEqual(context['template'], 'zinnia/tags/categories.html')
self.assertEqual(context['context_category'], None)
category = Category.objects.create(title='Category 1',
slug='category-1')
self.entry.categories.add(category)
self.publish_entry()
source_context = Context({'category': category})
with self.assertNumQueries(0):
context = get_categories(source_context, 'custom_template.html')
self.assertEqual(len(context['categories']), 1)
self.assertEqual(context['categories'][0].count_entries_published, 1)
self.assertEqual(context['template'], 'custom_template.html')
self.assertEqual(context['context_category'], category)
def test_get_categories_tree(self):
source_context = Context()
with self.assertNumQueries(0):
context = get_categories_tree(source_context)
self.assertEqual(len(context['categories']), 0)
self.assertEqual(context['template'],
'zinnia/tags/categories_tree.html')
self.assertEqual(context['context_category'], None)
category = Category.objects.create(title='Category 1',
slug='category-1')
source_context = Context({'category': category})
with self.assertNumQueries(0):
context = get_categories_tree(
source_context, 'custom_template.html')
self.assertEqual(len(context['categories']), 1)
self.assertEqual(context['template'], 'custom_template.html')
self.assertEqual(context['context_category'], category)
@skipIfCustomUser
def test_get_authors(self):
source_context = Context()
with self.assertNumQueries(0):
context = get_authors(source_context)
self.assertEqual(len(context['authors']), 0)
self.assertEqual(context['template'], 'zinnia/tags/authors.html')
self.assertEqual(context['context_author'], None)
author = Author.objects.create_user(username='webmaster',
email='[email protected]')
self.entry.authors.add(author)
self.publish_entry()
source_context = Context({'author': author})
with self.assertNumQueries(0):
context = get_authors(source_context, 'custom_template.html')
self.assertEqual(len(context['authors']), 1)
self.assertEqual(context['authors'][0].count_entries_published, 1)
self.assertEqual(context['template'], 'custom_template.html')
self.assertEqual(context['context_author'], author)
def test_get_recent_entries(self):
with self.assertNumQueries(0):
context = get_recent_entries()
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_recent.html')
self.publish_entry()
with self.assertNumQueries(0):
context = get_recent_entries(3, 'custom_template.html')
self.assertEqual(len(context['entries']), 1)
self.assertEqual(context['template'], 'custom_template.html')
with self.assertNumQueries(0):
context = get_recent_entries(0)
self.assertEqual(len(context['entries']), 0)
def test_get_featured_entries(self):
with self.assertNumQueries(0):
context = get_featured_entries()
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_featured.html')
self.publish_entry()
with self.assertNumQueries(0):
context = get_featured_entries(3, 'custom_template.html')
self.assertEqual(len(context['entries']), 1)
self.assertEqual(context['template'], 'custom_template.html')
with self.assertNumQueries(0):
context = get_featured_entries(0)
self.assertEqual(len(context['entries']), 0)
def test_draft_entries(self):
with self.assertNumQueries(0):
context = get_draft_entries()
self.assertEqual(len(context['entries']), 1)
self.assertEqual(context['template'],
'zinnia/tags/entries_draft.html')
self.publish_entry()
with self.assertNumQueries(0):
context = get_draft_entries(3, 'custom_template.html')
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'], 'custom_template.html')
with self.assertNumQueries(0):
context = get_draft_entries(0)
self.assertEqual(len(context['entries']), 0)
def test_get_random_entries(self):
with self.assertNumQueries(0):
context = get_random_entries()
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_random.html')
self.publish_entry()
with self.assertNumQueries(0):
context = get_random_entries(3, 'custom_template.html')
self.assertEqual(len(context['entries']), 1)
self.assertEqual(context['template'], 'custom_template.html')
with self.assertNumQueries(0):
context = get_random_entries(0)
self.assertEqual(len(context['entries']), 0)
def test_get_popular_entries(self):
with self.assertNumQueries(0):
context = get_popular_entries()
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_popular.html')
self.publish_entry()
with self.assertNumQueries(0):
context = get_popular_entries(3, 'custom_template.html')
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'], 'custom_template.html')
params = {'title': 'My second entry',
'content': 'My second content',
'tags': 'zinnia, test',
'status': PUBLISHED,
'comment_count': 2,
'slug': 'my-second-entry'}
second_entry = Entry.objects.create(**params)
second_entry.sites.add(self.site)
self.entry.comment_count = 1
self.entry.save()
with self.assertNumQueries(0):
context = get_popular_entries(3)
self.assertEqual(list(context['entries']), [second_entry, self.entry])
self.entry.comment_count = 2
self.entry.save()
with self.assertNumQueries(0):
context = get_popular_entries(3)
self.assertEqual(list(context['entries']), [second_entry, self.entry])
self.entry.comment_count = 3
self.entry.save()
with self.assertNumQueries(0):
context = get_popular_entries(3)
self.assertEqual(list(context['entries']), [self.entry, second_entry])
self.entry.status = DRAFT
self.entry.save()
with self.assertNumQueries(0):
context = get_popular_entries(3)
self.assertEqual(list(context['entries']), [second_entry])
def test_get_similar_entries(self):
post_save.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid='flush_cache')
self.publish_entry()
source_context = Context({'object': self.entry})
with self.assertNumQueries(0):
context = get_similar_entries(source_context)
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_similar.html')
source_context = Context({'entry': self.entry})
with self.assertNumQueries(1):
context = get_similar_entries(source_context)
self.assertEqual(len(context['entries']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_similar.html')
params = {'title': 'My second entry',
'content': 'This is the second content of my testing',
'excerpt': 'Similarity testing',
'status': PUBLISHED,
'slug': 'my-second-entry'}
second_entry = Entry.objects.create(**params)
second_entry.sites.add(self.site)
params = {'title': 'My third entry',
'content': 'This is the third content for testing',
'excerpt': 'Similarity testing',
'status': PUBLISHED,
'slug': 'my-third-entry'}
third_entry = Entry.objects.create(**params)
third_entry.sites.add(self.site)
with self.assertNumQueries(2):
context = get_similar_entries(source_context, 3,
'custom_template.html')
self.assertEqual(len(context['entries']), 2)
self.assertEqual(context['entries'][0].pk, second_entry.pk)
self.assertEqual(context['template'], 'custom_template.html')
with self.assertNumQueries(0):
context = get_similar_entries(source_context, 3)
second_site = Site.objects.create(domain='second', name='second')
second_entry.sites.add(second_site)
with override_settings(SITE_ID=second_site.pk):
with self.assertNumQueries(2):
context = get_similar_entries(source_context, 3)
self.assertEqual(len(context['entries']), 0)
source_context = Context({'entry': second_entry})
with self.assertNumQueries(1):
context = get_similar_entries(source_context)
self.assertEqual(len(context['entries']), 2)
post_save.disconnect(
sender=Entry, dispatch_uid='flush_cache')
def test_get_archives_entries(self):
with self.assertNumQueries(0):
context = get_archives_entries()
self.assertEqual(len(context['archives']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_archives.html')
self.publish_entry()
params = {'title': 'My second entry',
'content': 'My second content',
'tags': 'zinnia, test',
'status': PUBLISHED,
'publication_date': datetime(2009, 1, 1),
'slug': 'my-second-entry'}
second_entry = Entry.objects.create(**params)
second_entry.sites.add(self.site)
with self.assertNumQueries(0):
context = get_archives_entries('custom_template.html')
self.assertEqual(len(context['archives']), 2)
self.assertEqual(
context['archives'][0],
self.make_local(self.entry.publication_date).replace(
day=1, hour=0))
self.assertEqual(
context['archives'][1],
self.make_local(second_entry.publication_date).replace(
day=1, hour=0))
self.assertEqual(context['template'], 'custom_template.html')
def test_get_archives_tree(self):
with self.assertNumQueries(0):
context = get_archives_entries_tree()
self.assertEqual(len(context['archives']), 0)
self.assertEqual(context['template'],
'zinnia/tags/entries_archives_tree.html')
self.publish_entry()
params = {'title': 'My second entry',
'content': 'My second content',
'tags': 'zinnia, test',
'status': PUBLISHED,
'publication_date': datetime(2009, 1, 10),
'slug': 'my-second-entry'}
second_entry = Entry.objects.create(**params)
second_entry.sites.add(self.site)
with self.assertNumQueries(0):
context = get_archives_entries_tree('custom_template.html')
self.assertEqual(len(context['archives']), 2)
self.assertEqual(
context['archives'][0],
self.make_local(
second_entry.publication_date).replace(hour=0))
self.assertEqual(
context['archives'][1],
self.make_local(
self.entry.publication_date).replace(hour=0))
self.assertEqual(context['template'], 'custom_template.html')
def test_get_calendar_entries_no_params(self):
source_context = Context()
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(context['next_month'], None)
self.assertEqual(context['template'],
'zinnia/tags/entries_calendar.html')
self.publish_entry()
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_incomplete_year_month(self):
self.publish_entry()
source_context = Context()
with self.assertNumQueries(2):
context = get_calendar_entries(source_context, year=2009)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
with self.assertNumQueries(2):
context = get_calendar_entries(source_context, month=1)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_full_params(self):
self.publish_entry()
source_context = Context()
with self.assertNumQueries(2):
context = get_calendar_entries(source_context, 2009, 1,
template='custom_template.html')
self.assertEqual(context['previous_month'], None)
self.assertEqual(
context['next_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['template'], 'custom_template.html')
def test_get_calendar_entries_no_prev_next(self):
self.publish_entry()
source_context = Context()
with self.assertNumQueries(2):
context = get_calendar_entries(source_context, 2010, 1)
self.assertEqual(context['previous_month'], None)
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_month_context(self):
self.publish_entry()
source_context = Context({'month': date(2009, 1, 1)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(
context['next_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
source_context = Context({'month': date(2010, 6, 1)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
source_context = Context({'month': date(2010, 1, 1)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_week_context(self):
self.publish_entry()
source_context = Context({'week': date(2009, 1, 5)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(
context['next_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
source_context = Context({'week': date(2010, 5, 31)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
source_context = Context({'week': date(2010, 1, 4)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_day_context(self):
self.publish_entry()
source_context = Context({'day': date(2009, 1, 15)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(
context['next_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
source_context = Context({'day': date(2010, 6, 15)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
source_context = Context({'day': date(2010, 1, 15)})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(context['next_month'], None)
def test_get_calendar_entries_object_context(self):
self.publish_entry()
source_context = Context({'object': object()})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
self.assertEqual(context['next_month'], None)
params = {'title': 'My second entry',
'content': 'My second content',
'tags': 'zinnia, test',
'status': PUBLISHED,
'publication_date': datetime(2008, 1, 15),
'slug': 'my-second-entry'}
second_entry = Entry.objects.create(**params)
second_entry.sites.add(self.site)
source_context = Context({'object': self.entry})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(
context['previous_month'],
self.make_local(second_entry.publication_date).date().replace(
day=1))
self.assertEqual(context['next_month'], None)
source_context = Context({'object': second_entry})
with self.assertNumQueries(2):
context = get_calendar_entries(source_context)
self.assertEqual(context['previous_month'], None)
self.assertEqual(
context['next_month'],
self.make_local(self.entry.publication_date).date().replace(day=1))
@skipIfCustomUser
def test_get_recent_comments(self):
with self.assertNumQueries(1):
context = get_recent_comments()
self.assertEqual(len(context['comments']), 0)
self.assertEqual(context['template'],
'zinnia/tags/comments_recent.html')
comment_1 = comments.get_model().objects.create(
comment='My Comment 1', site=self.site,
content_object=self.entry, submit_date=timezone.now())
with self.assertNumQueries(1):
context = get_recent_comments(3, 'custom_template.html')
self.assertEqual(len(context['comments']), 0)
self.assertEqual(context['template'], 'custom_template.html')
self.publish_entry()
with self.assertNumQueries(3):
context = get_recent_comments()
self.assertEqual(len(context['comments']), 1)
self.assertEqual(context['comments'][0].content_object,
self.entry)
author = Author.objects.create_user(username='webmaster',
email='[email protected]')
comment_2 = comments.get_model().objects.create(
comment='My Comment 2', site=self.site,
content_object=self.entry, submit_date=timezone.now())
comment_2.flags.create(user=author,
flag=CommentFlag.MODERATOR_APPROVAL)
with self.assertNumQueries(3):
context = get_recent_comments()
self.assertEqual(list(context['comments']),
[comment_2, comment_1])
self.assertEqual(context['comments'][0].content_object,
self.entry)
self.assertEqual(context['comments'][1].content_object,
self.entry)
@skipIfCustomUser
def test_get_recent_linkbacks(self):
user = Author.objects.create_user(username='webmaster',
email='[email protected]')
with self.assertNumQueries(1):
context = get_recent_linkbacks()
self.assertEqual(len(context['linkbacks']), 0)
self.assertEqual(context['template'],
'zinnia/tags/linkbacks_recent.html')
linkback_1 = comments.get_model().objects.create(
comment='My Linkback 1', site=self.site,
content_object=self.entry, submit_date=timezone.now())
linkback_1.flags.create(user=user, flag=PINGBACK)
with self.assertNumQueries(1):
context = get_recent_linkbacks(3, 'custom_template.html')
self.assertEqual(len(context['linkbacks']), 0)
self.assertEqual(context['template'], 'custom_template.html')
self.publish_entry()
with self.assertNumQueries(3):
context = get_recent_linkbacks()
self.assertEqual(len(context['linkbacks']), 1)
self.assertEqual(context['linkbacks'][0].content_object,
self.entry)
linkback_2 = comments.get_model().objects.create(
comment='My Linkback 2', site=self.site,
content_object=self.entry, submit_date=timezone.now())
linkback_2.flags.create(user=user, flag=TRACKBACK)
with self.assertNumQueries(3):
context = get_recent_linkbacks()
self.assertEqual(list(context['linkbacks']),
[linkback_2, linkback_1])
self.assertEqual(context['linkbacks'][0].content_object,
self.entry)
self.assertEqual(context['linkbacks'][1].content_object,
self.entry)
def test_zinnia_pagination(self):
class FakeRequest(object):
def __init__(self, get_dict):
self.GET = get_dict
source_context = Context({'request': FakeRequest(
{'page': '1', 'key': 'val'})})
paginator = Paginator(range(200), 10)
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(1),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(context['page'].number, 1)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [18, 19, 20])
self.assertEqual(context['GET_string'], '&key=val')
self.assertEqual(context['template'], 'zinnia/tags/pagination.html')
source_context = Context({'request': FakeRequest({})})
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(2),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(context['page'].number, 2)
self.assertEqual(list(context['begin']), [1, 2, 3, 4])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [18, 19, 20])
self.assertEqual(context['GET_string'], '')
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(3),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(6),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5, 6, 7, 8])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(11),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [9, 10, 11, 12, 13])
self.assertEqual(list(context['end']), [18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(15),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']),
[13, 14, 15, 16, 17, 18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(18),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [16, 17, 18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(19),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [17, 18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(20),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [18, 19, 20])
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(10),
begin_pages=1, end_pages=3,
before_pages=4, after_pages=3,
template='custom_template.html')
self.assertEqual(list(context['begin']), [1])
self.assertEqual(list(context['middle']), [6, 7, 8, 9, 10, 11, 12, 13])
self.assertEqual(list(context['end']), [18, 19, 20])
self.assertEqual(context['template'], 'custom_template.html')
paginator = Paginator(range(50), 10)
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(1),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [])
paginator = Paginator(range(60), 10)
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(1),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3, 4, 5, 6])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [])
paginator = Paginator(range(70), 10)
with self.assertNumQueries(0):
context = zinnia_pagination(
source_context, paginator.page(1),
begin_pages=3, end_pages=3,
before_pages=2, after_pages=2)
self.assertEqual(list(context['begin']), [1, 2, 3])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [5, 6, 7])
def test_zinnia_pagination_on_my_website(self):
"""
        Reproduce the issue encountered on my website,
versus the expected result.
"""
class FakeRequest(object):
def __init__(self, get_dict={}):
self.GET = get_dict
source_context = Context({'request': FakeRequest()})
paginator = Paginator(range(40), 10)
with self.assertNumQueries(0):
for i in range(1, 5):
context = zinnia_pagination(
source_context, paginator.page(i),
begin_pages=1, end_pages=1,
before_pages=2, after_pages=2)
self.assertEqual(context['page'].number, i)
self.assertEqual(list(context['begin']), [1, 2, 3, 4])
self.assertEqual(list(context['middle']), [])
self.assertEqual(list(context['end']), [])
@skipIfCustomUser
def test_zinnia_breadcrumbs(self):
class FakeRequest(object):
def __init__(self, path):
self.path = path
class FakePage(object):
def __init__(self, number):
self.number = number
def check_only_last_have_no_url(crumb_list):
size = len(crumb_list) - 1
for i, crumb in enumerate(crumb_list):
if i != size:
self.assertNotEqual(crumb.url, None)
else:
self.assertEqual(crumb.url, None)
source_context = Context({'request': FakeRequest('/')})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 1)
self.assertEqual(context['breadcrumbs'][0].name, 'Blog')
self.assertEqual(context['breadcrumbs'][0].url,
reverse('zinnia:entry_archive_index'))
self.assertEqual(context['template'], 'zinnia/tags/breadcrumbs.html')
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context,
'Weblog', 'custom_template.html')
self.assertEqual(len(context['breadcrumbs']), 1)
self.assertEqual(context['breadcrumbs'][0].name, 'Weblog')
self.assertEqual(context['template'], 'custom_template.html')
source_context = Context(
{'request': FakeRequest(self.entry.get_absolute_url()),
'object': self.entry})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 5)
check_only_last_have_no_url(context['breadcrumbs'])
cat_1 = Category.objects.create(title='Category 1', slug='category-1')
source_context = Context(
{'request': FakeRequest(cat_1.get_absolute_url()),
'object': cat_1})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 3)
check_only_last_have_no_url(context['breadcrumbs'])
cat_2 = Category.objects.create(title='Category 2', slug='category-2',
parent=cat_1)
source_context = Context(
{'request': FakeRequest(cat_2.get_absolute_url()),
'object': cat_2})
with self.assertNumQueries(1):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 4)
check_only_last_have_no_url(context['breadcrumbs'])
tag = Tag.objects.get(name='test')
source_context = Context(
{'request': FakeRequest(reverse('zinnia:tag_detail',
args=['test'])),
'object': tag})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 3)
check_only_last_have_no_url(context['breadcrumbs'])
author = Author.objects.create_user(username='webmaster',
email='[email protected]')
source_context = Context(
{'request': FakeRequest(author.get_absolute_url()),
'object': author})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 3)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context(
{'request': FakeRequest(reverse(
'zinnia:entry_archive_year', args=[2011]))})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 2)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context({'request': FakeRequest(reverse(
'zinnia:entry_archive_month', args=[2011, '03']))})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 3)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context({'request': FakeRequest(reverse(
'zinnia:entry_archive_week', args=[2011, 15]))})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 3)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context({'request': FakeRequest(reverse(
'zinnia:entry_archive_day', args=[2011, '03', 15]))})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 4)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context({'request': FakeRequest('%s?page=2' % reverse(
'zinnia:entry_archive_day', args=[2011, '03', 15])),
'page_obj': FakePage(2)})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 5)
check_only_last_have_no_url(context['breadcrumbs'])
source_context = Context({'request': FakeRequest(reverse(
'zinnia:entry_archive_day_paginated', args=[2011, '03', 15, 2])),
'page_obj': FakePage(2)})
with self.assertNumQueries(0):
context = zinnia_breadcrumbs(source_context)
self.assertEqual(len(context['breadcrumbs']), 5)
check_only_last_have_no_url(context['breadcrumbs'])
# More tests can be done here, for testing path and objects in context
def test_get_gravatar(self):
self.assertTrue(urlEqual(
get_gravatar('[email protected]'),
'http://www.gravatar.com/avatar/86d4fd4a22de452'
'a9228298731a0b592?s=80&r=g'))
self.assertTrue(urlEqual(
get_gravatar(' [email protected] ', 15, 'x', '404'),
'http://www.gravatar.com/avatar/86d4fd4a22de452'
'a9228298731a0b592?s=15&r=x&d=404'))
self.assertTrue(urlEqual(
get_gravatar(' [email protected] ', 15, 'x', '404', 'https'),
'https://secure.gravatar.com/avatar/86d4fd4a22de452'
'a9228298731a0b592?s=15&r=x&d=404'))
def test_get_tags(self):
Tag.objects.create(name='tag')
t = Template("""
{% load zinnia %}
{% get_tags as entry_tags %}
{{ entry_tags|join:", " }}
""")
with self.assertNumQueries(1):
html = t.render(Context())
self.assertEqual(html.strip(), '')
self.publish_entry()
html = t.render(Context())
self.assertEqual(html.strip(), 'test, zinnia')
template_error_as = """
{% load zinnia %}
{% get_tags a_s entry_tags %}"""
self.assertRaises(TemplateSyntaxError, Template, template_error_as)
template_error_args = """
{% load zinnia %}
{% get_tags as entry tags %}"""
self.assertRaises(TemplateSyntaxError, Template, template_error_args)
def test_get_tag_cloud(self):
source_context = Context()
with self.assertNumQueries(1):
context = get_tag_cloud(source_context)
self.assertEqual(len(context['tags']), 0)
self.assertEqual(context['template'], 'zinnia/tags/tag_cloud.html')
self.assertEqual(context['context_tag'], None)
self.publish_entry()
tag = Tag.objects.get(name='test')
source_context = Context({'tag': tag})
with self.assertNumQueries(1):
context = get_tag_cloud(source_context, 6, 1,
'custom_template.html')
self.assertEqual(len(context['tags']), 2)
self.assertEqual(context['template'], 'custom_template.html')
self.assertEqual(context['context_tag'], tag)
def test_widont(self):
self.assertEqual(
widont('Word'), 'Word')
self.assertEqual(
widont('A complete string'),
'A complete string')
self.assertEqual(
widont('A complete\tstring'),
'A complete string')
self.assertEqual(
widont('A complete string'),
'A complete string')
self.assertEqual(
widont('A complete string with trailing spaces '),
'A complete string with trailing spaces ')
self.assertEqual(
widont('A complete string with <markup>', autoescape=False),
'A complete string with <markup>')
self.assertEqual(
widont('A complete string with <markup>', autoescape=True),
'A complete string with <markup>')
def test_widont_pre_punctuation(self):
"""
In some languages like French, applying the widont filter
before a punctuation sign preceded by a space, leads to
        ugly visual results, instead of better visual results.
"""
self.assertEqual(
widont('Releases : django-blog-zinnia'),
'Releases : django-blog-zinnia')
self.assertEqual(
widont('Releases ; django-blog-zinnia'),
'Releases ; django-blog-zinnia')
self.assertEqual(
widont('Releases ! django-blog-zinnia'),
'Releases ! django-blog-zinnia')
self.assertEqual(
widont('Releases ? django-blog-zinnia'),
'Releases ? django-blog-zinnia')
self.assertEqual(
widont('Releases - django-blog-zinnia'),
'Releases - django-blog-zinnia')
self.assertEqual(
widont('Releases + django-blog-zinnia'),
'Releases + django-blog-zinnia')
self.assertEqual(
widont('Releases * django-blog-zinnia'),
'Releases * django-blog-zinnia')
self.assertEqual(
widont('Releases / django-blog-zinnia'),
'Releases / django-blog-zinnia')
self.assertEqual(
widont('Releases % django-blog-zinnia'),
'Releases % django-blog-zinnia')
self.assertEqual(
widont('Releases = django-blog-zinnia'),
'Releases = django-blog-zinnia')
self.assertEqual(
widont('Releases : django-blog-zinnia '),
'Releases : django-blog-zinnia ')
self.assertEqual(
widont('Releases :: django-blog-zinnia'),
'Releases :: django-blog-zinnia')
self.assertEqual(
widont('Releases :z django-blog-zinnia'),
'Releases :z django-blog-zinnia')
def test_widont_post_punctuation(self):
"""
Sometimes applying the widont filter on just a punctuation sign,
leads to ugly visual results, instead of better visual results.
"""
self.assertEqual(
widont('Move !'),
'Move !')
self.assertEqual(
widont('Move it ! '),
'Move it ! ')
self.assertEqual(
widont('Move it ?'),
'Move it ?')
self.assertEqual(
widont('I like to move : it !'),
'I like to move : it !')
self.assertEqual(
widont('I like to : move it !'),
'I like to : move it !')
def test_week_number(self):
self.assertEqual(week_number(datetime(2013, 1, 1)), '0')
self.assertEqual(week_number(datetime(2013, 12, 21)), '50')
def test_comment_admin_urlname(self):
comment_admin_url = comment_admin_urlname('action')
self.assertTrue(comment_admin_url.startswith('admin:'))
self.assertTrue(comment_admin_url.endswith('_action'))
@skipIfCustomUser
def test_user_admin_urlname(self):
user_admin_url = user_admin_urlname('action')
self.assertEqual(user_admin_url, 'admin:auth_user_action')
@skipIfCustomUser
def test_zinnia_statistics(self):
with self.assertNumQueries(8):
context = zinnia_statistics()
self.assertEqual(context['template'], 'zinnia/tags/statistics.html')
self.assertEqual(context['entries'], 0)
self.assertEqual(context['categories'], 0)
self.assertEqual(context['tags'], 0)
self.assertEqual(context['authors'], 0)
self.assertEqual(context['comments'], 0)
self.assertEqual(context['pingbacks'], 0)
self.assertEqual(context['trackbacks'], 0)
self.assertEqual(context['rejects'], 0)
self.assertEqual(context['words_per_entry'], 0)
self.assertEqual(context['words_per_comment'], 0)
self.assertEqual(context['entries_per_month'], 0)
self.assertEqual(context['comments_per_entry'], 0)
self.assertEqual(context['linkbacks_per_entry'], 0)
Category.objects.create(title='Category 1', slug='category-1')
author = Author.objects.create_user(username='webmaster',
email='[email protected]')
comments.get_model().objects.create(
comment='My Comment 1', site=self.site,
content_object=self.entry,
submit_date=timezone.now())
self.entry.authors.add(author)
self.publish_entry()
with self.assertNumQueries(13):
context = zinnia_statistics('custom_template.html')
self.assertEqual(context['template'], 'custom_template.html')
self.assertEqual(context['entries'], 1)
self.assertEqual(context['categories'], 1)
self.assertEqual(context['tags'], 2)
self.assertEqual(context['authors'], 1)
self.assertEqual(context['comments'], 1)
self.assertEqual(context['pingbacks'], 0)
self.assertEqual(context['trackbacks'], 0)
self.assertEqual(context['rejects'], 0)
self.assertEqual(context['words_per_entry'], 2)
self.assertEqual(context['words_per_comment'], 3)
self.assertEqual(context['entries_per_month'], 1)
self.assertEqual(context['comments_per_entry'], 1)
self.assertEqual(context['linkbacks_per_entry'], 0)
class TemplateTagsTimezoneTestCase(TestCase):
def create_published_entry_at(self, publication_date):
params = {'title': 'My entry',
'content': 'My content',
'slug': 'my-entry',
'status': PUBLISHED,
'publication_date': publication_date}
entry = Entry.objects.create(**params)
entry.sites.add(Site.objects.get_current())
return entry
@override_settings(USE_TZ=False)
def test_calendar_entries_no_timezone(self):
template = Template('{% load zinnia %}'
'{% get_calendar_entries 2014 1 %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 1, 23, 0))
self.create_published_entry_at(datetime(2012, 12, 31, 23, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/01/' in output)
self.assertTrue('/2014/01/02/' not in output)
self.assertTrue('/2012/12/' in output)
self.assertTrue('/2014/02/' not in output)
@override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris')
def test_calendar_entries_with_timezone(self):
template = Template('{% load zinnia %}'
'{% get_calendar_entries 2014 1 %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 1, 23, 0))
self.create_published_entry_at(datetime(2012, 12, 31, 23, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/01/' in output)
self.assertTrue('/2014/01/02/' in output)
self.assertTrue('/2013/01/' in output)
self.assertTrue('/2014/02/' in output)
@override_settings(USE_TZ=False)
def test_archives_entries_no_timezone(self):
template = Template('{% load zinnia %}'
'{% get_archives_entries %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/' in output)
self.assertTrue('/2014/02/' not in output)
@override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris')
def test_archives_entries_with_timezone(self):
template = Template('{% load zinnia %}'
'{% get_archives_entries %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/' in output)
self.assertTrue('/2014/02/' in output)
@override_settings(USE_TZ=False)
def test_archives_entries_tree_no_timezone(self):
template = Template('{% load zinnia %}'
'{% get_archives_entries_tree %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/01/' in output)
self.assertTrue('/2014/02/01/' not in output)
@override_settings(USE_TZ=True, TIME_ZONE='Europe/Paris')
def test_archives_entries_tree_with_timezone(self):
template = Template('{% load zinnia %}'
'{% get_archives_entries_tree %}')
self.create_published_entry_at(datetime(2014, 1, 1, 12, 0))
self.create_published_entry_at(datetime(2014, 1, 31, 23, 0))
output = template.render(Context())
self.assertTrue('/2014/01/01/' in output)
self.assertTrue('/2014/02/01/' in output)
| bsd-3-clause | -3,821,941,366,989,071,400 | 43.195431 | 79 | 0.598805 | false | 4.028611 | true | false | false |
ksmaheshkumar/My-Gray-Hacker-Resources | Network_and_802.11/port_knocking/port_knocking.py | 4 | 1374 | #!/usr/bin/python
from scapy.all import *
import random
import requests
import subprocess
conf.verb=0
base_URL = "http://10.13.37.23:"
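# Note (added comment): knock() below sends one TCP SYN to each port in the
# given order -- the classic port-knocking sequence the target listens for
# before it serves the next flag fragment over HTTP.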
def knock(ports):
print "[*] Knocking on ports"+str(ports)
for dport in range(0, len(ports)):
ip = IP(dst = "10.13.37.23")
SYN = ip/TCP(dport=ports[dport], flags="S", window=14600, options=[('MSS',1460)])
send(SYN)
def get_flag_part(port,part):
command = ["curl", "-s" ,base_URL+str(port)+"/"+part+"_part_of_flag"]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
result = p.communicate()[0]
return result.strip()
flag=''
ports = [9264,11780,2059,8334]
port = 24931
knock(ports)
flag_part = get_flag_part(port,"first")
flag = ''.join([flag,flag_part])
print flag_part
ports = [42304,53768,3297]
port = 19760
knock(ports)
flag_part = get_flag_part(port,"second")
flag = ''.join([flag,flag_part])
print flag_part
ports= [23106,4250,62532,11655,33844]
port=3695
knock(ports)
flag_part = get_flag_part(port,"third")
flag = ''.join([flag,flag_part])
print flag_part
ports= [49377,48116,54900,8149]
port=31054
knock(ports)
flag_part = get_flag_part(port,"fourth")
flag = ''.join([flag,flag_part])
print flag_part
ports= [16340,59991,37429,60012,15397,21864,12923]
port=8799
knock(ports)
flag_part = get_flag_part(port,"last")
flag = ''.join([flag,flag_part])
print flag_part
print "Flag: %s" % flag
| mit | 4,092,851,726,131,293,000 | 21.9 | 89 | 0.667394 | false | 2.577861 | false | false | false |
manulera/ModellingCourse | ReAct/Python/GenerateMasterEq.py | 1 | 1118 |
import numpy as np
from Gilles import *
import matplotlib.pyplot as plt
from DeviationAnalysis import *
from mpl_toolkits.mplot3d import Axes3D
# Initial conditions
user_input = ['A', 100,
'B', 0]
# Constants (this is not necessary, they could be filled up already in the reaction tuple)
k = (10,10)
# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
reactions = (
(1,'A'),(1,'B'),k[0],
(1,'B'),(1,'A'),k[1],
)
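# The two entries above encode the reversible reaction A <-> B, with rate
# constant k[0] for A -> B and k[1] for B -> A.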
# dt is used for the deterministic calculation and as the step of the time vector t below
dt=0.0001
t = np.arange(0, 0.6, dt)
(solution,(tgill, valsgill,all_mus, all_taus),rows,mode) = ReAct(user_input,reactions,t,rounds=300)
fig = plt.figure()
Gillesplot(solution,t,tgill, valsgill,rows,mode)
j=0
f, axarr = plt.subplots(1, 10)
for i in np.arange(0,0.3,0.03):
A,X,Y = EquationMaker(reactions,tgill,all_mus, all_taus,i,i+0.02)
Y,X=np.meshgrid(Y,X)
#ax = fig.gca(projection='3d')
#ax.plot_surface(X,Y,A, rstride=1, cstride=1, cmap='hot', linewidth=0, antialiased=False)
axarr[j].imshow(A[:5,:], cmap='hot')
j+=1
plt.draw()
plt.show() | gpl-3.0 | 2,223,789,481,659,576,000 | 24.431818 | 101 | 0.662791 | false | 2.457143 | false | false | false |
freevoid/yawf | yawf/messages/submessage.py | 1 | 1640 | from . import Message
class Submessage(object):
need_lock_object = True
def __init__(self, obj, message_id, sender,
params=None, need_lock_object=True, raw_params=None):
self.obj = obj
self.sender = sender
self.message_id = message_id
self.params = params
self.raw_params = raw_params
self.need_lock_object = need_lock_object
super(Submessage, self).__init__()
def as_message(self, parent):
return Message(self.sender, self.message_id,
raw_params=self.raw_params,
clean_params=self.params,
parent_message_id=parent.unique_id,
message_group=parent.message_group,
)
def dispatch(self, parent_obj, parent_message):
from yawf.dispatch import dispatch_message
message = self.as_message(parent_message)
return dispatch_message(
self.obj,
message=message,
defer_side_effect=True,
need_lock_object=self.need_lock_object)
class RecursiveSubmessage(Submessage):
def __init__(self, message_id, sender, params=None, raw_params=None):
super(RecursiveSubmessage, self).__init__(
obj=None,
sender=sender, message_id=message_id,
params=params, raw_params=raw_params)
def dispatch(self, parent_obj, parent_message):
from yawf.dispatch import dispatch_message
message = self.as_message(parent_message)
return dispatch_message(
parent_obj,
message=message,
defer_side_effect=True,
need_lock_object=False)
| mit | -6,258,077,018,383,261,000 | 31.156863 | 73 | 0.608537 | false | 3.970944 | false | false | false |
WheatonCS/Lexos | lexos/receivers/top_words_receiver.py | 1 | 1409 | """This is the topword receiver for the topword model."""
from enum import Enum
from lexos.receivers.base_receiver import BaseReceiver
class TopwordAnalysisType(Enum):
"""This is the class that assigns the options to constants."""
ALL_TO_PARA = "Each Document to the Corpus"
CLASS_TO_PARA = "Each Document to Other Classes"
CLASS_TO_CLASS = "Each Class to Other Classes"
class TopwordReceiver(BaseReceiver):
"""This is the class that receives the options from front end."""
def __init__(self):
"""Get the topword analysis type from front end using this receiver."""
super().__init__()
def options_from_front_end(self) -> TopwordAnalysisType:
"""Get the topword option from front end.
:return: a TopwordAnalysisType object that holds the analysis option.
"""
if self._front_end_data["comparison_method"] == \
"Each Document to the Corpus":
return TopwordAnalysisType.ALL_TO_PARA
elif self._front_end_data["comparison_method"] == \
"Each Document to Other Classes":
return TopwordAnalysisType.CLASS_TO_PARA
elif self._front_end_data["comparison_method"] == \
"Each Class to Other Classes":
return TopwordAnalysisType.CLASS_TO_CLASS
else:
raise ValueError("Invalid topword analysis option from front end.")
| mit | -83,500,371,162,787,890 | 37.081081 | 79 | 0.651526 | false | 4.308869 | false | false | false |
codito/pomito | pomito/plugins/task/text.py | 1 | 1568 | # -*- coding: utf-8 -*-
"""A text file based task plugin implementation."""
import logging
from pomito.plugins import task
from pomito.task import Task
from io import open
__all__ = ['TextTask']
logger = logging.getLogger('pomito.plugins.task.text')
class TextTask(task.TaskPlugin):
"""Implements a plugin to read/write Tasks from a text file.
See doc/sample_tasks.txt for details of task file.
"""
def __init__(self, pomodoro_service):
self._pomodoro_service = pomodoro_service
self.tasks = []
def initialize(self):
# Read plugin configuration
try:
file_path = self._pomodoro_service.get_config("task.text", "file")
with open(file_path, 'r') as f:
for t in f.readlines():
if not t.startswith("--"):
task_tuple = self.parse_task(t)
self.tasks.append(Task(*task_tuple))
except Exception as e:
logger.debug(("Error initializing plugin: {0}".format(e)))
return
def get_tasks(self):
return self.tasks
def parse_task(self, task):
return TextTask._parse_task(task)
@staticmethod
def _parse_task(task):
import re
# Sample task format: I:<id> | E:<estimate> | A:<actual> | T:<tags> | D:<desc>
# Only <desc> can contain spaces. <tags> can be comma separated
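        # Illustrative example: "I:1 | E:30 | A:25 | T:work,urgent | D:write report"
        # would parse to ("1", "30", "25", "work,urgent", "write report").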
        p = re.compile(r"[IEATD]:([\w,\s]*)\|?")
task_tuple = tuple(map(lambda x: x.groups()[-1].strip('\n '), p.finditer(task)))
return task_tuple
| mit | 8,663,580,078,890,446,000 | 29.745098 | 88 | 0.581633 | false | 3.706856 | false | false | false |
pulsar-chem/Pulsar-Core | scripts/newmodulebase.py | 1 | 1154 | #!/usr/bin/env python3
import os
import argparse
import shutil
import psr_common
thispath = os.path.dirname(os.path.realpath(__file__))
modpath = os.path.join(os.path.dirname(thispath), "pulsar", "modulebase")
parser = argparse.ArgumentParser()
parser.add_argument("--author", required=True, help="Author of the file")
parser.add_argument("--desc", required=True, help="Short description of the base module")
parser.add_argument("name", help="Name of the module class")
args = parser.parse_args()
hfilename = args.name + ".hpp"
cfilename = args.name + ".cpp"
hfilepath = os.path.join(modpath, hfilename)
cfilepath = os.path.join(modpath, cfilename)
htemplatepath = os.path.join(modpath, "NewModule.hpp.template")
ctemplatepath = os.path.join(modpath, "NewModule.cpp.template")
hguard = psr_common.GenIncludeGuard(hfilename)
print("Creating {}".format(hfilepath))
with open(hfilepath, 'w') as dest:
for l in open(htemplatepath, 'r').readlines():
l = l.replace("AUTHOR", args.author)
l = l.replace("MODULEDESC", args.desc)
l = l.replace("CLASSNAME", args.name)
l = l.replace("HEADERGUARD", hguard)
dest.write(l)
| bsd-3-clause | 182,136,112,746,278,560 | 30.189189 | 89 | 0.710572 | false | 3.13587 | false | false | false |
cggh/DQXServer | wsgi_api.py | 1 | 3433 | # This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <[email protected]>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>
from urlparse import parse_qs
import importlib
import simplejson
import os
import traceback
import DQXUtils
import DQXDbTools
import responders
from responders import uploadfile
# import customresponders
# Try to import all custom modules
customRespondersPath = os.path.join(os.path.dirname(__file__), 'customresponders')
for dirname in os.listdir(customRespondersPath):
tryModulePath = os.path.join(customRespondersPath, dirname)
if os.path.isdir(tryModulePath):
importlib.import_module('customresponders.' + dirname)
def application(environ, start_response):
request_data = dict((k,v[0]) for k,v in parse_qs(environ['QUERY_STRING']).items())
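    # e.g. a query string such as 'datatype=custom&respmodule=mymod&respid=myresp'
    # (illustrative values) becomes {'datatype': 'custom', 'respmodule': 'mymod', 'respid': 'myresp'}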
if 'datatype' not in request_data:
DQXUtils.LogServer('--> request does not contain datatype')
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield 'Not found: request does not contain datatype'
return
request_type = request_data['datatype']
tm = DQXUtils.Timer()
if request_type == 'custom':
request_custommodule = request_data['respmodule']
request_customid = request_data['respid']
responder = importlib.import_module('customresponders.' + request_custommodule + '.' + request_customid)
else:
try:
#Fetch the handler by request type, using some introspection magic in responders/__init__.py
responder = getattr(responders, request_type)
except AttributeError:
raise Exception("Unknown request {0}".format(request_type))
request_data['environ'] = environ
response = request_data
try:
try:
response = responder.response(request_data)
status = '200 OK'
except DQXDbTools.CredentialException as e:
print('CREDENTIAL EXCEPTION: '+str(e))
response['Error'] = 'Credential problem: ' + str(e)
#Really should be 403 - but I think the JS will break as it expects 200
#status = '403 Forbidden'
status = '200 OK'
except DQXDbTools.Timeout as e:
status = '504 Gateway Timeout'
#Check for a custom response (eg in downloadtable)
if 'handler' in dir(responder):
for item in responder.handler(start_response, response):
yield item
else:
#Default is to respond with JSON
del response['environ']
response = simplejson.dumps(response, use_decimal=True)
response_headers = [('Content-type', 'application/json'),
('Access-Control-Allow-Origin','*'),
('Content-Length', str(len(response)))]
start_response(status, response_headers)
yield response
except Exception as e:
start_response('500 Server Error', [])
traceback.print_exc()
yield str(e)
DQXUtils.LogServer('Responded to {0} in wall={1}s cpu={2}s'.format(request_type, tm.Elapsed(),tm.ElapsedCPU()))
| agpl-3.0 | 3,646,523,974,521,507,000 | 38.388235 | 136 | 0.631809 | false | 4.091776 | false | false | false |
mlibrary/image-conversion-and-validation | falcom/test/hamcrest/has_attrs.py | 1 | 1640 | # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from hamcrest.core.base_matcher import BaseMatcher
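# Illustrative usage (not part of the original module):
#   assert_that(point, HasAttrs("point attrs", x=1, y=2))
# matches any object whose .x == 1 and .y == 2.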
class HasAttrs (BaseMatcher):
def __init__ (self, description_text, **kwargs):
if description_text is None:
description_text = "attrs"
self.desc = description_text
self.kwargs = kwargs
def _matches (self, item):
self.problem = None
try:
for key, value in self.kwargs.items():
if not hasattr(item, key) \
or getattr(item, key) != value:
self.problem = key
return False
return True
except:
return False
def describe_to (self, description):
if self.problem is None:
attrs = ", ".join("s.{}={}".format(k, repr(v))
for (k, v) in self.kwargs.items())
else:
attrs = "including s.{}={}".format(
self.problem, repr(self.kwargs[self.problem]))
description.append_text("a structure with {} {}".format(
self.desc, attrs))
def describe_mismatch (self, item, description):
if self.problem is None:
super().describe_mismatch(item, description)
elif hasattr(item, self.problem):
description.append_text("had s.{}={}".format(
self.problem, repr(getattr(item, self.problem))))
else:
description.append_text("didn't have s." + self.problem)
| bsd-3-clause | -3,407,238,684,599,043,000 | 31.156863 | 69 | 0.557927 | false | 4.327177 | false | false | false |
open-switch/ops-cli | ops-tests/feature/test_vtysh_banner.py | 1 | 21649 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
OpenSwitch Test for custom login banners
"""
from pytest import mark
TOPOLOGY = """
# +-------+
# | |
# |ops1 | for netop login
# | |
# +-------+
# +-------+
# | |
# |ops2 | for user root
# | |
# +-------+
# Since each topology gets only one bash shell and that same shell session is
# returned by get_shell(), we use 2 containers so that the current user is
# unambiguous. There is no link between the containers.
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
[type=openswitch name="OpenSwitch 2"] ops2
# Links
"""
# terminal commands
config_command = "configure terminal"
exit_command = "exit"
pre_cmd = "banner"
post_cmd = "banner exec"
disable_command = "no banner"
show_banner_command = "show banner"
ssh_command = "ssh -o StrictHostKeyChecking=no netop@localhost"
cat_issue = "cat /etc/issue.net"
cat_motd = "cat /etc/motd"
# sample custom login banner
line1 = "The use of COBOL cripples the mind;"
line2 = "its teaching should, therefore,"
line3 = "be regarded as a criminal offense"
line4 = "Edsgar Djikstra"
terminator = "\%"
# another sample custom login banner
line1b = "Software is like entropy:"
line2b = "It is diffuclt to grasp, weighs nothing,"
line3b = "and obeys the Second Law of Thermodyanmics;"
line4b = "i.e., it always increases"
line5b = "Norman Augustine"
# default banners
pre_default = "Welcome to OpenSwitch"
post_default = "Please be responsible"
# banner update responses
success = "Banner updated successfully!"
invalid_user = "Only network operators may change login banners."
# terminal prompts
vty_prompt = ".*\#"
bash_prompt = vty_prompt # for readability
banner_readline = ">>"
vty_config = ".*\(config\)#"
conn_closed = "Connection to localhost closed"
# passwords
netop_pw = "netop"
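# Note: the prompt strings above are regular expressions that send_command()
# matches against the shell output to detect when a command has completed.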
@mark.platform_incompatible(['ostl'])
def test_custom_pre_login_valid_user(topology):
"""
Update the banner as a user in the netop group.
The result should be reflected by the show banner command (OVSDB), and in
the contents of the file /etc/issue.net which are displayed before the
password prompt in an SSH session.
Begin an interactive bash shell as root
1. su to 'netop' inheriting environment
Using vtysh shell from the switch:
1. set banner to an empty string
2. set banner to a known value, checking for success indicator
SSH to switch
    1. make sure that known value is displayed before the password prompt
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user 'netop' with default shell (vtysh)")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to empty string")
shell.send_command(disable_command, [success, vty_config])
print("Set banner to a known value")
shell.send_command(" ".join([pre_cmd, terminator]), [banner_readline],
timeout=1)
shell.send_command(line1, [banner_readline])
shell.send_command(line2, [banner_readline])
shell.send_command(line3, [banner_readline])
shell.send_command(line4, [banner_readline])
shell.send_command(terminator, [success])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("SSH to localhost as netop")
shell.send_command(ssh_command, [line1])
shell.send_command(netop_pw, [bash_prompt])
print("Return to bash shell")
shell.send_command(exit_command, [conn_closed])
print("Banner set succesfully")
print("Test custom_pre_login_valid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_custom_post_login_valid_user(topology):
"""
Update the banner as a user in the netop group.
The result should be reflected by the show banner command (OVSDB), and in
the contents of the file /etc/motd,
Begin an interactive bash shell as root
1. su to 'netop' inheriting environment
Using vtysh shell from the switch:
1. set banner to an empty string
2. set banner to known value, checking for success indicator
SSH to switch
1. make sure that known value is displayed after password is provided
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user 'netop' with default shell (vtysh)")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to empty string")
shell.send_command(" ".join([disable_command, "exec"]),
[success, vty_config])
print("Set banner to a known value")
shell.send_command(" ".join([post_cmd, terminator]), [banner_readline],
timeout=1)
shell.send_command(line1b, [banner_readline])
shell.send_command(line2b, [banner_readline])
shell.send_command(line3b, [banner_readline])
shell.send_command(line4b, [banner_readline])
shell.send_command(line5b, [banner_readline])
shell.send_command(terminator, [success])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("SSH to localhost as netop")
shell.send_command(ssh_command, ["password"])
shell.send_command(netop_pw, [line3b])
print("Return to bash shell")
shell.send_command(exit_command, [conn_closed])
print("Banner set succesfully")
print("Test custom_post_login_valid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_custom_pre_login_invalid_user(topology):
"""
Update the banner as a user that is not in the netop group.
The requested update should be refused.
Begin an interactive bash shell as root
1. run vtysh
Using vtysh shell from the switch:
1. issue command to change banner
2. check for failure message
"""
ops2 = topology.get('ops2')
assert ops2 is not None
print("Get bash shell")
shell = ops2.get_shell('bash')
print("Run vtysh as root")
shell.send_command("vtysh", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Attempt to set banner to custom value")
shell.send_command(" ".join([pre_cmd, terminator]), [banner_readline],
timeout=1)
shell.send_command("hello", [banner_readline])
shell.send_command(terminator, [invalid_user])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Exit to bash shell")
shell.send_command(exit_command, [bash_prompt])
print("Banner unchanged")
print("Test custom_pre_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_custom_post_login_invalid_user(topology):
"""
Update the banner as a user that is not in the netop group.
The requested update should be refused.
Begin an interactive bash shell as root
Using vtysh shell from the switch:
1. issue command to change banner
2. check for failure message
"""
ops2 = topology.get('ops2')
assert ops2 is not None
print("Get bash shell")
shell = ops2.get_shell('bash')
print("Run vtysh as root")
shell.send_command("vtysh", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Attempt to set banner to custom value")
shell.send_command(" ".join([post_cmd, terminator]), [banner_readline],
timeout=1)
shell.send_command("hello", [banner_readline])
shell.send_command(terminator, [invalid_user])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Exit to bash shell")
shell.send_command(exit_command, [bash_prompt])
print("Banner unchanged")
print("Test custom_post_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_default_pre_login_valid_user(topology):
"""
    Restore default pre-login banner as a user in the netop group.
The result should be reflected by the show banner command (OVSDB), and in
the contents of the file /etc/issue.net
Begin an interactive bash shell as root
1. su to 'netop' inheriting environment
Using vtysh shell from the switch:
1. restore default banner
SSH to switch
    1. make sure that the default value is displayed before the password prompt
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user 'netop' with default shell (vtysh)")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to empty string")
shell.send_command(disable_command, [success, vty_config])
print("Set banner to default value")
shell.send_command(" ".join([pre_cmd, "default"]), [success])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("SSH to localhost as netop")
shell.send_command(ssh_command, [pre_default])
shell.send_command(netop_pw, [bash_prompt])
print("Exit to bash shell")
shell.send_command(exit_command, [conn_closed])
print("Banner set succesfully")
print("Test default_pre_login_valid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_default_post_login_valid_user(topology):
"""
    Restore default post-login banner as a user in the netop group.
    The result should be reflected by the show banner command (OVSDB), and in
    the contents of the file /etc/motd
Begin an interactive bash shell as root
1. su to 'netop' inheriting environment
Using vtysh shell from the switch:
1. restore default banner
SSH to switch
    1. make sure that the default value is displayed after the password is provided
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user 'netop' with default shell (vtysh)")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to empty string")
shell.send_command(" ".join([disable_command, "exec"]),
[success, vty_config])
print("Set banner to default value")
shell.send_command(" ".join([post_cmd, "default"]), [success])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("SSH to localhost as netop")
shell.send_command(ssh_command, ["password"])
shell.send_command(netop_pw, [post_default])
print("Exit to bash shell")
shell.send_command(exit_command, [conn_closed])
print("Banner set succesfully")
print("Test default_post_login_valid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_default_pre_login_invalid_user(topology):
"""
    Restore default pre-login banner as a user not in the netop group.
The attempt should be rejected.
Begin an interactive bash shell as root
Using vtysh shell from the switch:
1. issue restore default banner command, check for failure message
"""
ops2 = topology.get('ops2')
assert ops2 is not None
print("Get bash shell")
shell = ops2.get_shell('bash')
print("Enter vtysh as root")
shell.send_command("vtysh", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([pre_cmd, "default"]), [invalid_user])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("Banner set succesfully")
print("Test default_pre_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_default_post_login_invalid_user(topology):
"""
    Restore default post-login banner as a user not in the netop group.
    The attempt should be rejected.
Begin an interactive bash shell as root
Using vtysh shell from the switch:
1. issue restore default banner command, check for failure message
"""
ops2 = topology.get('ops2')
assert ops2 is not None
print("Get bash shell")
shell = ops2.get_shell('bash')
print("Enter vtysh as root")
shell.send_command("vtysh", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([pre_cmd, "default"]), [invalid_user])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Return to bash prompt")
shell.send_command(exit_command, [bash_prompt])
print("Banner set succesfully")
print("Test default_pre_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_disable_pre_login_valid_user(topology):
"""
Disable the pre-login banner. If the file /etc/issue.net contains only a
single new line, then OVSDB and the SSH banner have been changed
appropriately.
Begin an interactive bash shell as root
1. su to user netop, inheriting environment
Using vtysh shell from the switch:
1. restore the default banner
2. disable the banner, check for success
3. exit vtysh
Using bash, once again
1. confirm that the length of /etc/issue.net is one byte
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user netop, inheriting shell")
shell.send_command("su - netop", [vty_prompt])
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([pre_cmd, "default"]), [success, vty_config],
timeout=1)
print("Disable banner, checking for success")
shell.send_command(" ".join(["no", pre_cmd]), [success])
print("Exit to bash")
shell.send_command(exit_command, [vty_prompt])
shell.send_command(exit_command, [bash_prompt])
shell.send_command('du -b /etc/issue.net', ['1', '/etc/issue.net'])
print("Banner disabled succesfully")
print("Test disable_pre_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_disable_post_login_valid_user(topology):
"""
Disable the post-login banner. If the file /etc/motd contains only a single
new line, then OVSDB and the SSH banner have been changed appropriately.
Begin an interactive bash shell as root
1. su to user netop, inheriting environment
Using vtysh shell from the switch:
1. restore the default banner
2. disable the banner, check for success
3. exit vtysh
Using bash, once again
1. confirm the length of /etc/motd is 1 byte
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Switch to user netop, inheriting shell")
shell.send_command("su - netop", [vty_prompt])
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([post_cmd, "default"]), [success, vty_config],
timeout=1)
print("Disable banner, checking for success")
shell.send_command(" ".join(["no", post_cmd]), [success])
print("Exit to bash")
shell.send_command(exit_command, [vty_prompt])
shell.send_command(exit_command, [bash_prompt])
    issue_output = ops1(cat_motd, shell='bash')
shell.send_command('du -b /etc/motd', ['1', '/etc/motd'])
print("Banner disabled succesfully")
print("Test disable_post_login_invalid_user PASSED")
@mark.platform_incompatible(['ostl'])
def test_disable_pre_login_invalid_user(topology):
"""
Attempt to disable the login banner. The attempt should be refused.
Begin an interactive bash shell as root
1. enter vtysh interactive shell
Using vtysh shell from the switch:
1. enter configuration context
2. attempt to change banner, checking for failure
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Enter vtysh shell")
shell.send_command("vtysh", [vty_prompt])
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Disable banner, checking for failure")
shell.send_command(" ".join(["no", pre_cmd]), [invalid_user])
print("Exit to bash")
shell.send_command(exit_command, [vty_prompt])
shell.send_command(exit_command, [bash_prompt])
@mark.platform_incompatible(['ostl'])
def test_disable_post_login_invalid_user(topology):
"""
Attempt to disable the login banner. The attempt should be refused.
Begin an interactive bash shell as root
1. enter vtysh interactive shell
Using vtysh shell from the switch:
1. enter configuration context
2. attempt to change banner, checking for failure
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("Enter vtysh shell")
shell.send_command("vtysh", [vty_prompt])
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Disable banner, checking for failure")
shell.send_command(" ".join(["no", post_cmd]), [invalid_user])
print("Exit to bash")
shell.send_command(exit_command, [vty_prompt])
shell.send_command(exit_command, [bash_prompt])
@mark.platform_incompatible(['ostl'])
def test_display_pre_login(topology):
"""
Attempt to display the login banner. It should match the expected value.
Begin an interactive bash shell as root
1. su to user netop, inheriting environment
Using vtysh shell from the switch:
1. enter configuration context
2. restore default banner
3. exit configuration context
4. issue command to show banner, confirm it matches default
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("su to user netop")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([pre_cmd, "default"]), [success, ""])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Display the configured banner")
shell.send_command(" ".join(["show", pre_cmd]), [pre_default])
print("Exit to bash shell")
shell.send_command(exit_command, [bash_prompt])
print("Banner displayed succesfully")
print("Test display_pre_login PASSED")
@mark.platform_incompatible(['ostl'])
def test_display_post_login(topology):
"""
Attempt to display the login banner. It should match the expected value.
Begin an interactive bash shell as root
1. su to user netop, inheriting environment
Using vtysh shell from the switch:
1. enter configuration context
2. restore default banner
3. exit configuration context
4. issue command to show banner, confirm it matches default
"""
ops1 = topology.get('ops1')
assert ops1 is not None
print("Get bash shell")
shell = ops1.get_shell('bash')
print("su to user netop")
shell.send_command("su - netop", vty_prompt)
print("Enter configuration context")
shell.send_command(config_command, [vty_config])
print("Set banner to default value")
shell.send_command(" ".join([post_cmd, "default"]), [success, ""])
print("Exit configuration context")
shell.send_command(exit_command, [vty_prompt])
print("Display the configured banner")
shell.send_command(" ".join(["show", post_cmd]), [post_default])
print("Exit to bash shell")
shell.send_command(exit_command, [bash_prompt])
print("Banner displayed succesfully")
print("Test display_post_login PASSED")
| gpl-2.0 | -2,321,846,893,456,399,000 | 32.254992 | 79 | 0.681602 | false | 3.707655 | true | false | false |
jocelynj/weboob | weboob/backends/canalplus/browser.py | 1 | 2453 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Nicolas Duhamel
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import urllib
from weboob.tools.browser import BaseBrowser
from weboob.tools.browser.decorators import id2url
from .pages import InitPage, CanalplusVideo, VideoPage
import lxml.etree
class XMLParser:
def parse(self, data, encoding=None):
if encoding is None:
parser = None
else:
parser = lxml.etree.XMLParser(encoding=encoding, strip_cdata=False)
return lxml.etree.XML(data.get_data(), parser)
__all__ = ['CanalplusBrowser']
class CanalplusBrowser(BaseBrowser):
DOMAIN = u'service.canal-plus.com'
ENCODING = 'utf-8'
PAGES = {r"http://service.canal-plus.com/video/rest/initPlayer/cplus/": InitPage,
r"http://service.canal-plus.com/video/rest/search/cplus/.*": VideoPage,
r"http://service.canal-plus.com/video/rest/getVideosLiees/cplus/(?P<id>.+)": VideoPage,
}
    # We need lxml.etree.XMLParser to read CDATA
PARSER = XMLParser()
FORMATS = { 'sd': 'BAS_DEBIT',
'hd': 'HD'
}
def __init__(self, quality, *args, **kwargs):
BaseBrowser.__init__(self, parser= self.PARSER, *args, **kwargs)
if quality in self.FORMATS:
self.quality = self.FORMATS[quality]
else:
self.quality = 'HD'
def home(self):
self.location("http://service.canal-plus.com/video/rest/initPlayer/cplus/")
def iter_search_results(self, pattern):
self.location("http://service.canal-plus.com/video/rest/search/cplus/" + urllib.quote_plus(pattern))
return self.page.iter_results()
@id2url(CanalplusVideo.id2url)
def get_video(self, url, video=None):
self.location(url)
return self.page.get_video(video, self.quality)
| gpl-3.0 | 3,005,965,407,463,023,000 | 33.069444 | 108 | 0.67183 | false | 3.560232 | false | false | false |
SciTools/cartopy | lib/cartopy/tests/crs/test_stereographic.py | 2 | 3840 | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
import numpy as np
from numpy.testing import assert_almost_equal
import cartopy.crs as ccrs
from .helpers import check_proj_params
class TestStereographic:
def test_default(self):
stereo = ccrs.Stereographic()
other_args = {'ellps=WGS84', 'lat_0=0.0', 'lon_0=0.0', 'x_0=0.0',
'y_0=0.0'}
check_proj_params('stere', stereo, other_args)
assert_almost_equal(np.array(stereo.x_limits),
[-5e7, 5e7], decimal=4)
assert_almost_equal(np.array(stereo.y_limits),
[-5e7, 5e7], decimal=4)
def test_eccentric_globe(self):
globe = ccrs.Globe(semimajor_axis=1000, semiminor_axis=500,
ellipse=None)
stereo = ccrs.Stereographic(globe=globe)
other_args = {'a=1000', 'b=500', 'lat_0=0.0', 'lon_0=0.0', 'x_0=0.0',
'y_0=0.0'}
check_proj_params('stere', stereo, other_args)
# The limits in this test are sensible values, but are by no means
# a "correct" answer - they mean that plotting the crs results in a
# reasonable map.
assert_almost_equal(np.array(stereo.x_limits),
[-7839.27971444, 7839.27971444], decimal=4)
assert_almost_equal(np.array(stereo.y_limits),
[-3932.82587779, 3932.82587779], decimal=4)
def test_true_scale(self):
# The "true_scale_latitude" parameter only makes sense for
# polar stereographic projections (#339 and #455).
# For now only the proj string creation is tested
# See test_scale_factor for test on projection.
globe = ccrs.Globe(ellipse='sphere')
stereo = ccrs.NorthPolarStereo(true_scale_latitude=30, globe=globe)
other_args = {'ellps=sphere', 'lat_0=90', 'lon_0=0.0', 'lat_ts=30',
'x_0=0.0', 'y_0=0.0'}
check_proj_params('stere', stereo, other_args)
def test_scale_factor(self):
# See #455
# Use spherical Earth in North Polar Stereographic to check
# equivalence between true_scale and scale_factor.
# In these conditions a scale factor of 0.75 corresponds exactly to
# a standard parallel of 30N.
globe = ccrs.Globe(ellipse='sphere')
stereo = ccrs.Stereographic(central_latitude=90., scale_factor=0.75,
globe=globe)
other_args = {'ellps=sphere', 'lat_0=90.0', 'lon_0=0.0', 'k_0=0.75',
'x_0=0.0', 'y_0=0.0'}
check_proj_params('stere', stereo, other_args)
# Now test projections
lon, lat = 10, 10
projected_scale_factor = stereo.transform_point(lon, lat,
ccrs.Geodetic())
# should be equivalent to North Polar Stereo with
# true_scale_latitude = 30
nstereo = ccrs.NorthPolarStereo(globe=globe, true_scale_latitude=30)
projected_true_scale = nstereo.transform_point(lon, lat,
ccrs.Geodetic())
assert projected_true_scale == projected_scale_factor
def test_eastings(self):
stereo = ccrs.Stereographic()
stereo_offset = ccrs.Stereographic(false_easting=1234,
false_northing=-4321)
other_args = {'ellps=WGS84', 'lat_0=0.0', 'lon_0=0.0', 'x_0=1234',
'y_0=-4321'}
check_proj_params('stere', stereo_offset, other_args)
assert (tuple(np.array(stereo.x_limits) + 1234) ==
stereo_offset.x_limits)
| lgpl-3.0 | 3,732,546,708,278,523,000 | 42.636364 | 77 | 0.572396 | false | 3.40124 | true | false | false |
Nuttycomputer/pan-os-scripts | panexport.py | 2 | 9602 | #!/usr/bin/env python3
# noinspection PyPackageRequirements
from datetime import datetime
import pan.xapi
import tablib
import xmltodict
import yaml
HEADERS_DEFAULT_MAP = {'rule-type': 'universal', 'negate-source': 'no', 'negate-destination': 'no'}
HEADERS_REMOVE = ['option', 'profile-setting', 'disabled', 'log-end', 'log-start', 'category']
HEADERS_ORDER = ['@name', 'action', 'tag', 'rule-type', 'from', 'source', 'negate-source', 'source-user',
'hip-profiles',
'to', 'destination', 'negate-destination', 'application', 'service', 'profile-setting', 'description']
__author__ = 'Jay Shepherd'
class Config:
def __init__(self, filename):
with open(filename, 'r') as stream:
config = yaml.load(stream)
self.top_domain = config['top_domain']
self.firewall_api_key = config['firewall_api_key']
self.firewall_hostnames = config['firewall_hostnames']
def retrieve_firewall_configuration(hostname, api_key, config='running'):
"""
This takes the FQDN of the firewall and retrieves the requested config.
Defaults to running.
:param hostname: Hostname (FQDN) of firewall to retrieve configuration from
:param api_key: API key to access firewall configuration
    :param config: Which config to retrieve, defaults to running.
:return: Dictionary containing firewall configuration
"""
firewall = pan.xapi.PanXapi(hostname=hostname, api_key=api_key)
command = "show config {}".format(config)
firewall.op(cmd=command, cmd_xml=True)
return xmltodict.parse(firewall.xml_result())
def combine_the_rulebase(pushed_config, running_config):
pre_rulebase = safeget(pushed_config, 'policy', 'panorama', 'pre-rulebase', 'security', 'rules', 'entry')
device_rulebase = safeget(running_config, 'config', 'devices', 'entry', 'vsys', 'entry', 'rulebase', 'entry')
post_rulebase = safeget(pushed_config, 'policy', 'panorama', 'post-rulebase', 'security', 'rules', 'entry')
default_rulebase = safeget(pushed_config, 'policy', 'panorama', 'post-rulebase', 'default-security-rules', 'rules',
'entry')
# Combine the pre, on-device, and post rule sets into a single ordered view
combined_rulebase = pre_rulebase + device_rulebase + post_rulebase + default_rulebase
return combined_rulebase
def safeget(dct, *keys):
"""
Takes a dictionary and key path. Checks if key exists and returns value of key
:param dct: Dictionary to iterate over
:param keys: Keys to iterate over
:return: Returns value of key as list if it exists, else returns empty list
"""
dct_as_list = []
for key in keys:
try:
dct = dct[key]
except (KeyError, TypeError):
return list()
if isinstance(dct, list):
return dct
else:
dct_as_list.append(dct)
return dct_as_list
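# Illustrative example (assumed config shape):
#   safeget(cfg, 'policy', 'panorama', 'address', 'entry') returns the list of
#   address entries, or an empty list if any key along the path is missing.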
def get_headers(data_dict, preferred_header_order=None, headers_to_remove=None):
"""
    Takes a nested dictionary and returns headers as a unique list. For PanOS the top level of each dictionary
    is an entry "ID" field, which then contains additional attribute keys with values.
:param data_dict: Dictionary in format correctly
:param preferred_header_order: List of headers. If one or more headers in this list are found in the provided
dictionary, they will be returned in the same order they occur in this list. Headers found in the dict but not in
this list will be sorted and appended to the end of the list.
:param headers_to_remove: Collection of headers which will not appear in the returned list.
:return: list of found headers, in an order approximately following the preferred order
"""
if preferred_header_order is None:
preferred_header_order = []
if headers_to_remove is None:
headers_to_remove = []
scraped_headers = set()
for item in data_dict:
for header in item:
scraped_headers.add(header)
ordered_headers = []
scraped_headers = scraped_headers.difference(set(headers_to_remove))
for header in preferred_header_order:
if header in scraped_headers:
ordered_headers.append(header)
scraped_headers.remove(header)
ordered_headers += sorted(list(scraped_headers))
return ordered_headers
def check_default(object_to_check, default_key, default_map=None):
"""
    Takes an object_to_check, a default_key, and a default_map table. If the object is empty and
    there is a default_key mapping, returns the mapped default.
    :param object_to_check: Python object to check against the table; its type must match the default_key type
    :param default_key: Key to look up in default_map when object_to_check is empty
    :param default_map: Dictionary mapping headers to their default values
    :return: The mapped default if object_to_check is empty and a mapping exists, otherwise object_to_check
"""
if object_to_check is '' and default_key in default_map.keys():
return default_map[default_key]
return object_to_check
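# Example: check_default('', 'rule-type', HEADERS_DEFAULT_MAP) returns 'universal',
# while any non-empty value is returned unchanged.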
def write_to_excel(rule_list, filename, preferred_header_order=None, headers_to_remove=None, default_map=None):
# Initialize Tablib Data
dataset = tablib.Dataset()
# Define headers we would like to include
rule_headers = get_headers(rule_list, preferred_header_order, headers_to_remove)
dataset.headers = ["Order"] + rule_headers
# Add rules to dataset
index_num = 0
for rule in rule_list:
index_num += 1
formatted_rule = [index_num]
for header in rule_headers:
cell = rule.get(header, '')
if isinstance(cell, dict):
cell = cell.get('member', cell)
if isinstance(cell, list):
combined_cell = ''
first_item = True
for item in cell:
if first_item is True:
combined_cell += item
first_item = False
else:
combined_cell += ', {}'.format(item)
formatted_rule.append(combined_cell)
else:
safe_cell = check_default(str(cell), header, default_map)
formatted_rule.append(safe_cell)
dataset.append(formatted_rule)
# Use tablib to write rules
with open(filename, mode='wb') as file:
file.write(dataset.xlsx)
def do_the_things(firewall, api_key, top_domain=''):
"""
This is the primary meat of the script. It takes a firewall and API key and writes out excel
sheets with the rulebase.
:param firewall: Firewall to query
:param api_key: API key to query
    :return:
"""
# "Zhu Li, do the thing!"
# Retrieve both possible configurations from firewall
running_config = retrieve_firewall_configuration(firewall,
api_key,
config='running')
pushed_config = retrieve_firewall_configuration(firewall,
api_key,
config='pushed-shared-policy')
# Store objects from config in separate dictionaries.
# Use helper functions to achieve.
# Safety First
address = safeget(pushed_config, 'policy', 'panorama', 'address', 'entry')
address_groups = safeget(pushed_config, 'policy', 'panorama', 'address-group', 'entry')
combined_rulebase = combine_the_rulebase(pushed_config, running_config)
# Define headers we care about being ordered in the order they should be.
rulebase_headers_order = HEADERS_ORDER
# I'm removing excel columns that I don't want in output based upon stupid stuff.
# Perhaps I don't care.
# Perhaps the fields just don't work correctly because PaloAlto output refuses any consistency.
# Yeah I'm going to go with the latter option.
rulebase_headers_remove = HEADERS_REMOVE
# Remember that consistency thing...
# ... yeah this is to populate the excel fields with known default mappings.
# This is for fields I do need to be in output.
rulebase_default_map = HEADERS_DEFAULT_MAP
# Finally let's write the damn thing
write_to_excel(
combined_rulebase,
get_filename(firewall.strip(top_domain)),
rulebase_headers_order,
rulebase_headers_remove,
rulebase_default_map
)
# I should print something to let user know it worked.
# Dharma says feedback is important for good coding.
print('{} processed. Please check directory for output files.'.format(firewall))
def get_filename(firewall):
"""
Generate an excel spreadsheet filename from a firewall name and the current time.
:param firewall: firewall name
:return: A filename in the format YYYY-MM-DD-{firewall}-combined-rules.xlsx
"""
current_time = datetime.now()
return (
"{year}-"
"{month}-"
"{day}-"
"{firewall}-combined-rules"
".xlsx"
).format(
firewall=firewall,
year=pad_to_two_digits(current_time.year),
month=pad_to_two_digits(current_time.month),
day=pad_to_two_digits(current_time.day),
)
def pad_to_two_digits(n):
"""
Add leading zeros to format a number as at least two digits
:param n: any number
:return: The number as a string with at least two digits
"""
return str(n).zfill(2)
def main():
script_config = Config('config.yml')
for firewall in script_config.firewall_hostnames:
do_the_things(firewall,
script_config.firewall_api_key,
script_config.top_domain)
if __name__ == '__main__':
main()
| mit | 8,419,862,138,909,192,000 | 37.103175 | 119 | 0.639763 | false | 4.03615 | true | false | false |
ptphp/PyLib | src/wmi/service.py | 1 | 2638 | # -*- coding=utf-8 -*-
import win32serviceutil
import win32service
import win32event
import os, sys
import time
import wmi,zlib,json
def log(log_string):
f=open("c:\\log.txt",'a+')
f.write(str(log_string)+"\n\n")
f.close()
def get_sys_info():
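    # Collect CPU, disk, baseboard and BIOS serial numbers via WMI, then fold
    # them into an adler32 checksum used as a simple machine fingerprint.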
syinfo = {}
tmplist = []
encrypt_str = ""
c = wmi.WMI ()
cpu_tmp = []
for cpu in c.Win32_Processor():
        # CPU serial number
cpu_item = {}
#print cpu
encrypt_str = encrypt_str + cpu.ProcessorId.strip()
#print "cpu id:", cpu.ProcessorId.strip()
cpu_item['ProcessorId'] = cpu.ProcessorId.strip()
cpu_item['Name'] = cpu.Name.strip()
cpu_tmp.append(cpu_item)
syinfo['cpu'] = cpu_tmp
dis_tmp = []
for physical_disk in c.Win32_DiskDrive():
dis_itm = {}
dis_itm['Caption'] = physical_disk.Caption.strip()
dis_itm['SerialNumber'] = physical_disk.SerialNumber.strip()
dis_itm['Size'] = long(physical_disk.Size)/1000/1000/1000
dis_tmp.append(dis_itm)
encrypt_str = encrypt_str+physical_disk.SerialNumber.strip()
syinfo['disk'] = dis_tmp
tmp = {}
for board_id in c.Win32_BaseBoard():
#print board_id
        # Motherboard (baseboard) serial number
tmp['SerialNumber'] = board_id.SerialNumber.strip()
tmp['Manufacturer'] = board_id.Manufacturer.strip()
encrypt_str = encrypt_str+board_id.SerialNumber.strip()
syinfo['board'] = tmp
tmp = {}
for bios_id in c.Win32_BIOS():
#print bios_id
tmp['SerialNumber'] = bios_id.SerialNumber.strip()
        # BIOS serial number
encrypt_str = encrypt_str+bios_id.SerialNumber.strip()
syinfo['bios'] = tmp
    # "Encryption" step: adler32 checksum of the concatenated serial numbers
syinfo['encrypt_str'] = zlib.adler32(encrypt_str)
return syinfo
class test1(win32serviceutil.ServiceFramework):
_svc_name_ = "test_python"
_svc_display_name_ = "test_python"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
syinfo = get_sys_info()
while 1:
print syinfo
syinfo = get_sys_info()
log(json.dumps(syinfo))
time.sleep(1)
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
if __name__=='__main__':
win32serviceutil.HandleCommandLine(test1)
| apache-2.0 | -5,676,230,241,672,146,000 | 27.988889 | 77 | 0.583589 | false | 3.284635 | false | false | false |
indikaudagedara/frec | scripts/eig_report.py | 1 | 4377 | #!/usr/bin/python
import sys;
import os;
import glob;
import string;
import imp;
import shutil;
import re;
###############################################################
### set up
if len(sys.argv) != 3:
print "usage:"
print "%s <config file> <output dir>" % sys.argv[0];
sys.exit(1);
OutPath = sys.argv[2];
ConfigFile = OutPath + "/" + sys.argv[1];
if os.path.exists(OutPath) == True:
shutil.rmtree(OutPath);
os.mkdir(OutPath);
shutil.copy(sys.argv[1], ConfigFile);
imp.load_source('Conf', ConfigFile);
import Conf;
ImgDir = OutPath + "/" + os.path.basename(Conf.OutImgDir);
shutil.copytree(Conf.OutImgDir, ImgDir);
###############################################################
### read img names
Pattern = Conf.OutImgDir + "/*.jpg";
print Pattern;
ImgList = glob.glob(Pattern);
Imgs = {};
for i in ImgList:
j = os.path.basename(i).split("_");
if Imgs.has_key(j[0]) == False:
Imgs[j[0]] = [];
Imgs[j[0]].append(os.path.basename(i));
f = open(OutPath + "/db.html", 'w+');
f.write("<html>\n");
for i, j in Imgs.iteritems():
f.write("Person: " + i + "<br>\n");
for k in j:
img = "<img src=\"./SavedImages/%s\">" % k;
f.write(img);
f.write("<br><br>");
f.write("</html>\n");
f.close();
###############################################################
### write self test data in html
def PlotPNG(InFile, OutFile, Src, Dst, Tmp):
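	# Write a temporary gnuplot script (Tmp) that loads InFile from the Src
	# directory and renders it as a PNG named OutFile under the Dst directory.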
TmpFile = open(Tmp, 'w+');
TmpFile.truncate();
TmpFile.seek(0, 0);
TmpFile.write("set term png medium\n");
TmpFile.write("set output \'%s/%s\'\n" % (os.path.abspath(Dst), OutFile));
TmpFile.write("set xr[0:6]\n");
TmpFile.write("load \'%s\'\n" % (InFile));
TmpFile.flush();
Cwd = os.getcwd();
os.chdir(Src);
os.system("gnuplot %s/%s" % (Cwd, TmpFile.name));
os.chdir(Cwd);
TmpFile.close();
def WriteHtml(Id, Protected):
PerHtml = open(OutPath + "/Person_" + Id + ".html", 'w+');
PerHtml.write("Self Test for Person: %s<br>" % Id);
PerHtml.write("<br><br>")
PerHtml.write("<a href=\"./results.html\">HOME</a>");
PerHtml.write("<br><br>")
PerHtml.write("<br><br>")
Tmp = OutPath + "/tmp.p";
for i in Protected:
fDir = Conf.EigenMethod_DataDir + "/SelfTest_" + Id + "/Face_" + i[1];
if os.path.exists(fDir):
PerHtml.write("<a href=\"./Face_%s_%s.html\"> Face %s </a><br>" % (Id, i[1], i[1]));
FaceHtml = open("%s/Face_%s_%s.html" % (OutPath, Id, i[1]), 'w+');
FaceHtml.write("<a href=\"./Person_%s.html\"> Person %s </a><br>" % (Id, Id) );
FaceHtml.write("<a href=\"./SavedImages/%s\"> Tested Image </a><br>" % (i[0]));
FaceHtml.write("<br><br>");
FaceHtml.write("<br><br>");
avgImg = "avg_%s_%s.png" % (Id, i[1]);
PlotPNG("person_avg.p", avgImg, fDir, OutPath, Tmp);
FaceHtml.write("Average of persons\n");
FaceHtml.write("<br>");
FaceHtml.write("<img src=\"./%s\"></img>" % avgImg);
FaceHtml.write("<br>");
avgWithInput = glob.glob(Conf.EigenMethod_DataDir + "/SelfTest_%s/Face_%s/avg_with_input*" % (Id, i[1]));
if len(avgWithInput) > 0:
avgWithInputImg = "avg_input_%s_%s.png" % (Id, i[1]);
#print os.path.basename(avgWithInput[0]);
PlotPNG(os.path.basename(avgWithInput[0]), avgWithInputImg, fDir, OutPath, Tmp);
FaceHtml.write("Average with Input Image\n");
FaceHtml.write("<br>");
FaceHtml.write("<img src=\"./%s\"></img>" % avgWithInputImg);
FaceHtml.write("<br>");
FaceHtml.close();
PerHtml.close();
###############################################################
### read self test info
Pattern = Conf.EigenMethod_DataDir + "/SelfTest_*";
PersonDir = glob.glob(Pattern);
ResultsHtml = open(OutPath + "/results.html", 'w+');
ResultsHtml.write('#'*30 + "<br>");
ResultsHtml.write("Results<br>");
ResultsHtml.write('#'*30 + "<br>");
ResultsHtml.write("<br><br>");
ResultsHtml.write("<a href=\"./db.html\">Database</a>");
ResultsHtml.write("<br><br>");
for i in PersonDir:
BaseDir = os.path.basename(i);
Id = BaseDir.split("_")[1];
f = open(i + "/SelfTest." + Id + ".log");
Protected = [];
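    # Each matching log line is assumed to look roughly like (illustrative):
    #   Protected : <image file> : <face index> : ...
    # Only the colon-separated fields after the leading "Protected" are kept.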
for line in f:
j = re.search("Protected", line);
if j != None:
k = string.split(string.rstrip(line, '\n'), ":");
k = map(string.rstrip, k);
k = map(string.lstrip, k);
Protected.append(k[1:]);
f.close();
WriteHtml(Id, Protected)
ResultsHtml.write("<a href=\"./Person_%s.html\">Person %s</a>" % (Id, Id));
ResultsHtml.write("<br>");
ResultsHtml.close();
| gpl-2.0 | 6,929,900,721,357,966,000 | 24.300578 | 108 | 0.566598 | false | 2.665652 | true | false | false |
s3nn/NessusExporterMerge | nessus_exporter.py | 1 | 7768 | #!/usr/bin/python
# Modified from averagesecurityguy
# Props to him
# https://github.com/averagesecurityguy/
#
# Command-line parser taken from from below:
# by Konrads Smelkovs (https://github.com/truekonrads)
#
# merger.py
# based off: http://cmikavac.net/2011/07/09/merging-multiple-nessus-scans-python-script/
# by: mastahyeti
#
# Everything glued together by _sen
import requests
import json
import time
import argparse
import os
import sys
import getpass
import xml.etree.ElementTree as etree
# Hard-coded variables
requests.packages.urllib3.disable_warnings()
verify = False
token = ''
parser = argparse.ArgumentParser(description='Download Nesuss results in bulk / Merge Nessus files')
parser.add_argument('--url', '-u', type=str, default='localhost', help="url to nessus instance! This or --merge must be specified")
parser.add_argument('--format','-F', type=str, default="html", choices=['nessus', 'html'], help='Format of nesuss output, defaults to html')
parser.add_argument('-o', '--output', type=str, default=os.getcwd(), help='Output directory')
parser.add_argument('-m', '--merge', action='store_true', help='Merge all .nessus files in output directory')
parser.add_argument('-e', '--export', action='store_true', help='Export files')
parser.add_argument('--folder','-f', type=str, help='Scan Folder from which to download', default=0)
args = parser.parse_args()
def smart_str(x):
if isinstance(x, unicode):
return unicode(x).encode("utf-8")
elif isinstance(x, int) or isinstance(x, float):
return str(x)
return x
def build_url(resource):
nessus_url = "https://"+args.url+":8834"
return '{0}{1}'.format(nessus_url, resource)
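# For example, with "--url nessus.example.local" (an illustrative hostname),
# build_url('/scans') returns 'https://nessus.example.local:8834/scans'.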
def connect(method, resource, data=None):
"""
Send a request
Send a request to Nessus based on the specified data. If the session token
is available add it to the request. Specify the content type as JSON and
convert the data to JSON format.
"""
headers = {'X-Cookie': 'token={0}'.format(token),
'content-type': 'application/json'}
data = json.dumps(data)
if method == 'POST':
r = requests.post(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'PUT':
r = requests.put(build_url(resource), data=data, headers=headers, verify=verify)
elif method == 'DELETE':
r = requests.delete(build_url(resource), data=data, headers=headers, verify=verify)
else:
r = requests.get(build_url(resource), params=data, headers=headers, verify=verify)
# Exit if there is an error.
if r.status_code != 200:
e = r.json()
print e['error']
sys.exit()
# When downloading a scan we need the raw contents not the JSON data.
if 'download' in resource:
return r.content
else:
return r.json()
def login(usr, pwd):
"""
Login to nessus.
"""
login = {'username': usr, 'password': pwd}
data = connect('POST', '/session', data=login)
print data['token']
return data['token']
def logout():
"""
Logout of nessus.
"""
connect('DELETE', '/session')
def get_format():
# TODO: Add support for more formats if needed
return args.format
def get_scans():
"""
Get Scans from JSON data
"""
scans_to_export = {}
data = connect('GET', '/scans')
all_scans = data['scans']
# Create dictionary mapping scanid:scan_name (This case scan_name = host ip)
folder = args.folder
for scans in all_scans:
if scans['folder_id'] == int(folder):
scans_to_export[scans['id']] = smart_str(scans['name'])
return scans_to_export
def export_status(sid, fid):
"""
Check export status
Check to see if the export is ready for download.
"""
data = connect('GET', '/scans/{0}/export/{1}/status'.format(sid, fid))
return data['status'] == 'ready'
def export(scans):
"""
Make an export request
Request an export of the scan results for the specified scan and
    historical run. The format comes from the --format option (nessus or html
    here; Nessus itself also supports pdf, csv, and db). Once the request
    is made, we have to wait for the export to be ready.
"""
# get format for export and handle POST params
export_format = get_format()
params = {'format': export_format, 'chapters': 'vuln_by_host'}
fids = {}
# Create dictionary mapping scan_id:file_id (File ID is used to download the file)
for scan_id in scans.keys():
# Attempt to Export scans
print "Exporting {0}".format(scans[scan_id])
data = connect('POST', '/scans/{0}/export'.format(scan_id), data=params)
fids[scan_id] = data['file']
while export_status(scan_id, fids[scan_id]) is False:
time.sleep(5)
# Attempt to Download scans
print "Downloading {0}".format(scans[scan_id])
data = connect('GET', '/scans/{0}/export/{1}/download'.format(scan_id, fids[scan_id]))
scan_name = '{0}.{1}'.format(scans[scan_id],params['format'])
scan_name_duplicate = 0
while True:
if scan_name in os.listdir(args.output):
print "Duplicate Scan Name!"
scan_name_duplicate += 1
scan_name = '{0}_{1}.{2}'.format(scans[scan_id], str(scan_name_duplicate), params['format'])
else:
break
print('Saving scan results to {0}.'.format(scan_name))
with open(os.path.join(args.output, scan_name), 'w') as f:
f.write(data)
print "All Downloads complete! hax0r"
def merge():
first = 1
for fileName in os.listdir(args.output):
fileName = os.path.join(args.output, fileName)
if ".nessus" in fileName:
print(":: Parsing", fileName)
if first:
mainTree = etree.parse(fileName)
report = mainTree.find('Report')
report.attrib['name'] = 'Merged Report'
first = 0
else:
tree = etree.parse(fileName)
for host in tree.findall('.//ReportHost'):
existing_host = report.find(".//ReportHost[@name='"+host.attrib['name']+"']")
                    if existing_host is None:
print "adding host: " + host.attrib['name']
report.append(host)
else:
for item in host.findall('ReportItem'):
                            if existing_host.find("ReportItem[@port='"+ item.attrib['port'] +"'][@pluginID='"+ item.attrib['pluginID'] +"']") is None:
print "adding finding: " + item.attrib['port'] + ":" + item.attrib['pluginID']
existing_host.append(item)
print(":: => done.")
with open(os.path.join(args.output, "nessus_merged.nessus"), 'w') as merged_file:
mainTree.write(merged_file, encoding="utf-8", xml_declaration=True)
print "All .nessus files merged to 'nessus_merged.nessus' file in current dir"
if __name__ == '__main__':
# Download Files
if args.export or args.merge:
if args.export:
# Login
username = raw_input("Username: ")
password = getpass.getpass("Password: ")
print('Logging in....')
token = login(username, password)
print("Getting scan List....")
scans = get_scans()
print('Downloading and Exporting Scans...')
export(scans)
# Merge files
if args.merge:
merge()
else:
print parser.format_usage() # removes newline + None when print_usage() is used
| gpl-3.0 | 4,584,339,561,658,623,500 | 33.678571 | 146 | 0.597451 | false | 3.750845 | false | false | false |
google-research/google-research | ncsnv3/models/layersv3.py | 1 | 6564 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""Layers for defining NCSNv3.
"""
from . import layers
from . import up_or_down_sampling
import flax.nn as nn
import jax
import jax.numpy as jnp
import numpy as np
conv1x1 = layers.ddpm_conv1x1
conv3x3 = layers.ddpm_conv3x3
NIN = layers.NIN
default_init = layers.default_init
class GaussianFourierProjection(nn.Module):
"""Gaussian Fourier embeddings for noise levels."""
def apply(self, x, embedding_size=256, scale=1.0):
W = self.param('W', (embedding_size,),
jax.nn.initializers.normal(stddev=scale))
W = jax.lax.stop_gradient(W)
x_proj = x[:, None] * W[None, :] * 2 * jnp.pi
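    # The noise levels x are a 1-D batch; the result has shape
    # (batch, 2 * embedding_size): sine features followed by cosine features.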
return jnp.concatenate([jnp.sin(x_proj), jnp.cos(x_proj)], axis=-1)
class Combine(nn.Module):
"""Combine information from skip connections."""
def apply(self, x, y, method='cat'):
h = conv1x1(x, y.shape[-1])
if method == 'cat':
return jnp.concatenate([h, y], axis=-1)
elif method == 'sum':
return h + y
else:
raise ValueError(f'Method {method} not recognized.')
class AttnBlockv3(nn.Module):
"""Channel-wise self-attention block. Modified from DDPM."""
def apply(self, x, normalize, skip_rescale=False, init_scale=0.):
B, H, W, C = x.shape
h = normalize(x, num_groups=min(x.shape[-1] // 4, 32))
q = NIN(h, C)
k = NIN(h, C)
v = NIN(h, C)
w = jnp.einsum('bhwc,bHWc->bhwHW', q, k) * (int(C) ** (-0.5))
w = jnp.reshape(w, (B, H, W, H * W))
w = jax.nn.softmax(w, axis=-1)
w = jnp.reshape(w, (B, H, W, H, W))
h = jnp.einsum('bhwHW,bHWc->bhwc', w, v)
h = NIN(h, C, init_scale=init_scale)
if not skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
class Upsample(nn.Module):
def apply(self, x, out_ch=None, with_conv=False, fir=False,
fir_kernel=[1, 3, 3, 1]):
B, H, W, C = x.shape
out_ch = out_ch if out_ch else C
if not fir:
h = jax.image.resize(x, (x.shape[0], H * 2, W * 2, C), 'nearest')
if with_conv:
h = conv3x3(h, out_ch)
else:
if not with_conv:
h = up_or_down_sampling.upsample_2d(x, fir_kernel, factor=2)
else:
h = up_or_down_sampling.Conv2d(
x,
out_ch,
kernel=3,
up=True,
resample_kernel=fir_kernel,
bias=True,
kernel_init=default_init())
assert h.shape == (B, 2 * H, 2 * W, out_ch)
return h
class Downsample(nn.Module):
def apply(self, x, out_ch=None, with_conv=False, fir=False,
fir_kernel=[1, 3, 3, 1]):
B, H, W, C = x.shape
out_ch = out_ch if out_ch else C
if not fir:
if with_conv:
x = conv3x3(x, out_ch, stride=2)
else:
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2), padding='SAME')
else:
if not with_conv:
x = up_or_down_sampling.downsample_2d(x, fir_kernel, factor=2)
else:
x = up_or_down_sampling.Conv2d(
x,
out_ch,
kernel=3,
down=True,
resample_kernel=fir_kernel,
bias=True,
kernel_init=default_init())
assert x.shape == (B, H // 2, W // 2, out_ch)
return x
class ResnetBlockDDPMv3(nn.Module):
"""ResBlock adapted from DDPM."""
def apply(self,
x,
act,
normalize,
temb=None,
out_ch=None,
conv_shortcut=False,
dropout=0.1,
train=True,
skip_rescale=False,
init_scale=0.):
B, H, W, C = x.shape
out_ch = out_ch if out_ch else C
h = act(normalize(x, num_groups=min(x.shape[-1] // 4, 32)))
h = conv3x3(h, out_ch)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += nn.Dense(
act(temb), out_ch, kernel_init=default_init())[:, None, None, :]
h = act(normalize(h, num_groups=min(h.shape[-1] // 4, 32)))
h = nn.dropout(h, dropout, deterministic=not train)
h = conv3x3(h, out_ch, init_scale=init_scale)
if C != out_ch:
if conv_shortcut:
x = conv3x3(x, out_ch)
else:
x = NIN(x, out_ch)
if not skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
class ResnetBlockBigGANv3(nn.Module):
"""ResBlock adapted from BigGAN."""
def apply(self,
x,
act,
normalize,
up=False,
down=False,
temb=None,
out_ch=None,
dropout=0.1,
fir=False,
fir_kernel=[1, 3, 3, 1],
train=True,
skip_rescale=True,
init_scale=0.):
B, H, W, C = x.shape
out_ch = out_ch if out_ch else C
h = act(normalize(x, num_groups=min(x.shape[-1] // 4, 32)))
if up:
if fir:
h = up_or_down_sampling.upsample_2d(h, fir_kernel, factor=2)
x = up_or_down_sampling.upsample_2d(x, fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_upsample_2d(h, factor=2)
x = up_or_down_sampling.naive_upsample_2d(x, factor=2)
elif down:
if fir:
h = up_or_down_sampling.downsample_2d(h, fir_kernel, factor=2)
x = up_or_down_sampling.downsample_2d(x, fir_kernel, factor=2)
else:
h = up_or_down_sampling.naive_downsample_2d(h, factor=2)
x = up_or_down_sampling.naive_downsample_2d(x, factor=2)
h = conv3x3(h, out_ch)
# Add bias to each feature map conditioned on the time embedding
if temb is not None:
h += nn.Dense(
act(temb), out_ch, kernel_init=default_init())[:, None, None, :]
h = act(normalize(h, num_groups=min(h.shape[-1] // 4, 32)))
h = nn.dropout(h, dropout, deterministic=not train)
h = conv3x3(h, out_ch, init_scale=init_scale)
if C != out_ch or up or down:
x = conv1x1(x, out_ch)
if not skip_rescale:
return x + h
else:
return (x + h) / np.sqrt(2.)
| apache-2.0 | 7,303,468,976,946,167,000 | 28.434978 | 79 | 0.570232 | false | 2.94614 | false | false | false |
Zouyiran/ryu | ryu/app/reduce_t/route_calculator.py | 1 | 3647 | # -*- coding: utf-8 -*-
import networkx as nx
'''
###reduce_t###
--> route calculator
singleton pattern
1) mpls path calculation
2) end to end route calculation
'''
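# Illustrative usage sketch (not part of the original module). The adjacency
# matrix is assumed to be a dict of dicts of 0/1 link flags keyed by dpid, and
# dpids_to_access_port maps each dpid to its access ports (only edge switches
# have a non-empty list):
#
#   calc = RouteCalculator.get_instance()
#   matrix = {1: {1: 0, 2: 1, 3: 1}, 2: {1: 1, 2: 0, 3: 1}, 3: {1: 1, 2: 1, 3: 0}}
#   access = {1: [1], 2: [], 3: [2]}
#   calc.route_table = calc.get_route_table(matrix, access)
#   calc.get_route(1, 3)    # -> [1, 3]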
class RouteCalculator(object):
    # singleton
_instance = None
def __init__(self):
super(RouteCalculator, self).__init__()
# {
# (dpid,dpid):[[dpid,dpid,dpid],[dpid,dpid,dpid,dpid],...],
# (dpid,dpid):[[dpid,dpid,dpid],[dpid,dpid,dpid,dpid],...],
# ...}
self.path_table = dict()
self.pre_path_table = dict()
self.route_table = dict()
self.pre_route_table = dict()
@staticmethod
def get_instance():
if not RouteCalculator._instance:
RouteCalculator._instance = RouteCalculator()
return RouteCalculator._instance
def get_path_table(self, matrix, dpids_to_access_port):
if matrix:
dpids = matrix.keys()
g = nx.DiGraph()
g.add_nodes_from(dpids)
for i in dpids:
for j in dpids:
if matrix[i][j] == 1:
g.add_edge(i,j,weight=1)
edge_dpids = []
for each_dpid in dpids_to_access_port:
if len(dpids_to_access_port[each_dpid]) != 0:# only for edge_switches
edge_dpids.append(each_dpid)
return self.__graph_to_path(g, edge_dpids)
def __graph_to_path(self, g, edge_dpids): # {(i,j):[i,k,l,j],(i,j):[],...}
path_table = dict()
for i in edge_dpids:
for j in edge_dpids:
if i != j:
path = []
try:
temp = nx.shortest_path(g,i,j)
if len(temp) > 4: # 2
path = temp
except nx.exception.NetworkXNoPath:
pass
path_table[(i,j)] = path
return path_table
def get_route_table(self, matrix, dpids_to_access_port):
if matrix:
dpids = matrix.keys()
g = nx.DiGraph()
g.add_nodes_from(dpids)
for i in dpids:
for j in dpids:
if matrix[i][j] == 1:
g.add_edge(i,j,weight=1)
edge_dpids = []
for each_dpid in dpids_to_access_port:
if len(dpids_to_access_port[each_dpid]) != 0:
edge_dpids.append(each_dpid)
return self.__graph_to_route(g, edge_dpids)
def __graph_to_route(self, g, edge_dpids):
route_table = dict()
for i in edge_dpids:
for j in edge_dpids:
if i != j:
route = []
try:
route = nx.shortest_path(g,i,j)
except nx.exception.NetworkXNoPath:
pass
route_table[(i,j)] = route
return route_table
def get_path(self, src_dpid, dst_dpid):
path = None
if src_dpid != dst_dpid:
path = self.path_table[(src_dpid,dst_dpid)]
return path
def get_route(self, src_dpid, dst_dpid):
route = None
if src_dpid != dst_dpid:
route = self.route_table[(src_dpid,dst_dpid)]
return route
#---------------------Print_to_debug------------------------
def show_path_table(self):
print "---------------------path_table---------------------"
for pair in self.path_table.keys():
print("pair:",pair)
for each in self.path_table[pair]:
print each,
print"" | apache-2.0 | 7,582,138,831,157,655,000 | 31.571429 | 85 | 0.46833 | false | 3.830882 | false | false | false |
pmdscully/python_snippets | function_validation_with_decorators/function_validators.py | 1 | 5428 | """
Module provides function argument type and return type validators for the @accepts(a,b,c) and @returns(x,y,z) function decorators.
This simplifies the formalisation of all function validation, which is particularly useful when defining API interfaces.
Usage Example:
    @accepts(int, int)
    @returns(int)
    def add(a, b):
        return a + b

    add(1, "3")
Original Author: Jackson Cooper
Article: Validate Python Function Parameter & Return Types with Decorators
Published at: http://pythoncentral.io/validate-python-function-parameters-and-return-types-with-decorators/
Published: Tuesday 20th August 2013
Last Updated: Friday 23rd August 2013
"""
import functools
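# Minimal stand-in for the ordinal() helper referenced in decorator_wrapper()
# below; the original article defines its own version, which is missing from
# this snippet. It only needs to turn 1 -> '1st', 2 -> '2nd', 3 -> '3rd', ...
def ordinal(num):
    if 10 <= num % 100 <= 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
    return '%d%s' % (num, suffix)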
def accepts(*accepted_arg_types):
'''
A decorator to validate the parameter types of a given function.
It is passed a tuple of types. eg. (<type 'tuple'>, <type 'int'>)
Note: It doesn't do a deep check, for example checking through a
tuple of types. The argument passed must only be types.
'''
def accept_decorator(validate_function):
# Check if the number of arguments to the validator
# function is the same as the arguments provided
# to the actual function to validate. We don't need
# to check if the function to validate has the right
# amount of arguments, as Python will do this
# automatically (also with a TypeError).
@functools.wraps(validate_function)
def decorator_wrapper(*function_args, **function_args_dict):
            if len(function_args) != len(accepted_arg_types):
raise InvalidArgumentNumberError(validate_function.__name__)
# We're using enumerate to get the index, so we can pass the
# argument number with the incorrect type to ArgumentValidationError.
for arg_num, (actual_arg, accepted_arg_type) in enumerate(zip(function_args, accepted_arg_types)):
if not type(actual_arg) is accepted_arg_type:
ord_num = ordinal(arg_num + 1)
raise ArgumentValidationError(ord_num,
validate_function.__name__,
accepted_arg_type)
return validate_function(*function_args)
return decorator_wrapper
return accept_decorator
def returns(*accepted_return_type_tuple):
'''
Validates the return type. Since there's only ever one
return type, this makes life simpler. Along with the
accepts() decorator, this also only does a check for
the top argument. For example you couldn't check
(<type 'tuple'>, <type 'int'>, <type 'str'>).
In that case you could only check if it was a tuple.
'''
def return_decorator(validate_function):
# No return type has been specified.
if len(accepted_return_type_tuple) == 0:
raise TypeError('You must specify a return type.')
@functools.wraps(validate_function)
def decorator_wrapper(*function_args):
# More than one return type has been specified.
if len(accepted_return_type_tuple) > 1:
raise TypeError('You must specify one return type.')
# Since the decorator receives a tuple of arguments
# and the is only ever one object returned, we'll just
# grab the first parameter.
accepted_return_type = accepted_return_type_tuple[0]
# We'll execute the function, and
# take a look at the return type.
return_value = validate_function(*function_args)
return_value_type = type(return_value)
if return_value_type is not accepted_return_type:
raise InvalidReturnType(return_value_type,
validate_function.__name__)
return return_value
return decorator_wrapper
return return_decorator
class ArgumentValidationError(ValueError):
'''
Raised when the type of an argument to a function is not what it should be.
'''
def __init__(self, arg_num, func_name, accepted_arg_type):
self.error = 'The {0} argument of {1}() is not a {2}'.format(arg_num,
func_name,
accepted_arg_type)
def __str__(self):
return self.error
class InvalidArgumentNumberError(ValueError):
'''
Raised when the number of arguments supplied to a function is incorrect.
Note that this check is only performed from the number of arguments
specified in the validate_accept() decorator. If the validate_accept()
call is incorrect, it is possible to have a valid function where this
will report a false validation.
'''
def __init__(self, func_name):
self.error = 'Invalid number of arguments for {0}()'.format(func_name)
def __str__(self):
return self.error
class InvalidReturnType(ValueError):
'''
As the name implies, the return value is the wrong type.
'''
def __init__(self, return_type, func_name):
self.error = 'Invalid return type {0} for {1}()'.format(return_type,
func_name)
def __str__(self):
return self.error
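if __name__ == '__main__':
    # Small illustrative self-check (not part of the original snippet):
    # a correct call passes straight through, while a wrong argument type
    # raises ArgumentValidationError.
    @accepts(int, int)
    @returns(int)
    def add(a, b):
        return a + b

    assert add(1, 2) == 3
    try:
        add(1, "3")
    except ArgumentValidationError as error:
        print(error)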
| apache-2.0 | 7,686,753,299,474,606,000 | 38.05036 | 125 | 0.611275 | false | 4.53467 | false | false | false |
googleinterns/data-dependency-graph-analysis | graph_generation/connection_generator.py | 1 | 12654 | """
This module implements methods for generating random connections between nodes in a graph.
Method generate() will create all the necessary connections for the graph:
dataset <-> dataset collection
system <-> system collection
dataset collection <-> collection
system collection <-> collection
dataset read <-> system input
dataset write <-> system output
"""
from itertools import islice
import random
class ConnectionGenerator:
"""
A class to generate random connections between node ids, based on distribution maps.
...
Attributes:
dataset_count: Integer of how many datasets are in a graph.
dataset_count_map: Dictionary int:int that maps number of datasets in collection to count of its collections.
system_count: Integer of how many systems are in a graph.
system_count_map: Dictionary int:int that maps number of systems in collection to count of system collections.
dataset_read_count: Integer of how many dataset reads are in a graph.
dataset_write_count: Integer of how many dataset writes are in a graph.
system_input_count: Integer of how many system inputs are in a graph.
system_output_count: Integer of how many system outputs are in a graph.
dataset_read_count_map: Dictionary int:int that maps number of system inputs of dataset read to count of
dataset reads.
system_input_count_map: Dictionary int:int that maps number of dataset reads by system input to count of
system inputs.
dataset_write_count_map: Dictionary int:int that maps number of system outputs of dataset write to count of
dataset writes.
system_output_count_map: Dictionary int:int that maps number of dataset writes by system output to count of
system outputs.
dataset_collections_conn_collection: Dictionary int:[int] that maps collection id to dataset collection ids.
system_collections_conn_collection: Dictionary int:[int] that maps collection id to system collection ids.
datasets_conn_collection: Dictionary int:[int] that maps dataset collection id to dataset ids.
systems_conn_collection: Dictionary int:[int] that maps system collection id to system ids.
dataset_read_conn_systems: Dictionary int:[int] that maps dataset read id to system ids this dataset inputs to.
dataset_write_conn_systems: Dictionary int:[int] that maps dataset write id to system ids this dataset outputs from.
Methods:
get_one_to_many_connections()
Creates connections between an element and a group. Each element belongs to one group exactly.
get_many_to_many_connections()
Creates connections between two groups with many to many relationship.
_dataset_to_dataset_collection()
Generates dataset - dataset collection connections.
_system_to_system_collection()
Generates system - system collection connections.
_dataset_read_to_system_input()
Generates connections between dataset reads and system inputs.
_dataset_write_to_system_output()
Generates connections between dataset write and system outputs.
generate()
Generates all the needed connections for data dependency mapping graph.
"""
def __init__(self, dataset_params, system_params, dataset_to_system_params, collection_params):
"""
Args:
dataset_params: DatasetParams object.
system_params: SystemParams object.
dataset_to_system_params: DatasetToSystemParams object.
collection_params: CollectionParams object.
"""
self.dataset_count = dataset_params.dataset_count
self.dataset_count_map = collection_params.dataset_count_map
self.dataset_collection_count = collection_params.dataset_collection_count
self.dataset_collection_count_map = collection_params.dataset_collection_count_map
self.system_count = system_params.system_count
self.system_count_map = collection_params.system_count_map
self.system_collection_count = collection_params.system_collection_count
self.system_collection_count_map = collection_params.system_collection_count_map
self.dataset_read_count = dataset_to_system_params.dataset_read_count
self.dataset_write_count = dataset_to_system_params.dataset_write_count
self.system_input_count = dataset_to_system_params.system_input_count
self.system_output_count = dataset_to_system_params.system_output_count
self.dataset_read_count_map = dataset_to_system_params.dataset_read_count_map
self.system_input_count_map = dataset_to_system_params.system_input_count_map
self.dataset_write_count_map = dataset_to_system_params.dataset_write_count_map
self.system_output_count_map = dataset_to_system_params.system_output_count_map
self.dataset_collections_conn_collection = {}
self.system_collections_conn_collection = {}
self.datasets_conn_collection = {}
self.systems_conn_collection = {}
self.dataset_read_conn_systems = {}
self.dataset_write_conn_systems = {}
@staticmethod
def get_one_to_many_connections(element_count, element_count_map):
"""Generate group id for each element, based on number of element in group distribution.
Args:
element_count: Total number of elements.
element_count_map: Dictionary int:int that maps element count in a group to number of groups with that count.
Returns:
Dictionary int:[int] that maps group id to a list of element ids.
"""
# Create element ids.
element_values = list(range(1, element_count + 1))
# Get number of elements for each group id from their count.
elements_per_group = [i for i in element_count_map for _ in range(element_count_map[i])]
# Randomise element ids and group ids.
random.shuffle(element_values)
random.shuffle(elements_per_group)
# Split element ids into chunks to get connections for each group.
group_to_elements = {}
last_index = 0
for i in range(len(elements_per_group)):
group_to_elements[i + 1] = element_values[last_index:last_index + elements_per_group[i]]
last_index += elements_per_group[i]
# In case we don't have a full config - assign rest of elements to a last group.
        if last_index < element_count:
group_to_elements[len(elements_per_group)] += element_values[last_index:]
return group_to_elements
@staticmethod
def get_many_to_many_connections(element_1_count, element_2_count, element_1_count_map, element_2_count_map):
"""Generates random connections between elements of type 1 and type 2 that have many-to-many relationship.
Generation is based on element count maps. The output distribution is expected to be exact for most counts,
except for large element group outliers.
Args:
element_1_count: Total number of elements of type 1.
element_2_count: Total number of elements of type 2.
element_1_count_map: Dictionary int:int that maps element 1 count in element 2 group to number of elements 2.
element_2_count_map: Dictionary int:int that maps element 2 count in element 1 group to number of elements 1.
Returns:
Dictionary that maps group 1 id to a list of group 2 ids.
"""
# Count zeros for each group.
element_1_zeros = element_1_count_map[0] if 0 in element_1_count_map else 0
element_2_zeros = element_2_count_map[0] if 0 in element_2_count_map else 0
# Create element ids.
element_1_values = list(range(1, element_1_count - element_1_zeros + 1))
element_2_values = list(range(1, element_2_count - element_2_zeros + 1))
# Get number of elements in each group and remove groups with 0 elements.
elements_per_group_1 = [i for i in element_1_count_map for j in range(element_1_count_map[i]) if i != 0]
elements_per_group_2 = [i for i in element_2_count_map for j in range(element_2_count_map[i]) if i != 0]
element_1_group_counter = {i + 1: elements_per_group_1[i] for i in range(len(elements_per_group_1))}
element_2_group_counter = {i + 1: elements_per_group_2[i] for i in range(len(elements_per_group_2))}
# Create connection dictionary.
element_1_conn_element_2 = {i: set() for i in element_1_values}
# Loop until any group runs out of elements.
while element_1_values and element_2_values:
# Generate a random connection
element_1_gen = random.choice(element_1_values)
element_2_gen = random.choice(element_2_values)
# Check if connection doesn't already exist.
if not element_2_gen in element_1_conn_element_2[element_1_gen]:
# Add to existing connections and reduce count.
element_1_conn_element_2[element_1_gen].add(element_2_gen)
element_1_group_counter[element_1_gen] -= 1
element_2_group_counter[element_2_gen] -= 1
# If have all needed number of connections, remove id from possible options.
if element_1_group_counter[element_1_gen] == 0:
element_1_values.remove(element_1_gen)
if element_2_group_counter[element_2_gen] == 0:
element_2_values.remove(element_2_gen)
# Check if all leftover elements aren't already included in this group.
elif set(element_2_values).issubset(element_1_conn_element_2[element_1_gen]):
element_1_values.remove(element_1_gen)
return element_1_conn_element_2
def _system_collection_to_collection(self):
"""Generates collection - system collection one to many connections."""
self.system_collections_conn_collection = self.get_one_to_many_connections(self.system_collection_count,
self.system_collection_count_map)
def _dataset_collection_to_collection(self):
"""Generates collection - dataset collection one to many connections."""
self.dataset_collections_conn_collection = self.get_one_to_many_connections(self.dataset_collection_count,
self.dataset_collection_count_map)
def _dataset_to_dataset_collection(self):
"""Generates dataset collection - dataset one to many connections."""
self.datasets_conn_collection = self.get_one_to_many_connections(self.dataset_count, self.dataset_count_map)
def _system_to_system_collection(self):
"""Generates system collection - system one to many connections."""
self.systems_conn_collection = self.get_one_to_many_connections(self.system_count, self.system_count_map)
def _dataset_read_to_system_input(self):
"""Generates dataset reads and system inputs many to many connections."""
self.dataset_read_conn_systems = self.get_many_to_many_connections(self.dataset_read_count,
self.system_input_count,
self.dataset_read_count_map,
self.system_input_count_map)
def _dataset_write_to_system_output(self):
"""Generates dataset write and system outputs many to many connections."""
self.dataset_write_conn_systems = self.get_many_to_many_connections(self.dataset_write_count,
self.system_output_count,
self.dataset_write_count_map,
self.system_output_count_map)
def generate(self):
"""Generate all connections for a graph."""
self._dataset_collection_to_collection()
self._system_collection_to_collection()
self._dataset_to_dataset_collection()
self._system_to_system_collection()
self._dataset_read_to_system_input()
self._dataset_write_to_system_output()
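if __name__ == "__main__":
    # Small illustrative run (not part of the original module) using only the
    # static helpers, so no parameter objects are required. Ten datasets are
    # split into four collections: three collections of 2 and one of 4.
    groups = ConnectionGenerator.get_one_to_many_connections(
        element_count=10, element_count_map={2: 3, 4: 1})
    print(groups)

    # Four dataset reads and four system inputs, each used in exactly one
    # read/input connection.
    reads_to_inputs = ConnectionGenerator.get_many_to_many_connections(
        element_1_count=4, element_2_count=4,
        element_1_count_map={1: 4}, element_2_count_map={1: 4})
    print(reads_to_inputs)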
| apache-2.0 | 2,970,110,968,751,282,700 | 52.846809 | 124 | 0.642643 | false | 4.374006 | false | false | false |
bgribble/mfp | mfp/gui/modes/patch_edit.py | 1 | 8028 | #! /usr/bin/env python
'''
patch_edit.py: PatchEdit major mode
Copyright (c) 2010 Bill Gribble <[email protected]>
'''
from ..input_mode import InputMode
from .autoplace import AutoplaceMode
from .selection import SingleSelectionEditMode, MultiSelectionEditMode
from ..text_element import TextElement
from ..processor_element import ProcessorElement
from ..connection_element import ConnectionElement
from ..message_element import MessageElement
from ..enum_element import EnumElement
from ..plot_element import PlotElement
from ..slidemeter_element import FaderElement, BarMeterElement, DialElement
from ..via_element import SendViaElement, ReceiveViaElement
from ..via_element import SendSignalViaElement, ReceiveSignalViaElement
from ..button_element import BangButtonElement, ToggleButtonElement, ToggleIndicatorElement
class PatchEditMode (InputMode):
def __init__(self, window):
self.manager = window.input_mgr
self.window = window
self.autoplace_mode = None
self.autoplace_x = None
self.autoplace_y = None
self.selection_edit_mode = None
InputMode.__init__(self, "Edit patch", "Edit")
self.bind('ESC', self.window.control_major_mode, "Exit edit mode")
self.bind("p", lambda: self.add_element(ProcessorElement),
"Add processor box")
self.bind("m", lambda: self.add_element(MessageElement),
"Add message box")
self.bind("n", lambda: self.add_element(EnumElement),
"Add number box")
self.bind("t", lambda: self.add_element(TextElement),
"Add text comment")
self.bind("u", lambda: self.add_element(ToggleButtonElement),
"Add toggle button")
self.bind("g", lambda: self.add_element(BangButtonElement),
"Add bang button")
self.bind("i", lambda: self.add_element(ToggleIndicatorElement),
"Add on/off indicator")
self.bind("s", lambda: self.add_element(FaderElement),
"Add slider")
self.bind("b", lambda: self.add_element(BarMeterElement),
"Add bar meter")
self.bind("d", lambda: self.add_element(DialElement),
"Add dial control")
self.bind("x", lambda: self.add_element(PlotElement),
"Add X/Y plot")
self.bind("v", lambda: self.add_element(SendViaElement),
"Add send message via")
self.bind("V", lambda: self.add_element(ReceiveViaElement),
"Add receive message via")
self.bind("A-v", lambda: self.add_element(SendSignalViaElement),
"Add send signal via")
self.bind("A-V", lambda: self.add_element(ReceiveSignalViaElement),
"Add receive signal via")
self.bind("C-x", self.cut, "Cut selection to clipboard")
self.bind("C-c", self.copy, "Copy selection to clipboard")
self.bind("C-v", self.paste, "Paste clipboard to selection")
self.bind("C-d", self.duplicate, "Duplicate selection")
self.bind("C-n", self.window.layer_new, "Create new layer")
self.bind("C-N", self.window.layer_new_scope, "Create new layer in a new scope")
self.bind("C-U", self.window.layer_move_up, "Move current layer up")
self.bind("C-D", self.window.layer_move_down, "Move current layer down")
self.bind("TAB", self.select_next, "Select next element")
self.bind("S-TAB", self.select_prev, "Select previous element")
self.bind("C-TAB", self.select_mru, "Select most-recent element")
self.bind("C-a", self.select_all, "Select all (in this layer)")
self.bind("a", self.auto_place_below, "Auto-place below")
self.bind("A", self.auto_place_above, "Auto-place above")
self.window.add_callback("select", self.selection_changed_cb)
self.window.add_callback("unselect", self.selection_changed_cb)
def selection_changed_cb(self, obj):
if not self.enabled:
return False
if self.window.selected:
self.update_selection_mode()
else:
self.disable_selection_mode()
def add_element(self, factory):
self.window.unselect_all()
if self.autoplace_mode is None:
self.window.add_element(factory)
else:
dx = factory.style_defaults.get('autoplace-dx', 0)
dy = factory.style_defaults.get('autoplace-dy', 0)
self.window.add_element(factory, self.autoplace_x + dx, self.autoplace_y + dy)
self.manager.disable_minor_mode(self.autoplace_mode)
self.autoplace_mode = None
self.update_selection_mode()
return True
def auto_place_below(self):
self.autoplace_mode = AutoplaceMode(self.window, callback=self.set_autoplace,
initially_below=True)
self.manager.enable_minor_mode(self.autoplace_mode)
return True
def auto_place_above(self):
self.autoplace_mode = AutoplaceMode(self.window, callback=self.set_autoplace,
initially_below=False)
self.manager.enable_minor_mode(self.autoplace_mode)
return True
def set_autoplace(self, x, y):
self.autoplace_x = x
self.autoplace_y = y
if x is None and y is None:
self.manager.disable_minor_mode(self.autoplace_mode)
self.autoplace_mode = None
return True
def select_all(self):
self.window.select_all()
self.update_selection_mode()
def select_next(self):
self.window.select_next()
self.update_selection_mode()
return True
def select_prev(self):
self.window.select_prev()
self.update_selection_mode()
return True
def select_mru(self):
self.window.select_mru()
self.update_selection_mode()
return True
def update_selection_mode(self):
if len(self.window.selected) > 1:
if isinstance(self.selection_edit_mode, SingleSelectionEditMode):
self.manager.disable_minor_mode(self.selection_edit_mode)
self.selection_edit_mode = None
if not self.selection_edit_mode:
self.selection_edit_mode = MultiSelectionEditMode(self.window)
self.manager.enable_minor_mode(self.selection_edit_mode)
elif len(self.window.selected) == 1:
if isinstance(self.selection_edit_mode, MultiSelectionEditMode):
self.manager.disable_minor_mode(self.selection_edit_mode)
self.selection_edit_mode = None
if not self.selection_edit_mode:
self.selection_edit_mode = SingleSelectionEditMode(self.window)
self.manager.enable_minor_mode(self.selection_edit_mode)
return True
def disable_selection_mode(self):
if self.selection_edit_mode is not None:
self.manager.disable_minor_mode(self.selection_edit_mode)
self.selection_edit_mode = None
return True
def enable(self):
self.enabled = True
self.manager.global_mode.allow_selection_drag = True
self.update_selection_mode()
def disable(self):
self.enabled = False
if self.autoplace_mode:
self.manager.disable_minor_mode(self.autoplace_mode)
self.autoplace_mode = None
self.disable_selection_mode()
def cut(self):
return self.window.clipboard_cut((self.manager.pointer_x,
self.manager.pointer_y))
def copy(self):
return self.window.clipboard_copy((self.manager.pointer_x, self.manager.pointer_y))
def paste(self):
return self.window.clipboard_paste()
def duplicate(self):
self.window.clipboard_copy((self.manager.pointer_x, self.manager.pointer_y))
return self.window.clipboard_paste()
| gpl-2.0 | 8,480,811,452,379,209,000 | 38.940299 | 91 | 0.623692 | false | 3.870781 | false | false | false |
wwitzel3/awx | awx/main/models/ad_hoc_commands.py | 1 | 7575 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
from urlparse import urljoin
# Django
from django.conf import settings
from django.db import models
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
# AWX
from awx.api.versioning import reverse
from awx.main.models.base import * # noqa
from awx.main.models.events import AdHocCommandEvent
from awx.main.models.unified_jobs import * # noqa
from awx.main.models.notifications import JobNotificationMixin, NotificationTemplate
logger = logging.getLogger('awx.main.models.ad_hoc_commands')
__all__ = ['AdHocCommand']
class AdHocCommand(UnifiedJob, JobNotificationMixin):
class Meta(object):
app_label = 'main'
diff_mode = models.BooleanField(
default=False,
)
job_type = models.CharField(
max_length=64,
choices=AD_HOC_JOB_TYPE_CHOICES,
default='run',
)
inventory = models.ForeignKey(
'Inventory',
related_name='ad_hoc_commands',
null=True,
on_delete=models.SET_NULL,
)
limit = models.CharField(
max_length=1024,
blank=True,
default='',
)
credential = models.ForeignKey(
'Credential',
related_name='ad_hoc_commands',
null=True,
default=None,
on_delete=models.SET_NULL,
)
module_name = models.CharField(
max_length=1024,
default='',
blank=True,
)
module_args = models.TextField(
blank=True,
default='',
)
forks = models.PositiveIntegerField(
blank=True,
default=0,
)
verbosity = models.PositiveIntegerField(
choices=VERBOSITY_CHOICES,
blank=True,
default=0,
)
become_enabled = models.BooleanField(
default=False,
)
hosts = models.ManyToManyField(
'Host',
related_name='ad_hoc_commands',
editable=False,
through='AdHocCommandEvent',
)
extra_vars = prevent_search(models.TextField(
blank=True,
default='',
))
extra_vars_dict = VarsDictProperty('extra_vars', True)
def clean_inventory(self):
inv = self.inventory
if not inv:
raise ValidationError(_('No valid inventory.'))
return inv
def clean_credential(self):
cred = self.credential
if cred and cred.kind != 'ssh':
raise ValidationError(
_('You must provide a machine / SSH credential.'),
)
return cred
def clean_limit(self):
# FIXME: Future feature - check if no hosts would match and reject the
# command, instead of having to run it to find out.
return self.limit
def clean_module_name(self):
if type(self.module_name) not in (str, unicode):
raise ValidationError(_("Invalid type for ad hoc command"))
module_name = self.module_name.strip() or 'command'
if module_name not in settings.AD_HOC_COMMANDS:
raise ValidationError(_('Unsupported module for ad hoc commands.'))
return module_name
def clean_module_args(self):
if type(self.module_args) not in (str, unicode):
raise ValidationError(_("Invalid type for ad hoc command"))
module_args = self.module_args
if self.module_name in ('command', 'shell') and not module_args:
raise ValidationError(_('No argument passed to %s module.') % self.module_name)
return module_args
@property
def event_class(self):
return AdHocCommandEvent
@property
def passwords_needed_to_start(self):
'''Return list of password field names needed to start the job.'''
if self.credential:
return self.credential.passwords_needed
else:
return []
def _get_parent_field_name(self):
return ''
@classmethod
def _get_task_class(cls):
from awx.main.tasks import RunAdHocCommand
return RunAdHocCommand
@classmethod
def supports_isolation(cls):
return True
def get_absolute_url(self, request=None):
return reverse('api:ad_hoc_command_detail', kwargs={'pk': self.pk}, request=request)
def get_ui_url(self):
return urljoin(settings.TOWER_URL_BASE, "/#/jobs/command/{}".format(self.pk))
@property
def notification_templates(self):
all_orgs = set()
for h in self.hosts.all():
all_orgs.add(h.inventory.organization)
active_templates = dict(error=set(),
success=set(),
any=set())
base_notification_templates = NotificationTemplate.objects
for org in all_orgs:
for templ in base_notification_templates.filter(organization_notification_templates_for_errors=org):
active_templates['error'].add(templ)
for templ in base_notification_templates.filter(organization_notification_templates_for_success=org):
active_templates['success'].add(templ)
for templ in base_notification_templates.filter(organization_notification_templates_for_any=org):
active_templates['any'].add(templ)
active_templates['error'] = list(active_templates['error'])
active_templates['any'] = list(active_templates['any'])
active_templates['success'] = list(active_templates['success'])
return active_templates
def get_passwords_needed_to_start(self):
return self.passwords_needed_to_start
@property
def task_impact(self):
# NOTE: We sorta have to assume the host count matches and that forks default to 5
from awx.main.models.inventory import Host
count_hosts = Host.objects.filter( enabled=True, inventory__ad_hoc_commands__pk=self.pk).count()
return min(count_hosts, 5 if self.forks == 0 else self.forks) + 1
def copy(self):
data = {}
for field in ('job_type', 'inventory_id', 'limit', 'credential_id',
'module_name', 'module_args', 'forks', 'verbosity',
'extra_vars', 'become_enabled', 'diff_mode'):
data[field] = getattr(self, field)
return AdHocCommand.objects.create(**data)
def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])
if not self.name:
self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)
if 'name' not in update_fields:
update_fields.append('name')
super(AdHocCommand, self).save(*args, **kwargs)
@property
def preferred_instance_groups(self):
if self.inventory is not None and self.inventory.organization is not None:
organization_groups = [x for x in self.inventory.organization.instance_groups.all()]
else:
organization_groups = []
if self.inventory is not None:
inventory_groups = [x for x in self.inventory.instance_groups.all()]
else:
inventory_groups = []
selected_groups = inventory_groups + organization_groups
if not selected_groups:
return self.global_instance_groups
return selected_groups
'''
JobNotificationMixin
'''
def get_notification_templates(self):
return self.notification_templates
def get_notification_friendly_name(self):
return "AdHoc Command"
| apache-2.0 | -1,621,048,749,649,910,800 | 32.517699 | 113 | 0.622838 | false | 4.148412 | false | false | false |
demisto/content | Packs/FeedDShield/Integrations/FeedDShield/FeedDShield.py | 1 | 1393 | from CommonServerPython import *
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
params['indicator_type'] = FeedIndicatorType.CIDR
params['url'] = 'https://www.dshield.org/block.txt'
params['ignore_regex'] = "[#S].*"
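    # block.txt entries are assumed to be tab-separated lines of the form
    # (illustrative values only):
    #   <start ip>\t<end ip>\t<netmask>\t<attacks>\t<name>\t<country>\t<email>
    # The regexes below turn each line into a CIDR indicator plus its fields.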
params['indicator'] = json.dumps({
"regex": r"^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\t[\d.]*\t(\d{1,2})",
"transform": "\\1/\\2"
})
fields = json.dumps({
"numberofattacks": {
"regex": "^.*\\t.*\\t[0-9]+\\t([0-9]+)",
"transform": "\\1"
},
"networkname": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t([^\\t]+)",
"transform": "\\1"
},
"geocountry": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t([A-Z]+)",
"transform": "\\1"
},
"registrarabuseemail": {
"regex": "^.*\\t.*\\t[0-9]+\\t[0-9]+\\t[^\\t]+\\t[A-Z]+\\t(\\S+)",
"transform": "\\1"
}
})
params['fields'] = fields
params['custom_fields_mapping'] = {
"geocountry": "geocountry",
"registrarabuseemail": "registrarabuseemail"
}
# Call the main execution of the HTTP API module.
feed_main('Dshield Feed', params, 'dshield-')
from HTTPFeedApiModule import * # noqa: E402
if __name__ == '__builtin__' or __name__ == 'builtins':
main()
| mit | -4,287,493,720,206,089,700 | 27.428571 | 78 | 0.460158 | false | 2.976496 | false | false | false |
benhoff/CHATIMUSMAXIMUS | chatimusmaximus/gui/status_bar.py | 1 | 1501 | import os
from PyQt5 import QtWidgets, QtGui, QtCore
class StatusBar(QtWidgets.QStatusBar):
def __init__(self, parent=None):
super(StatusBar, self).__init__(parent)
file_dir = os.path.dirname(__file__)
resource_dir = os.path.join(file_dir, 'resources', 'buttons')
red_button = os.path.join(resource_dir, 'red_button.png')
green_button = os.path.join(resource_dir, 'green_button.png')
self._red_icon = QtGui.QIcon(red_button)
self._green_icon = QtGui.QIcon(green_button)
self.time_label = QtWidgets.QLabel()
self.time_label.setStyleSheet('color: white;')
self.addPermanentWidget(self.time_label)
# set up the status widgets
self._status_widgets = {}
def set_up_helper(self, platform_name):
button = QtWidgets.QPushButton(self._red_icon,
' ' + platform_name)
button.setFlat(True)
button.setAutoFillBackground(True)
button.setStyleSheet('color: white;')
self.addPermanentWidget(button)
self._status_widgets[platform_name.lower()] = button
@QtCore.pyqtSlot(bool, str)
def set_widget_status(self, bool, platform_name):
# get the appropriate status widget
if platform_name:
button = self._status_widgets[platform_name]
else:
return
if bool:
button.setIcon(self._green_icon)
else:
button.setIcon(self._red_icon)
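# Typical wiring (illustrative, not part of the original file):
#   status_bar = StatusBar()
#   status_bar.set_up_helper('Youtube')
#   connector.connected_signal.connect(status_bar.set_widget_status)
# The slot expects the lower-cased platform name, e.g. (True, 'youtube').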
| gpl-3.0 | 3,342,233,628,808,679,000 | 33.113636 | 69 | 0.608261 | false | 3.91906 | false | false | false |
gsathya/flow | backend/app.py | 1 | 2339 | from flask import Flask, json, jsonify, request, render_template, send_from_directory, url_for, redirect
import os
import db
app = Flask(__name__)
@app.route("/", methods=["GET"])
def main():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "index.html")
@app.route("/css/<file>", methods=["GET"])
def get_css(file):
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend', "css"))
return send_from_directory(path, file)
@app.route("/js/<file>", methods=["GET"])
def get_js(file):
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend', "js"))
return send_from_directory(path, file)
@app.route("/tracenow")
def tracenow():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "tracenow.html")
@app.route("/gettrace", methods=['GET'])
def gettrace():
data = {}
return jsonify(data)
@app.route("/monthly")
def monthly():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "monthly.html")
@app.route("/daily")
def daily():
path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'frontend'))
return send_from_directory(path, "daily.html")
@app.route("/monthlystats", methods=['GET'])
def get():
srcip = request.args.get('srcip')
dstip = request.args.get('dstip')
mac = request.args.get('mac')
if dstip is not None:
data = db.getmonthlystats(srcip, dstip)
elif srcip is not None:
data = db.getmonthlystatsforsrcip(srcip)
elif mac is not None:
data = db.getmonthlystatsformac(mac)
result = jsonify(data)
return result
# Return a deduplicated set of src ips
@app.route("/monthlysrc")
def getmonthlysrc():
data = db.getmonthlysrc()
result = jsonify(data)
return result
# Return a deduplicated set of src ips
@app.route("/dailysrc")
def getdailysrc():
data = db.getdailysrc()
result = jsonify(data)
return result
@app.route("/dailystats", methods=['GET'])
def getdailystats():
data = db.getdailystatsforsrcip()
result = jsonify(data)
return result
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
| mit | -5,937,402,188,431,586,000 | 28.987179 | 104 | 0.640445 | false | 3.131191 | false | false | false |
Entropy512/libsigrokdecode | decoders/pwm/pd.py | 3 | 4862 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Torsten Duwe <[email protected]>
## Copyright (C) 2014 Sebastien Bourdelin <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'pwm'
name = 'PWM'
longname = 'Pulse-width modulation'
desc = 'Analog level encoded in duty cycle percentage.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['Encoding']
channels = (
{'id': 'data', 'name': 'Data', 'desc': 'Data line'},
)
options = (
{'id': 'polarity', 'desc': 'Polarity', 'default': 'active-high',
'values': ('active-low', 'active-high')},
)
annotations = (
('duty-cycle', 'Duty cycle'),
('period', 'Period'),
)
annotation_rows = (
('duty-cycle', 'Duty cycle', (0,)),
('period', 'Period', (1,)),
)
binary = (
('raw', 'RAW file'),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.ss_block = self.es_block = None
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_average = \
self.register(srd.OUTPUT_META,
meta=(float, 'Average', 'PWM base (cycle) frequency'))
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def putp(self, period_t):
# Adjust granularity.
if period_t == 0 or period_t >= 1:
period_s = '%.1f s' % (period_t)
elif period_t <= 1e-12:
period_s = '%.1f fs' % (period_t * 1e15)
elif period_t <= 1e-9:
period_s = '%.1f ps' % (period_t * 1e12)
elif period_t <= 1e-6:
period_s = '%.1f ns' % (period_t * 1e9)
elif period_t <= 1e-3:
period_s = '%.1f μs' % (period_t * 1e6)
else:
period_s = '%.1f ms' % (period_t * 1e3)
self.put(self.ss_block, self.es_block, self.out_ann, [1, [period_s]])
def putb(self, data):
self.put(self.ss_block, self.es_block, self.out_binary, data)
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
num_cycles = 0
average = 0
# Wait for an "active" edge (depends on config). This starts
# the first full period of the inspected signal waveform.
self.wait({0: 'f' if self.options['polarity'] == 'active-low' else 'r'})
self.first_samplenum = self.samplenum
# Keep getting samples for the period's middle and terminal edges.
# At the same time that last sample starts the next period.
while True:
# Get the next two edges. Setup some variables that get
# referenced in the calculation and in put() routines.
start_samplenum = self.samplenum
self.wait({0: 'e'})
end_samplenum = self.samplenum
self.wait({0: 'e'})
self.ss_block = start_samplenum
self.es_block = self.samplenum
# Calculate the period, the duty cycle, and its ratio.
period = self.samplenum - start_samplenum
duty = end_samplenum - start_samplenum
ratio = float(duty / period)
# Report the duty cycle in percent.
percent = float(ratio * 100)
self.putx([0, ['%f%%' % percent]])
# Report the duty cycle in the binary output.
self.putb([0, bytes([int(ratio * 256)])])
# Report the period in units of time.
period_t = float(period / self.samplerate)
self.putp(period_t)
# Update and report the new duty cycle average.
num_cycles += 1
average += percent
self.put(self.first_samplenum, self.es_block, self.out_average,
float(average / num_cycles))
| gpl-3.0 | -7,475,465,038,127,626,000 | 33.475177 | 84 | 0.575602 | false | 3.587454 | false | false | false |
openstax/openstax-cms | global_settings/models.py | 2 | 2455 | from django.db import models
from wagtail.contrib.settings.models import BaseSetting, register_setting
@register_setting(icon='doc-empty')
class StickyNote(BaseSetting):
start = models.DateTimeField(null=True, help_text="Set the start date to override the content of the Give Sticky. Set the header and body below to change.")
expires = models.DateTimeField(null=True, help_text="Set the date to expire overriding the content of the Give Sticky.")
show_popup = models.BooleanField(default=False, help_text="Replaces the top banner with a popup, start and expire dates still control timing.")
header = models.TextField(max_length=255)
body = models.TextField()
link_text = models.CharField(max_length=255)
link = models.URLField()
emergency_expires = models.DateTimeField(null=True, blank=True, help_text="When active, the Sticky Note will not be displayed until the emergency expires.")
emergency_content = models.CharField(max_length=255)
class Meta:
verbose_name = 'Sticky Note'
@register_setting(icon='collapse-down')
class Footer(BaseSetting):
supporters = models.TextField()
copyright = models.TextField()
ap_statement = models.TextField()
facebook_link =models.URLField()
twitter_link = models.URLField()
linkedin_link = models.URLField()
class Meta:
verbose_name = 'Footer'
@register_setting(icon='cogs')
class CloudfrontDistribution(BaseSetting):
distribution_id = models.CharField(max_length=255, null=True, blank=True)
class Meta:
verbose_name = 'CloudFront Distribution'
@register_setting(icon='date')
class GiveToday(BaseSetting):
give_link_text = models.CharField(max_length=255)
give_link = models.URLField("Give link", blank=True, help_text="URL to Rice Give page or something similar")
start = models.DateTimeField(null=True,
help_text="Set the start date for Give Today to display")
expires = models.DateTimeField(null=True,
help_text="Set the date to expire displaying Give Today")
menu_start = models.DateTimeField(null=True,
help_text="Set the start date for Give Today to display in the menu")
menu_expires = models.DateTimeField(null=True,
help_text="Set the date to expire displaying Give Today in the menu")
class Meta:
verbose_name = 'Give Today'
| agpl-3.0 | 6,579,150,917,937,969,000 | 43.636364 | 160 | 0.692872 | false | 4.037829 | false | false | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/traits_listener.py | 1 | 48349 | #-------------------------------------------------------------------------------
#
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 03/05/2007
#
#-------------------------------------------------------------------------------
""" Defines classes used to implement and manage various trait listener
patterns.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import re
import string
import weakref
from weakref import WeakKeyDictionary
from string import whitespace
from types import MethodType
from .has_traits import HasPrivateTraits
from .trait_base import Undefined, Uninitialized
from .traits import Property
from .trait_types import Str, Int, Bool, Instance, List, Enum, Any
from .trait_errors import TraitError
from .trait_notifiers import TraitChangeNotifyWrapper
#---------------------------------------------------------------------------
# Constants:
#---------------------------------------------------------------------------
# The name of the dictionary used to store active listeners
TraitsListener = '__traits_listener__'
# End of String marker
EOS = '\0'
# Types of traits that can be listened to
ANYTRAIT_LISTENER = '_register_anytrait'
SIMPLE_LISTENER = '_register_simple'
LIST_LISTENER = '_register_list'
DICT_LISTENER = '_register_dict'
SET_LISTENER = '_register_set'
# Mapping from trait default value types to listener types
type_map = {
5: LIST_LISTENER,
6: DICT_LISTENER,
9: SET_LISTENER
}
# Listener types:
ANY_LISTENER = 0
SRC_LISTENER = 1
DST_LISTENER = 2
ListenerType = {
0: ANY_LISTENER,
1: DST_LISTENER,
2: DST_LISTENER,
3: SRC_LISTENER,
4: SRC_LISTENER
}
# Invalid destination ( object, name ) reference marker (i.e. ambiguous):
INVALID_DESTINATION = ( None, None )
# Regular expressions used by the parser:
simple_pat = re.compile( r'^([a-zA-Z_]\w*)(\.|:)([a-zA-Z_]\w*)$' )
name_pat = re.compile( r'([a-zA-Z_]\w*)\s*(.*)' )
# Characters valid in a traits name:
name_chars = string.ascii_letters + string.digits + '_'
#-------------------------------------------------------------------------------
# Utility functions:
#-------------------------------------------------------------------------------
def indent ( text, first_line = True, n = 1, width = 4 ):
""" Indent lines of text.
Parameters
----------
text : str
The text to indent.
first_line : bool, optional
If False, then the first line will not be indented (default: True).
n : int, optional
The level of indentation (default: 1).
width : int, optional
The number of spaces in each level of indentation (default: 4).
Returns
-------
indented : str
"""
lines = text.split( '\n' )
if not first_line:
first = lines[0]
lines = lines[1:]
spaces = ' ' * (width * n)
lines2 = [ spaces + x for x in lines ]
if not first_line:
lines2.insert( 0, first )
indented = '\n'.join( lines2 )
return indented
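# Editor's note (illustration only, not in the original source): for example,
#   indent( 'alpha\nbeta', n = 2 )              -> '        alpha\n        beta'
#   indent( 'alpha\nbeta', first_line = False ) -> 'alpha\n    beta'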
#-------------------------------------------------------------------------------
# Metadata filters:
#-------------------------------------------------------------------------------
def is_not_none ( value ): return (value is not None)
def is_none ( value ): return (value is None)
def not_event ( value ): return (value != 'event')
#-------------------------------------------------------------------------------
# 'ListenerBase' class:
#-------------------------------------------------------------------------------
class ListenerBase ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The handler to be called when any listened to trait is changed:
#handler = Any
# The dispatch mechanism to use when invoking the handler:
#dispatch = Str
# Does the handler go at the beginning (True) or end (False) of the
# notification handlers list?
#priority = Bool( False )
# The next level (if any) of ListenerBase object to be called when any of
# our listened to traits is changed:
#next = Instance( ListenerBase )
# The type of handler being used:
#type = Enum( ANY_LISTENER, SRC_LISTENER, DST_LISTENER )
# Should changes to this item generate a notification to the handler?
# notify = Bool
# Should registering listeners for items reachable from this listener item
# be deferred until the associated trait is first read or set?
# deferred = Bool
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a simple trait:
#---------------------------------------------------------------------------
def handle ( self, object, name, old, new ):
""" Handles a trait change for a simple trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a list trait:
#---------------------------------------------------------------------------
def handle_list ( self, object, name, old, new ):
""" Handles a trait change for a list trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a list traits items:
#---------------------------------------------------------------------------
def handle_list_items ( self, object, name, old, new ):
""" Handles a trait change for a list traits items.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary trait:
#---------------------------------------------------------------------------
def handle_dict ( self, object, name, old, new ):
""" Handles a trait change for a dictionary trait.
"""
raise NotImplementedError
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary traits items:
#---------------------------------------------------------------------------
def handle_dict_items ( self, object, name, old, new ):
""" Handles a trait change for a dictionary traits items.
"""
raise NotImplementedError
#-------------------------------------------------------------------------------
# 'ListenerItem' class:
#-------------------------------------------------------------------------------
class ListenerItem ( ListenerBase ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
#: The name of the trait to listen to:
name = Str
#: The name of any metadata that must be present (or not present):
metadata_name = Str
#: Does the specified metadata need to be defined (True) or not defined
#: (False)?
metadata_defined = Bool( True )
#: The handler to be called when any listened-to trait is changed:
handler = Any
#: A weakref 'wrapped' version of 'handler':
wrapped_handler_ref = Any
#: The dispatch mechanism to use when invoking the handler:
dispatch = Str
#: Does the handler go at the beginning (True) or end (False) of the
#: notification handlers list?
priority = Bool( False )
#: The next level (if any) of ListenerBase object to be called when any of
#: this object's listened-to traits is changed:
next = Instance( ListenerBase )
#: The type of handler being used:
type = Enum( ANY_LISTENER, SRC_LISTENER, DST_LISTENER )
#: Should changes to this item generate a notification to the handler?
notify = Bool( True )
#: Should registering listeners for items reachable from this listener item
#: be deferred until the associated trait is first read or set?
deferred = Bool( False )
#: Is this an 'any_trait' change listener, or does it create explicit
#: listeners for each individual trait?
is_any_trait = Bool( False )
#: Is the associated handler a special list handler that handles both
#: 'foo' and 'foo_items' events by receiving a list of 'deleted' and 'added'
#: items as the 'old' and 'new' arguments?
is_list_handler = Bool( False )
#: A dictionary mapping objects to a list of all current active
#: (*name*, *type*) listener pairs, where *type* defines the type of
#: listener, one of: (SIMPLE_LISTENER, LIST_LISTENER, DICT_LISTENER).
active = Instance( WeakKeyDictionary, () )
#-- 'ListenerBase' Class Method Implementations ----------------------------
#---------------------------------------------------------------------------
# String representation:
#---------------------------------------------------------------------------
def __repr__ ( self, seen = None ):
"""Returns a string representation of the object.
Since the object graph may have cycles, we extend the basic __repr__ API
to include a set of objects we've already seen while constructing
a string representation. When this method tries to get the repr of
a ListenerItem or ListenerGroup, we will use the extended API and build
up the set of seen objects. The repr of a seen object will just be
'<cycle>'.
"""
if seen is None:
seen = set()
seen.add( self )
next_repr = 'None'
next = self.next
if next is not None:
if next in seen:
next_repr = '<cycle>'
else:
next_repr = next.__repr__( seen )
return """%s(
name = %r,
metadata_name = %r,
metadata_defined = %r,
is_any_trait = %r,
dispatch = %r,
notify = %r,
is_list_handler = %r,
type = %r,
next = %s,
)""" % ( self.__class__.__name__, self.name, self.metadata_name,
self.metadata_defined, self.is_any_trait, self.dispatch, self.notify,
self.is_list_handler, self.type, indent( next_repr, False ) )
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
# Make sure we actually have an object to set listeners on and that it
# has not already been registered (cycle breaking):
if (new is None) or (new is Undefined) or (new in self.active):
return INVALID_DESTINATION
# Create a dictionary of {name: trait_values} that match the object's
# definition for the 'new' object:
name = self.name
last = name[-1:]
if last == '*':
# Handle the special case of an 'anytrait' change listener:
if self.is_any_trait:
try:
self.active[ new ] = [ ( '', ANYTRAIT_LISTENER ) ]
return self._register_anytrait( new, '', False )
except TypeError:
# This error can occur if 'new' is a list or other object
# for which a weakref cannot be created as the dictionary
# key for 'self.active':
return INVALID_DESTINATION
# Handle trait matching based on a common name prefix and/or
# matching trait metadata:
metadata = self._metadata
if metadata is None:
self._metadata = metadata = { 'type': not_event }
if self.metadata_name != '':
if self.metadata_defined:
metadata[ self.metadata_name ] = is_not_none
else:
metadata[ self.metadata_name ] = is_none
# Get all object traits with matching metadata:
names = new.trait_names( **metadata )
# If a name prefix was specified, filter out only the names that
# start with the specified prefix:
name = name[:-1]
if name != '':
n = len( name )
names = [ aname for aname in names if name == aname[ : n ] ]
# Create the dictionary of selected traits:
bt = new.base_trait
traits = dict( [ ( name, bt( name ) ) for name in names ] )
# Handle any new traits added dynamically to the object:
new.on_trait_change( self._new_trait_added, 'trait_added' )
else:
# Determine if the trait is optional or not:
optional = (last == '?')
if optional:
name = name[:-1]
# Else, no wildcard matching, just get the specified trait:
trait = new.base_trait( name )
# Try to get the object trait:
if trait is None:
# Raise an error if trait is not defined and not optional:
# fixme: Properties which are lists don't implement the
# '..._items' sub-trait, which can cause a failure here when
# used with an editor that sets up listeners on the items...
if not optional:
raise TraitError( "'%s' object has no '%s' trait" % (
new.__class__.__name__, name ) )
# Otherwise, just skip it:
traits = {}
else:
# Create a result dictionary containing just the single trait:
traits = { name: trait }
# For each item, determine its type (simple, list, dict):
self.active[ new ] = active = []
for name, trait in traits.items():
# Determine whether the trait type is simple, list, set or
# dictionary:
type = SIMPLE_LISTENER
handler = trait.handler
if handler is not None:
type = type_map.get( handler.default_value_type,
SIMPLE_LISTENER )
# Add the name and type to the list of traits being registered:
active.append( ( name, type ) )
# Set up the appropriate trait listeners on the object for the
# current trait:
value = getattr( self, type )( new, name, False )
if len( traits ) == 1:
return value
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
if old is not None and old is not Uninitialized:
try:
active = self.active.pop( old, None )
if active is not None:
for name, type in active:
getattr( self, type )( old, name, True )
except TypeError:
# An error can occur if 'old' is a list or other object for
                # which a weakref cannot be created and used as a key for
# 'self.active':
pass
#---------------------------------------------------------------------------
# Handles a trait change for an intermediate link trait:
#---------------------------------------------------------------------------
def handle_simple ( self, object, name, old, new ):
""" Handles a trait change for an intermediate link trait.
"""
self.next.unregister( old )
self.next.register( new )
def handle_dst ( self, object, name, old, new ):
""" Handles a trait change for an intermediate link trait when the
notification is for the final destination trait.
"""
self.next.unregister( old )
object, name = self.next.register( new )
if old is not Uninitialized:
if object is None:
raise TraitError( "on_trait_change handler signature is "
"incompatible with a change to an intermediate trait" )
wh = self.wrapped_handler_ref()
if wh is not None:
wh( object, name, old,
getattr( object, name, Undefined ) )
#---------------------------------------------------------------------------
# Handles a trait change for a list (or set) trait:
#---------------------------------------------------------------------------
def handle_list ( self, object, name, old, new ):
""" Handles a trait change for a list (or set) trait.
"""
if old is not None and old is not Uninitialized:
unregister = self.next.unregister
for obj in old:
unregister( obj )
register = self.next.register
for obj in new:
register( obj )
#---------------------------------------------------------------------------
# Handles a trait change for a list (or set) traits items:
#---------------------------------------------------------------------------
def handle_list_items ( self, object, name, old, new ):
""" Handles a trait change for items of a list (or set) trait.
"""
self.handle_list( object, name, new.removed, new.added )
def handle_list_items_special ( self, object, name, old, new ):
""" Handles a trait change for items of a list (or set) trait with
notification.
"""
wh = self.wrapped_handler_ref()
if wh is not None:
wh( object, name, new.removed, new.added )
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary trait:
#---------------------------------------------------------------------------
def handle_dict ( self, object, name, old, new ):
""" Handles a trait change for a dictionary trait.
"""
if old is not Uninitialized:
unregister = self.next.unregister
for obj in old.values():
unregister( obj )
register = self.next.register
for obj in new.values():
register( obj )
#---------------------------------------------------------------------------
# Handles a trait change for a dictionary traits items:
#---------------------------------------------------------------------------
def handle_dict_items ( self, object, name, old, new ):
""" Handles a trait change for items of a dictionary trait.
"""
self.handle_dict( object, name, new.removed, new.added )
if len( new.changed ) > 0:
# If 'name' refers to the '_items' trait, then remove the '_items'
# suffix to get the actual dictionary trait.
#
# fixme: Is there ever a case where 'name' *won't* refer to the
# '_items' trait?
if name.endswith('_items'):
name = name[:-len('_items')]
dict = getattr( object, name )
unregister = self.next.unregister
register = self.next.register
for key, obj in new.changed.items():
unregister( obj )
register( dict[ key ] )
#---------------------------------------------------------------------------
# Handles an invalid intermediate trait change to a handler that must be
# applied to the final destination object.trait:
#---------------------------------------------------------------------------
def handle_error ( self, obj, name, old, new ):
""" Handles an invalid intermediate trait change to a handler that must
be applied to the final destination object.trait.
"""
if old is not None and old is not Uninitialized:
raise TraitError( "on_trait_change handler signature is "
"incompatible with a change to an intermediate trait" )
#-- Event Handlers ---------------------------------------------------------
#---------------------------------------------------------------------------
# Handles the 'handler' trait being changed:
#---------------------------------------------------------------------------
def _handler_changed ( self, handler ):
""" Handles the **handler** trait being changed.
"""
if self.next is not None:
self.next.handler = handler
#---------------------------------------------------------------------------
# Handles the 'wrapped_handler_ref' trait being changed:
#---------------------------------------------------------------------------
def _wrapped_handler_ref_changed ( self, wrapped_handler_ref ):
""" Handles the 'wrapped_handler_ref' trait being changed.
"""
if self.next is not None:
self.next.wrapped_handler_ref = wrapped_handler_ref
#---------------------------------------------------------------------------
# Handles the 'dispatch' trait being changed:
#---------------------------------------------------------------------------
def _dispatch_changed ( self, dispatch ):
""" Handles the **dispatch** trait being changed.
"""
if self.next is not None:
self.next.dispatch = dispatch
#---------------------------------------------------------------------------
# Handles the 'priority' trait being changed:
#---------------------------------------------------------------------------
def _priority_changed ( self, priority ):
""" Handles the **priority** trait being changed.
"""
if self.next is not None:
self.next.priority = priority
#-- Private Methods --------------------------------------------------------
#---------------------------------------------------------------------------
# Registers any 'anytrait' listener:
#---------------------------------------------------------------------------
def _register_anytrait ( self, object, name, remove ):
""" Registers any 'anytrait' listener.
"""
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
#---------------------------------------------------------------------------
# Registers a handler for a simple trait:
#---------------------------------------------------------------------------
def _register_simple ( self, object, name, remove ):
""" Registers a handler for a simple trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_simple
if self.notify:
if self.type == DST_LISTENER:
if self.dispatch != 'same':
raise TraitError( "Trait notification dispatch type '%s' "
"is not compatible with handler signature and "
"extended trait name notification style" % self.dispatch )
tl_handler = self.handle_dst
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = 'extended',
priority = self.priority )
if remove:
return next.unregister( getattr( object, name ) )
if not self.deferred:
return next.register( getattr( object, name ) )
return ( object, name )
#---------------------------------------------------------------------------
# Registers a handler for a list trait:
#---------------------------------------------------------------------------
def _register_list ( self, object, name, remove ):
""" Registers a handler for a list trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.is_list_handler:
object._on_trait_change( self.handle_list_items_special,
name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
elif self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_list
tl_handler_items = self.handle_list_items
if self.notify:
if self.type == DST_LISTENER:
tl_handler = tl_handler_items = self.handle_error
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.is_list_handler:
object._on_trait_change( self.handle_list_items_special,
name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
elif self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = 'extended',
priority = self.priority )
object._on_trait_change( tl_handler_items, name + '_items',
remove = remove,
dispatch = 'extended',
priority = self.priority )
if remove:
handler = next.unregister
elif self.deferred:
return INVALID_DESTINATION
else:
handler = next.register
for obj in getattr( object, name ):
handler( obj )
return INVALID_DESTINATION
# Handle 'sets' the same as 'lists':
# Note: Currently the behavior of sets is almost identical to that of lists,
# so we are able to share the same code for both. This includes some 'duck
# typing' that occurs with the TraitListEvent and TraitSetEvent, that define
# 'removed' and 'added' attributes that behave similarly enough (from the
# point of view of this module) that they can be treated as equivalent. If
# the behavior of sets ever diverges from that of lists, then this code may
# need to be changed.
_register_set = _register_list
#---------------------------------------------------------------------------
# Registers a handler for a dictionary trait:
#---------------------------------------------------------------------------
def _register_dict ( self, object, name, remove ):
""" Registers a handler for a dictionary trait.
"""
next = self.next
if next is None:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
return ( object, name )
tl_handler = self.handle_dict
tl_handler_items = self.handle_dict_items
if self.notify:
if self.type == DST_LISTENER:
tl_handler = tl_handler_items = self.handle_error
else:
handler = self.handler()
if handler is not Undefined:
object._on_trait_change( handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if self.type == ANY_LISTENER:
object._on_trait_change( handler, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler, name,
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
object._on_trait_change( tl_handler_items, name + '_items',
remove = remove,
dispatch = self.dispatch,
priority = self.priority )
if remove:
handler = next.unregister
elif self.deferred:
return INVALID_DESTINATION
else:
handler = next.register
for obj in getattr( object, name ).values():
handler( obj )
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Handles new traits being added to an object being monitored:
#---------------------------------------------------------------------------
def _new_trait_added ( self, object, name, new_trait ):
""" Handles new traits being added to an object being monitored.
"""
        # See if the new trait matches our prefix and metadata:
if new_trait.startswith( self.name[:-1] ):
trait = object.base_trait( new_trait )
for meta_name, meta_eval in self._metadata.items():
if not meta_eval( getattr( trait, meta_name ) ):
return
# Determine whether the trait type is simple, list, set or
# dictionary:
type = SIMPLE_LISTENER
handler = trait.handler
if handler is not None:
                type = type_map.get( handler.default_value_type,
SIMPLE_LISTENER )
# Add the name and type to the list of traits being registered:
self.active[ object ].append( ( new_trait, type ) )
# Set up the appropriate trait listeners on the object for the
# new trait:
getattr( self, type )( object, new_trait, False )
#-------------------------------------------------------------------------------
# 'ListenerGroup' class:
#-------------------------------------------------------------------------------
def _set_value ( self, name, value ):
for item in self.items:
setattr( item, name, value )
def _get_value ( self, name ):
# Use the attribute on the first item. If there are no items, return None.
if self.items:
return getattr( self.items[0], name )
else:
return None
ListProperty = Property( fget = _get_value, fset = _set_value )
class ListenerGroup ( ListenerBase ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
#: The handler to be called when any listened-to trait is changed
handler = Property
#: A weakref 'wrapped' version of 'handler':
wrapped_handler_ref = Property
#: The dispatch mechanism to use when invoking the handler:
dispatch = Property
#: Does the handler go at the beginning (True) or end (False) of the
#: notification handlers list?
priority = ListProperty
#: The next level (if any) of ListenerBase object to be called when any of
#: this object's listened-to traits is changed
next = ListProperty
#: The type of handler being used:
type = ListProperty
#: Should changes to this item generate a notification to the handler?
notify = ListProperty
#: Should registering listeners for items reachable from this listener item
#: be deferred until the associated trait is first read or set?
deferred = ListProperty
# The list of ListenerBase objects in the group
items = List( ListenerBase )
#-- Property Implementations -----------------------------------------------
def _set_handler ( self, handler ):
if self._handler is None:
self._handler = handler
for item in self.items:
item.handler = handler
def _set_wrapped_handler_ref ( self, wrapped_handler_ref ):
if self._wrapped_handler_ref is None:
self._wrapped_handler_ref = wrapped_handler_ref
for item in self.items:
item.wrapped_handler_ref = wrapped_handler_ref
def _set_dispatch ( self, dispatch ):
if self._dispatch is None:
self._dispatch = dispatch
for item in self.items:
item.dispatch = dispatch
#-- 'ListenerBase' Class Method Implementations ----------------------------
#---------------------------------------------------------------------------
# String representation:
#---------------------------------------------------------------------------
def __repr__ ( self, seen = None ):
"""Returns a string representation of the object.
Since the object graph may have cycles, we extend the basic __repr__ API
to include a set of objects we've already seen while constructing
a string representation. When this method tries to get the repr of
a ListenerItem or ListenerGroup, we will use the extended API and build
up the set of seen objects. The repr of a seen object will just be
'<cycle>'.
"""
if seen is None:
seen = set()
seen.add( self )
lines = [ '%s(items = [' % self.__class__.__name__ ]
for item in self.items:
lines.extend( indent( item.__repr__( seen ), True ).split( '\n' ) )
lines[-1] += ','
lines.append( '])' )
return '\n'.join( lines )
#---------------------------------------------------------------------------
# Registers new listeners:
#---------------------------------------------------------------------------
def register ( self, new ):
""" Registers new listeners.
"""
for item in self.items:
item.register( new )
return INVALID_DESTINATION
#---------------------------------------------------------------------------
# Unregisters any existing listeners:
#---------------------------------------------------------------------------
def unregister ( self, old ):
""" Unregisters any existing listeners.
"""
for item in self.items:
item.unregister( old )
#-------------------------------------------------------------------------------
# 'ListenerParser' class:
#-------------------------------------------------------------------------------
class ListenerParser ( HasPrivateTraits ):
#-------------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------------
#: The string being parsed
text = Str
#: The length of the string being parsed.
len_text = Int
#: The current parse index within the string
index = Int
#: The next character from the string being parsed
next = Property
#: The next Python attribute name within the string:
name = Property
#: The next non-whitespace character
skip_ws = Property
#: Backspaces to the last character processed
backspace = Property
#: The ListenerBase object resulting from parsing **text**
listener = Instance( ListenerBase )
#-- Property Implementations -----------------------------------------------
def _get_next ( self ):
index = self.index
self.index += 1
if index >= self.len_text:
return EOS
return self.text[ index ]
def _get_backspace ( self ):
self.index = max( 0, self.index - 1 )
def _get_skip_ws ( self ):
while True:
c = self.next
if c not in whitespace:
return c
def _get_name ( self ):
match = name_pat.match( self.text, self.index - 1 )
if match is None:
return ''
self.index = match.start( 2 )
return match.group( 1 )
#-- object Method Overrides ------------------------------------------------
def __init__ ( self, text = '', **traits ):
self.text = text
super( ListenerParser, self ).__init__( **traits )
#-- Private Methods --------------------------------------------------------
#---------------------------------------------------------------------------
# Parses the text and returns the appropriate collection of ListenerBase
# objects described by the text:
#---------------------------------------------------------------------------
def parse ( self ):
""" Parses the text and returns the appropriate collection of
ListenerBase objects described by the text.
"""
# Try a simple case of 'name1.name2'. The simplest case of a single
# Python name never triggers this parser, so we don't try to make that
# a shortcut too. Whitespace should already have been stripped from the
# start and end.
# TODO: The use of regexes should be used throughout all of the parsing
# functions to speed up all aspects of parsing.
match = simple_pat.match( self.text )
if match is not None:
return ListenerItem(
name = match.group( 1 ),
notify = match.group(2) == '.',
next = ListenerItem( name = match.group( 3 ) ) )
return self.parse_group( EOS )
#---------------------------------------------------------------------------
# Parses the contents of a group:
#---------------------------------------------------------------------------
def parse_group ( self, terminator = ']' ):
""" Parses the contents of a group.
"""
items = []
while True:
items.append( self.parse_item( terminator ) )
c = self.skip_ws
if c is terminator:
break
if c != ',':
if terminator == EOS:
self.error( "Expected ',' or end of string" )
else:
self.error( "Expected ',' or '%s'" % terminator )
if len( items ) == 1:
return items[0]
return ListenerGroup( items = items )
#---------------------------------------------------------------------------
# Parses a single, complete listener item/group string:
#---------------------------------------------------------------------------
def parse_item ( self, terminator ):
""" Parses a single, complete listener item or group string.
"""
c = self.skip_ws
if c == '[':
result = self.parse_group()
c = self.skip_ws
else:
name = self.name
if name != '':
c = self.next
result = ListenerItem( name = name )
if c in '+-':
result.name += '*'
result.metadata_defined = (c == '+')
cn = self.skip_ws
result.metadata_name = metadata = self.name
if metadata != '':
cn = self.skip_ws
result.is_any_trait = ((c == '-') and (name == '') and
(metadata == ''))
c = cn
if result.is_any_trait and (not ((c == terminator) or
((c == ',') and (terminator == ']')))):
self.error( "Expected end of name" )
elif c == '?':
if len( name ) == 0:
self.error( "Expected non-empty name preceding '?'" )
result.name += '?'
c = self.skip_ws
cycle = (c == '*')
if cycle:
c = self.skip_ws
if c in '.:':
result.notify = (c == '.')
next = self.parse_item( terminator )
if cycle:
last = result
while last.next is not None:
last = last.next
last.next = lg = ListenerGroup( items = [ next, result ] )
result = lg
else:
result.next = next
return result
if c == '[':
if (self.skip_ws == ']') and (self.skip_ws == terminator):
self.backspace
result.is_list_handler = True
else:
self.error( "Expected '[]' at the end of an item" )
else:
self.backspace
if cycle:
result.next = result
return result
#---------------------------------------------------------------------------
# Parses the metadata portion of a listener item:
#---------------------------------------------------------------------------
def parse_metadata ( self, item ):
""" Parses the metadata portion of a listener item.
"""
self.skip_ws
item.metadata_name = name = self.name
if name == '':
self.backspace
#---------------------------------------------------------------------------
# Raises a syntax error:
#---------------------------------------------------------------------------
def error ( self, msg ):
""" Raises a syntax error.
"""
raise TraitError( "%s at column %d of '%s'" %
( msg, self.index, self.text ) )
#-- Event Handlers ---------------------------------------------------------
#---------------------------------------------------------------------------
# Handles the 'text' trait being changed:
#---------------------------------------------------------------------------
def _text_changed ( self ):
self.index = 0
self.len_text = len( self.text )
self.listener = self.parse()
#-------------------------------------------------------------------------------
# 'ListenerNotifyWrapper' class:
#-------------------------------------------------------------------------------
class ListenerNotifyWrapper ( TraitChangeNotifyWrapper ):
#-- TraitChangeNotifyWrapper Method Overrides ------------------------------
def __init__ ( self, handler, owner, id, listener, target=None):
self.type = ListenerType.get( self.init( handler,
weakref.ref( owner, self.owner_deleted ), target ) )
self.id = id
self.listener = listener
def listener_deleted ( self, ref ):
owner = self.owner()
if owner is not None:
dict = owner.__dict__.get( TraitsListener )
listeners = dict.get( self.id )
listeners.remove( self )
if len( listeners ) == 0:
del dict[ self.id ]
if len( dict ) == 0:
del owner.__dict__[ TraitsListener ]
# fixme: Is the following line necessary, since all registered
# notifiers should be getting the same 'listener_deleted' call:
self.listener.unregister( owner )
self.object = self.owner = self.listener = None
def owner_deleted ( self, ref ):
self.object = self.owner = None
#-------------------------------------------------------------------------------
# 'ListenerHandler' class:
#-------------------------------------------------------------------------------
class ListenerHandler ( object ):
def __init__ ( self, handler ):
if type( handler ) is MethodType:
object = handler.im_self
if object is not None:
self.object = weakref.ref( object, self.listener_deleted )
self.name = handler.__name__
return
self.handler = handler
def __call__ ( self ):
result = getattr( self, 'handler', None )
if result is not None:
return result
return getattr( self.object(), self.name )
def listener_deleted ( self, ref ):
self.handler = Undefined
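#-------------------------------------------------------------------------------
# Editor's note -- illustrative sketch only, not part of the original Traits
# source. The extended listener mini-language parsed above is normally consumed
# through HasTraits.on_trait_change(); assuming the 'traits' package is
# importable, the 'child.value' pattern handled by ListenerParser looks like:
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    from traits.api import HasTraits, Instance, Int
    class Child ( HasTraits ):
        value = Int
    class Parent ( HasTraits ):
        child = Instance( Child, () )
    def report ( object, name, old, new ):
        # Called with the Child instance, the trait name and the old/new values:
        print( 'child.value changed from %r to %r' % ( old, new ) )
    parent = Parent()
    parent.on_trait_change( report, 'child.value' )
    parent.child.value = 42    # fires 'report' through the listeners built above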
| gpl-2.0 | -5,167,762,272,629,971,000 | 37.070079 | 88 | 0.447248 | false | 5.343021 | false | false | false |
joshzarrabi/e-mission-server | emission/analysis/intake/cleaning/cleaning_methods/speed_outlier_detection.py | 1 | 1328 | # Techniques for outlier detection of speeds. Each of these returns a speed threshold that
# can be used with outlier detection techniques.
# Standard imports
import logging
logging.basicConfig(level=logging.DEBUG)
class BoxplotOutlier(object):
MINOR = 1.5
MAJOR = 3
def __init__(self, multiplier = MAJOR, ignore_zeros = False):
self.multiplier = multiplier
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
quartile_vals = df_to_use.quantile([0.25, 0.75]).speed
logging.debug("quartile values are %s" % quartile_vals)
iqr = quartile_vals.iloc[1] - quartile_vals.iloc[0]
logging.debug("iqr %s" % iqr)
return quartile_vals.iloc[1] + self.multiplier * iqr
class SimpleQuartileOutlier(object):
def __init__(self, quantile = 0.99, ignore_zeros = False):
self.quantile = quantile
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
return df_to_use.speed.quantile(self.quantile)
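# Editor's note: illustrative usage sketch only (not part of the original module).
# It assumes pandas is installed and, as the methods above expect, that the input
# dataframe has a 'speed' column.
if __name__ == '__main__':
    import pandas as pd
    speeds_df = pd.DataFrame({"speed": [0.0, 1.1, 1.4, 1.6, 1.9, 2.3, 2.8, 40.0]})
    print(BoxplotOutlier(multiplier=BoxplotOutlier.MAJOR, ignore_zeros=True).get_threshold(speeds_df))
    print(SimpleQuartileOutlier(quantile=0.95).get_threshold(speeds_df))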
| bsd-3-clause | -492,698,684,567,668,540 | 33.947368 | 91 | 0.641566 | false | 3.295285 | false | false | false |
veger/ansible | lib/ansible/modules/network/ftd/ftd_file_download.py | 7 | 4441 | #!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_download
short_description: Downloads files from Cisco FTD devices over HTTP(S)
description:
- Downloads files from Cisco FTD devices including pending changes, disk files, certificates,
troubleshoot reports, and backups.
version_added: "2.7"
author: "Cisco Systems, Inc. (@annikulin)"
options:
operation:
description:
- The name of the operation to execute.
- Only operations that return a file can be used in this module.
required: true
type: str
path_params:
description:
- Key-value pairs that should be sent as path parameters in a REST API call.
type: dict
destination:
description:
- Absolute path of where to download the file to.
- If destination is a directory, the module uses a filename from 'Content-Disposition' header specified by the server.
required: true
type: path
"""
EXAMPLES = """
- name: Download pending changes
ftd_file_download:
operation: 'getdownload'
path_params:
objId: 'default'
destination: /tmp/
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError, FILE_MODEL_NAME
def is_download_operation(op_spec):
return op_spec[OperationField.METHOD] == HTTPMethod.GET and op_spec[OperationField.MODEL_NAME] == FILE_MODEL_NAME
def validate_params(connection, op_name, path_params):
field_name = 'Invalid path_params provided'
try:
is_valid, validation_report = connection.validate_path_params(op_name, path_params)
if not is_valid:
raise ValidationError({
field_name: validation_report
})
except Exception as e:
raise ValidationError({
field_name: str(e)
})
def main():
fields = dict(
operation=dict(type='str', required=True),
path_params=dict(type='dict'),
destination=dict(type='path', required=True)
)
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
params = module.params
connection = Connection(module._socket_path)
op_name = params['operation']
op_spec = connection.get_operation_spec(op_name)
if op_spec is None:
module.fail_json(msg='Operation with specified name is not found: %s' % op_name)
if not is_download_operation(op_spec):
module.fail_json(
msg='Invalid download operation: %s. The operation must make GET request and return a file.' %
op_name)
try:
path_params = params['path_params']
validate_params(connection, op_name, path_params)
if module.check_mode:
module.exit_json(changed=False)
connection.download_file(op_spec[OperationField.URL], params['destination'], path_params)
module.exit_json(changed=False)
except FtdServerError as e:
module.fail_json(msg='Download request for %s operation failed. Status code: %s. '
'Server response: %s' % (op_name, e.code, e.response))
except ValidationError as e:
module.fail_json(msg=e.args[0])
if __name__ == '__main__':
main()
| gpl-3.0 | 3,861,226,979,782,728,700 | 32.900763 | 124 | 0.675749 | false | 3.993705 | false | false | false |
ioiogoo/Vue-News-Board | server/spider/jobbole_news.py | 1 | 1660 | # *-* coding:utf-8 *-*
'''
@author: ioiogoo
@date: 17-1-7 1:31 PM
'''
from bs4 import BeautifulSoup
import requests
from base import Base
from models import Jobbole_news
from peewee import IntegrityError
class Jobbole_new(Base):
def __init__(self):
super(Jobbole_new, self).__init__()
self.name = 'Jobbole_news'
self.url = 'http://blog.jobbole.com/all-posts/'
def parse(self):
try:
print '%s is parse......' % self.name
html = requests.get(url=self.url, headers=self.headers).content
soup = BeautifulSoup(html, 'lxml')
news = []
for new in soup.find_all(class_="post floated-thumb"):
title = new.find('p').a['title']
url = new.find('p').a['href']
time = new.find('p').get_text('////').split('////')[-3].replace(u'·', '').strip()
intro = new.find(class_="excerpt").get_text().strip()
news.append(dict(title=title, url=url, time=time, intro=intro))
return 0, news
except Exception as e:
return 1, e
def handle(self):
status, news = self.parse()
if not status:
for new in news[::-1]:
try:
Jobbole_news(title=new['title'],
url=new['url'],
time=new['time'],
intro=new['intro']).save()
except IntegrityError:
pass
print '%s is done...' % self.name
else:
print news
if __name__ == '__main__':
j = Jobbole_new()
j.handle()
| gpl-2.0 | 1,136,395,338,779,901,300 | 30.826923 | 97 | 0.489426 | false | 3.761364 | false | false | false |
jmartinm/invenio-master | modules/bibcheck/lib/plugins/dates.py | 21 | 2853 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Plugin to correct and validate dates """
from datetime import datetime
try:
from dateutil import parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
def check_record(record, fields, dayfirst=True, yearfirst=False,
date_format="%Y-%m-%d", allow_future=True,
minimum_date=datetime(1800,1,1)):
"""
Corrects and validates date fields
For detailed explanation of how dayfirst and yearfirst works, visit
http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
For detailed explanation of the date_format placeholders, visit
http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
This plugin needs the python-dateutil library to work.
@param dayfirst Consider the day first if ambiguous
@type dayfirst boolean
@param yearfirst Consider year first if ambiguous
@type yearfirst boolean
@param date_format normalized date format
@type date_format string
@param allow_future If False, dates in the future will be marked as invalid
@type allow_future boolean
@param minimum_date dates older than this will be rejected. Default Jan 1 1800
@type minimum_date datetime.datetime
"""
if not HAS_DATEUTIL:
return
for position, value in record.iterfields(fields):
try:
new_date = parser.parse(value, dayfirst=dayfirst, yearfirst=yearfirst)
except (ValueError, TypeError):
record.set_invalid("Non-parseable date format in field %s" % position[0])
continue
if not allow_future and new_date > datetime.now():
record.set_invalid("Date in the future in field %s" % position[0])
if new_date < minimum_date:
record.set_invalid("Date too old (less than minimum_date) in field %s" % position[0])
if new_date < datetime(1900, 1, 1):
continue # strftime doesn't accept older dates
new_date_str = new_date.strftime(date_format)
record.amend_field(position, new_date_str)
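# Editor's note: illustrative sketch only, not part of the original plugin. It shows
# how the dayfirst flag documented above disambiguates a date such as "05/04/2003"
# (assumes python-dateutil is installed); check_record() itself needs a real record.
if __name__ == '__main__':
    if HAS_DATEUTIL:
        print(parser.parse("05/04/2003", dayfirst=True))   # 2003-04-05 00:00:00, i.e. 5 April
        print(parser.parse("05/04/2003", dayfirst=False))  # 2003-05-04 00:00:00, i.e. 4 May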
| gpl-2.0 | 6,745,335,022,067,054,000 | 34.6625 | 97 | 0.698563 | false | 4.01831 | false | false | false |
andreagrandi/drf3-test | drftest/shop/management/commands/initshopdb.py | 1 | 1247 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from shop.tests.factories import ProductFactory
from shop.models import Product
class Command(BaseCommand):
args = ''
    help = ('Initialize an empty DB creating a User, setting a specific token, creating two '
'products (a Widget and a Gizmo).')
def handle(self, *args, **options):
# Create the default User
if User.objects.count() == 0:
user = User.objects.create_user(username='andrea',
email='[email protected]', password='andreatest')
user.save()
else:
user = User.objects.get(id=1)
if Token.objects.count() == 0:
# Generate the token for the created user
Token.objects.create(user=user)
# Change the Token to a known one
Token.objects.filter(user_id=user.id).update(key='b60868c38b813ea43b36036503e3f5de025dde31')
if Product.objects.count() == 0:
# Create a Widget and a Gizmo products on DB
ProductFactory.create(name='Widget', collect_stamp=True)
ProductFactory.create(name='Gizmo', collect_stamp=False)
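# Editor's note (illustration only): the command is invoked through Django's
# manage.py, e.g. "python manage.py initshopdb".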
| mit | -6,342,357,560,103,815,000 | 39.225806 | 100 | 0.651965 | false | 3.95873 | false | false | false |
JimCircadian/ansible | lib/ansible/module_utils/network/exos/exos.py | 57 | 3764 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
def get_connection(module):
if hasattr(module, '_exos_connection'):
return module._exos_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._exos_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._exos_connection
def get_capabilities(module):
if hasattr(module, '_exos_capabilities'):
return module._exos_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._exos_capabilities = json.loads(capabilities)
return module._exos_capabilities
def get_config(module, flags=None):
global _DEVICE_CONFIGS
if _DEVICE_CONFIGS != {}:
return _DEVICE_CONFIGS
else:
connection = get_connection(module)
out = connection.get_config()
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS = cfg
return cfg
def run_commands(module, commands, check_rc=True):
responses = list()
connection = get_connection(module)
for cmd in to_list(commands):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
else:
command = cmd
prompt = None
answer = None
out = connection.get(command, prompt, answer)
try:
out = to_text(out, errors='surrogate_or_strict')
except UnicodeError:
module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
responses.append(out)
return responses
def load_config(module, commands):
connection = get_connection(module)
out = connection.edit_config(commands)
| gpl-3.0 | -8,666,686,739,159,465,000 | 36.267327 | 94 | 0.707492 | false | 4.306636 | true | false | false |
wcmckee/signinlca | signinlca.py | 1 | 8850 |
# coding: utf-8
# signinlca
#
# script to signin for volunteers at lca2015!
#
# The script asks for input of firstname, lastname, tshirt size, coffee count (volc) and comments.
# It creates a python dict with this data along with the current date and hour.
# It gets the username of the user and saves the data in the user's home dir under the folder signinlca.
# It saves the data as a json object.
# Currently saves the file as firstname + lastname.
#
# How could this be improved?
#
# Signup/Signin System.
#
# For signup - username, firstname, lastname, password (x2) are collected with input. The password is salted and hashed.
# Username, firstname, lastname, and password (salt/hash) are added to a dict. The dict is converted to a json object.
# json object is saved as a json file in the folder signinlca / USERNAME-FOLDER / .signup.json
#
# For signin. Username is collected with input.
# Looks for the folder of the username. Opens the .signup.json file and parses the data.
# Save the value of 'password' as a variable.
#
# Asks for the password (getpass.getpass('Password please: ')).
# Salt/hash this password.
# Save the password attempt if verification fails; otherwise complete the signin.
#
#
#
# TODO
#
# add option to choose to login or logout. Y/N option for each one.
#
# add a logout script that appends to the login data: saves time/date/comment. Anything else?
#
# Assign to jobs/room?
#
# Graph up total hours worked per day/week
#
# scp/rsync data to server/web page.
#
# Make new account, use existing account.
#
# Database of existing accounts... static page of files.
#
# Add password to account
#
# If you sign in, it doesn't ask if you want to sign out.
#
# If you sign out, it doesn't ask if you want to sign in.
#
# Hash passwords
# When creating an account the user is asked for a username (which could be firstname + lastname) and a password. Passwords are hashed, and when the user tries to log in the entered password is compared to the hashed password.
#
# Save that hash as a variable that is then compared with the saved hashed password.
# I have their signin data. Now what to do with it? Save it as a json object to be then used when they signin later?
#
# More security on it? Hash their usernames, firstnames, 2nd password?
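# Editor's illustration (assumes passlib is installed; not part of the original
# notebook) of the salt/hash-then-verify round trip described above -- the pbkdf2
# hash embeds its own salt, so verify() only needs the candidate password and the
# stored hash. Rounds are reduced here just to keep the demo quick.
from passlib.hash import pbkdf2_sha256 as _demo_pbkdf2
_demo_hash = _demo_pbkdf2.encrypt('example-password', rounds=10000, salt_size=16)
print _demo_pbkdf2.verify('example-password', _demo_hash)   # True
print _demo_pbkdf2.verify('wrong-password', _demo_hash)     # False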
# In[52]:
import os
#import time
import json
import getpass
import arrow
import hashlib
from passlib.hash import pbkdf2_sha256
from walkdir import filtered_walk, dir_paths, all_paths, file_paths
# In[53]:
gmtz = arrow.utcnow()
# In[54]:
yrmt = gmtz.strftime("%Y")
mthza = gmtz.strftime("%m")
dthaq = gmtz.strftime("%d")
gmtz.strftime("%Y")
#yearz = strftime("%y", gmtime())
#monthz = strftime("%m", gmtime())
#dayz = strftime("%d", gmtime())
# In[55]:
yrmt
# In[56]:
mthza
# In[57]:
dthaq
# In[58]:
def returndate():
return (dthaq + '-' + mthza + '-' + yrmt)
def returntime():
return gmtz.strftime('%H:%M:%S')
puser = ('wcmckee')
yrnum = gmtz.strftime("%Y")
mnthnum = gmtz.strftime("%m")
dayzum = gmtz.strftime("%d")
signpath = ('/home/' + puser + '/signinlca')
yrpath = (signpath + '/' + yrnum)
mnthpath = (yrpath + '/' + mnthnum)
dayzpath = (mnthpath + '/' + dayzum)
# In[59]:
if os.path.isdir(signpath) == True:
print 'Path is there'
else:
print 'Path not there'
os.mkdir(signpath)
# In[60]:
if os.path.isdir(yrpath) == True:
print 'Year Path is there'
else:
print 'Year Path not there'
os.mkdir(yrpath)
if os.path.isdir(mnthpath) == True:
print 'Month Path is there'
else:
print 'Month Path not there'
os.mkdir(mnthpath)
if os.path.isdir(dayzpath) == True:
print 'Day Path is there'
else:
print 'Day Path not there'
os.mkdir(dayzpath)
# In[61]:
dayzpath
# In[62]:
os.chdir(dayzpath)
# In[63]:
opsign = open('/home/wcmckee/signinlca/index.json', 'w')
# In[77]:
signup = raw_input('signup y/n ')
signupd = dict()
numchez = 0
if 'y' in signup:
print('Welcome to signup!')
firnam = raw_input('firstname: ')
signupd.update({"firstname":firnam, })
lasnam = raw_input('last name: ')
usenam = raw_input('username: ')
emnam = raw_input('email: ')
os.mkdir('/home/wcmckee/signinlca/usernames/' + usenam)
#passworz = passwd()
pastest = getpass.getpass('password: ')
pasnde = getpass.getpass('enter password again: ')
signupd.update({"firstname":firnam, "lastname":lasnam,
"username":usenam})
hashez = pbkdf2_sha256.encrypt(pastest, rounds=200000, salt_size=16)
emhash = pbkdf2_sha256.encrypt(emnam, rounds=200000, salt_size=16)
signupd.update({"password":hashez, "email":emhash})
savjsn = open('/home/wcmckee/signinlca/usernames/' + usenam + '/.signups.json', 'a')
jsncov = json.dumps(signupd)
savjsn.write(jsncov)
savjsn.close()
usdir = ('useradd -p ' + pastest + ' ' + usenam)
os.system(usdir)
print('Signup Complete. You can now signin with the username and password')
for logy in range(12):
ferzr = (numchez)
numchez = (numchez + 10)
#usfaz = ('/home/wcmckee/signinlca/usernames/' + str(numchez) + usenam + '/index.json', 'w')
os.mkdir('/home/wcmckee/signinlca/usernames/' + str(usenam) + '/' + str(logy))
# In[65]:
#hashez = pbkdf2_sha256.encrypt(pastest, rounds=200000, salt_size=16)
#signupd.update({"password":hashez})
#signin. need to open
print ('signin!')
loginam = raw_input('Username: ')
#Open logins.json, find the username json object
loginpas = getpass.getpass('Password: ')
vercryp = pbkdf2_sha256.verify(loginpas, hashez)
if vercryp == True:
print 'passwords correct - Logged in!'
else:
print 'passwords wrong - Could not log!'
#exit
# In[66]:
type(signupd)
# In[66]:
# In[67]:
#savjsn.write(jsncov)
# In[17]:
#savjsn.close()
# In[19]:
dicsigni = dict()
# In[21]:
signin = raw_input('signin? y/n')
if 'y' in signin:
#uzname = raw_input('firstname: ')
#lzname = raw_input('lastname: ')
uzernam = raw_input('username: ')
dicsigni.update({'username': uzernam})
opsignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signin.json'), 'w')
logtest = getpass.getpass('login password: ')
loghash = pbkdf2_sha256.encrypt(logtest, rounds=200000, salt_size=16)
vercryp = pbkdf2_sha256.verify(logtest, hashez)
dicsigni.update({'password':loghash})
dicjsn = json.dumps(dicsigni)
opsignin.write(dicjsn)
opsignin.close()
#opsignin.write
if vercryp == True:
    print 'passwords correct'
# In[24]:
ersignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signin.json'), 'r')
paswz = ersignin.read()
# In[28]:
dicvert = json.loads(paswz)
# In[49]:
dicloin = dicvert['password']
# In[39]:
tresignin = open('/home/wcmckee/signinlca/usernames/' + str(uzernam) + ('/') + ('.signups.json'), 'r')
# In[40]:
convea = tresignin.read()
# In[43]:
jsnver = json.loads(convea)
# In[47]:
jpas = jsnver['password']
# In[50]:
jpas
# In[51]:
dicloin
# In[118]:
loginz = raw_input('signin y/n ')
if 'y' in loginz:
print('You signed in')
#logoutz = None
else:
logoutz = raw_input('signouts y/n ')
# In[119]:
if 'y' in loginz:
firnam = raw_input('first name: ')
lasnam = raw_input('last name: ')
tshir = raw_input('tshirt size: ')
cofvol = raw_input('coffee volc: ')
comen = raw_input('comments: ')
betdict = dict()
betdict.update({'first-name' : firnam, 'last-name' : lasnam, 'signin-date' : returndate()})
betdict.update({'signin-hrmin' : returntime()})
betdict.update({'tshirt-size' : tshir})
betdict.update({'coffees' : int(cofvol)})
betdict.update({'comments:' : comen})
convj = json.dumps(betdict)
puser = getpass.getuser()
opday = open((dayzpath + '/' + firnam + lasnam) + '.json', 'w')
opday.write(str(convj))
opday.close()
else:
print ('not signing in')
# In[480]:
if 'y' in logoutz:
comout = raw_input('out comments: ')
outdic = dict()
firnaz = raw_input('first name: ' )
lasnaz = raw_input('last name: ')
outdic.update({'signout-date': returndate()})
outdic.update({'signout-time': returntime()})
outdic.update({'signout-comment': comout})
conout = json.dumps(outdic)
signoutz = open((dayzpath + '/' + firnaz + lasnaz) + '.json', 'a')
signoutz.write(str(conout))
signoutz.close()
else:
print ('not signing out')
# In[481]:
os.listdir(dayzpath)
# In[481]:
# In[68]:
files = file_paths(filtered_walk('/home/wcmckee/signinlca/', depth=100, included_files=['*.json']))
# In[69]:
for fie in files:
#print fie
print fie
# In[72]:
uslis = os.listdir('/home/wcmckee/signinlca/usernames/')
# In[74]:
print ('User List: ')
for usl in uslis:
print usl
# In[ ]:
| mit | -637,597,969,727,885,300 | 19.533643 | 203 | 0.640904 | false | 2.831094 | true | false | false |
desihub/desisim | py/desisim/spec_qa/s2n.py | 1 | 13194 | """
desisim.spec_qa.s2n
=========================
Module to examine S/N in object spectra
"""
from __future__ import print_function, absolute_import, division
import matplotlib
# matplotlib.use('Agg')
import numpy as np
import sys, os, glob
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.io import fits
from astropy.table import Table, vstack, hstack, MaskedColumn, join
from desiutil.log import get_logger, DEBUG
from desispec.io import get_exposures, findfile, read_fibermap, read_frame
from desisim.spec_qa.utils import get_sty_otype
log = get_logger()
def load_all_s2n_values(nights, channel, sub_exposures=None):
"""
Calculate S/N values for a set of spectra from an input list of nights
Args:
nights: list
channel: str ('b','r','z')
sub_exposures:
Returns:
fdict: dict
Contains all the S/N info for all nights in the given channel
"""
fdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[], objtype=[])
for night in nights:
if sub_exposures is not None:
exposures = sub_exposures
else:
exposures = get_exposures(night)#, raw=True)
for exposure in exposures:
fibermap_path = findfile(filetype='fibermap', night=night, expid=exposure)
fibermap_data = read_fibermap(fibermap_path)
flavor = fibermap_data.meta['FLAVOR']
if flavor.lower() in ('arc', 'flat', 'bias'):
log.debug('Skipping calibration {} exposure {:08d}'.format(flavor, exposure))
continue
# Load simspec
simspec_file = fibermap_path.replace('fibermap', 'simspec')
log.debug('Getting truth from {}'.format(simspec_file))
sps_hdu = fits.open(simspec_file)
sps_tab = Table(sps_hdu['TRUTH'].data,masked=True)
#- Get OIIFLUX from separate HDU and join
if ('OIIFLUX' not in sps_tab.colnames) and ('TRUTH_ELG' in sps_hdu):
elg_truth = Table(sps_hdu['TRUTH_ELG'].data)
sps_tab = join(sps_tab, elg_truth['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
sps_tab['OIIFLUX'] = 0.0
sps_hdu.close()
#objs = sps_tab['TEMPLATETYPE'] == objtype
#if np.sum(objs) == 0:
# continue
# Load spectra (flux or not fluxed; should not matter)
for ii in range(10):
camera = channel+str(ii)
cframe_path = findfile(filetype='cframe', night=night, expid=exposure, camera=camera)
try:
log.debug('Reading from {}'.format(cframe_path))
cframe = read_frame(cframe_path)
except (IOError, OSError):
log.warn("Cannot find file: {:s}".format(cframe_path))
continue
# Calculate S/N per Ang
dwave = cframe.wave - np.roll(cframe.wave,1)
dwave[0] = dwave[1]
# Calculate
s2n = cframe.flux * np.sqrt(cframe.ivar) / np.sqrt(dwave)
#s2n = cframe.flux[iobjs,:] * np.sqrt(cframe.ivar[iobjs,:]) / np.sqrt(dwave)
# Save
fdict['objtype'].append(sps_tab['TEMPLATETYPE'].data[cframe.fibers])
fdict['waves'].append(cframe.wave)
fdict['s2n'].append(s2n)
fdict['fluxes'].append(sps_tab['MAG'].data[cframe.fibers])
fdict['OII'].append(sps_tab['OIIFLUX'].data[cframe.fibers])
fdict['exptime'].append(cframe.meta['EXPTIME'])
# Return
return fdict
def parse_s2n_values(objtype, fdict):
"""
Parse the input set of S/N measurements on objtype
Args:
objtype: str
fdict: dict
Contains all the S/N info for all nights in a given channel
Returns:
pdict: dict
Contains all the S/N info for the given objtype
"""
pdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[], objtype=[])
# Loop on all the entries
for ss, wave in enumerate(fdict['waves']):
objs = fdict['objtype'][ss] == objtype
if np.sum(objs) == 0:
continue
iobjs = np.where(objs)[0]
# Parse/Save
pdict['waves'].append(wave)
pdict['s2n'].append(fdict['s2n'][ss][iobjs,:])
pdict['fluxes'].append(fdict['fluxes'][ss][iobjs])
if objtype == 'ELG':
pdict['OII'].append(fdict['OII'][ss][iobjs])
pdict['exptime'].append(fdict['exptime'][ss])
# Return
return pdict
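# Illustrative usage sketch (the night and channel values are assumptions):
#
#   fdict = load_all_s2n_values(['20190901'], 'b')
#   elg = parse_s2n_values('ELG', fdict)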
def load_s2n_values(objtype, nights, channel, sub_exposures=None):
"""
DEPRECATED
Calculate S/N values for a set of spectra
Args:
objtype: str
nights: list
channel: str
sub_exposures:
Returns:
fdict: dict
Contains S/N info
"""
fdict = dict(waves=[], s2n=[], fluxes=[], exptime=[], OII=[])
for night in nights:
if sub_exposures is not None:
exposures = sub_exposures
else:
exposures = get_exposures(night)#, raw=True)
for exposure in exposures:
fibermap_path = findfile(filetype='fibermap', night=night, expid=exposure)
fibermap_data = read_fibermap(fibermap_path)
flavor = fibermap_data.meta['FLAVOR']
if flavor.lower() in ('arc', 'flat', 'bias'):
log.debug('Skipping calibration {} exposure {:08d}'.format(flavor, exposure))
continue
# Load simspec
simspec_file = fibermap_path.replace('fibermap', 'simspec')
log.debug('Getting {} truth from {}'.format(objtype, simspec_file))
sps_hdu = fits.open(simspec_file)
sps_tab = Table(sps_hdu['TRUTH'].data,masked=True)
#- Get OIIFLUX from separate HDU and join
if ('OIIFLUX' not in sps_tab.colnames) and ('TRUTH_ELG' in sps_hdu):
elg_truth = Table(sps_hdu['TRUTH_ELG'].data)
sps_tab = join(sps_tab, elg_truth['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
sps_tab['OIIFLUX'] = 0.0
sps_hdu.close()
objs = sps_tab['TEMPLATETYPE'] == objtype
if np.sum(objs) == 0:
continue
# Load spectra (flux or not fluxed; should not matter)
for ii in range(10):
camera = channel+str(ii)
cframe_path = findfile(filetype='cframe', night=night, expid=exposure, camera=camera)
try:
log.debug('Reading {} from {}'.format(objtype, cframe_path))
cframe = read_frame(cframe_path)
except (IOError, OSError):
log.warn("Cannot find file: {:s}".format(cframe_path))
continue
# Calculate S/N per Ang
dwave = cframe.wave - np.roll(cframe.wave,1)
dwave[0] = dwave[1]
#
iobjs = objs[cframe.fibers]
if np.sum(iobjs) == 0:
continue
s2n = cframe.flux[iobjs,:] * np.sqrt(cframe.ivar[iobjs,:]) / np.sqrt(dwave)
# Save
fdict['waves'].append(cframe.wave)
fdict['s2n'].append(s2n)
fdict['fluxes'].append(sps_tab['MAG'][cframe.fibers[iobjs]])
if objtype == 'ELG':
fdict['OII'].append(sps_tab['OIIFLUX'][cframe.fibers[iobjs]])
fdict['exptime'].append(cframe.meta['EXPTIME'])
# Return
return fdict
def obj_s2n_wave(s2n_dict, wv_bins, flux_bins, otype, outfile=None, ax=None):
"""Generate QA of S/N for a given object type
"""
logs = get_logger()
nwv = wv_bins.size
nfx = flux_bins.size
s2n_sum = np.zeros((nwv-1,nfx-1))
s2n_N = np.zeros((nwv-1,nfx-1)).astype(int)
# Loop on exposures+wedges (can do just once if these are identical for each)
for jj, wave in enumerate(s2n_dict['waves']):
w_i = np.digitize(wave, wv_bins) - 1
m_i = np.digitize(s2n_dict['fluxes'][jj], flux_bins) - 1
mmm = []
for ll in range(nfx-1): # Only need to do once
mmm.append(m_i == ll)
#
for kk in range(nwv-1):
all_s2n = s2n_dict['s2n'][jj][:,w_i==kk]
for ll in range(nfx-1):
if np.any(mmm[ll]):
s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])
s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]
sty_otype = get_sty_otype()
# Plot
if ax is None:
fig = plt.figure(figsize=(6, 6.0))
ax= plt.gca()
# Title
fig.suptitle('{:s}: Summary'.format(sty_otype[otype]['lbl']),
fontsize='large')
# Plot em up
wv_cen = (wv_bins + np.roll(wv_bins,-1))/2.
lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]
mxy = 1e-9
for ss in range(nfx-1):
if np.sum(s2n_N[:,ss]) == 0:
continue
lbl = 'MAG = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])
ax.plot(wv_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],
label=lbl, color=sty_otype[otype]['color'])
mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))
ax.set_xlabel('Wavelength (Ang)')
#ax.set_xlim(-ylim, ylim)
ax.set_ylabel('Mean S/N per Ang in bins of 20A')
ax.set_yscale("log", nonposy='clip')
ax.set_ylim(0.1, mxy*1.1)
legend = plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='medium', numpoints=1)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)
plt.subplots_adjust(top=0.92)
if outfile is not None:
plt.savefig(outfile, dpi=600)
print("Wrote: {:s}".format(outfile))
def obj_s2n_z(s2n_dict, z_bins, flux_bins, otype, outfile=None, ax=None):
"""Generate QA of S/N for a given object type vs. z (mainly for ELG)
"""
logs = get_logger()
nz = z_bins.size
nfx = flux_bins.size
s2n_sum = np.zeros((nz-1,nfx-1))
s2n_N = np.zeros((nz-1,nfx-1)).astype(int)
# Loop on exposures+wedges (can do just once if these are identical for each)
for jj, wave in enumerate(s2n_dict['waves']):
# Turn wave into z
zELG = wave / 3728. - 1.
z_i = np.digitize(zELG, z_bins) - 1
m_i = np.digitize(s2n_dict['OII'][jj]*1e17, flux_bins) - 1
mmm = []
for ll in range(nfx-1): # Only need to do once
mmm.append(m_i == ll)
#
for kk in range(nz-1):
all_s2n = s2n_dict['s2n'][jj][:,z_i==kk]
for ll in range(nfx-1):
if np.any(mmm[ll]):
s2n_sum[kk, ll] += np.sum(all_s2n[mmm[ll],:])
s2n_N[kk, ll] += np.sum(mmm[ll]) * all_s2n.shape[1]
sty_otype = get_sty_otype()
# Plot
if ax is None:
fig = plt.figure(figsize=(6, 6.0))
ax= plt.gca()
# Title
fig.suptitle('{:s}: Redshift Summary'.format(sty_otype[otype]['lbl']),
fontsize='large')
# Plot em up
z_cen = (z_bins + np.roll(z_bins,-1))/2.
lstys = ['-', '--', '-.', ':', (0, (3, 1, 1, 1))]
mxy = 1e-9
for ss in range(nfx-1):
if np.sum(s2n_N[:,ss]) == 0:
continue
lbl = 'OII(1e-17) = [{:0.1f},{:0.1f}]'.format(flux_bins[ss], flux_bins[ss+1])
ax.plot(z_cen[:-1], s2n_sum[:,ss]/s2n_N[:,ss], linestyle=lstys[ss],
label=lbl, color=sty_otype[otype]['color'])
mxy = max(mxy, np.max(s2n_sum[:,ss]/s2n_N[:,ss]))
ax.set_xlabel('Redshift')
ax.set_xlim(z_bins[0], z_bins[-1])
ax.set_ylabel('Mean S/N per Ang in dz bins')
ax.set_yscale("log", nonposy='clip')
ax.set_ylim(0.1, mxy*1.1)
legend = plt.legend(loc='lower right', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='medium', numpoints=1)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.2,w_pad=0.3)
plt.subplots_adjust(top=0.92)
if outfile is not None:
plt.savefig(outfile, dpi=600)
print("Wrote: {:s}".format(outfile))
# Command line execution
if __name__ == '__main__':
import desispec.io
from astropy.table import Table
from astropy.io import fits
# Test obj_s2n method
if False:
nights = ['20190901']
exposures = [65+i for i in range(6)]
s2n_values = load_s2n_values('ELG', nights, 'b', sub_exposures=exposures)
wv_bins = np.arange(3570., 5950., 20.)
obj_s2n_wave(s2n_values, wv_bins, np.arange(19., 25., 1.0), 'ELG', outfile='tst.pdf')
# Test obj_s2n_z
if True:
nights = ['20190901']
exposures = [65+i for i in range(6)]
s2n_values = load_s2n_values('ELG', nights, 'z', sub_exposures=exposures)
z_bins = np.linspace(1.0, 1.6, 100) # z camera
oii_bins = np.array([1., 6., 10., 30., 100., 1000.])
obj_s2n_z(s2n_values, z_bins, oii_bins, 'ELG', outfile='tstz.pdf')
| bsd-3-clause | 2,979,636,215,285,394,400 | 35.65 | 101 | 0.53926 | false | 3.094278 | false | false | false |
bratsche/Neutron-Drive | google_appengine/google/appengine/ext/ndb/stats.py | 20 | 16377 | """Models to be used when accessing app specific datastore usage statistics.
These entities cannot be created by users, but are populated in the
application's datastore by offline processes run by the Google App Engine team.
"""
# NOTE: All constant strings in this file should be kept in sync with
# those in google/appengine/ext/db/stats.py.
from . import model
__all__ = ['BaseKindStatistic',
'BaseStatistic',
'GlobalStat',
'KindCompositeIndexStat',
'KindNonRootEntityStat',
'KindPropertyNamePropertyTypeStat',
'KindPropertyNameStat',
'KindPropertyTypeStat',
'KindRootEntityStat',
'KindStat',
'NamespaceGlobalStat',
'NamespaceKindCompositeIndexStat',
'NamespaceKindNonRootEntityStat',
'NamespaceKindPropertyNamePropertyTypeStat',
'NamespaceKindPropertyNameStat',
'NamespaceKindPropertyTypeStat',
'NamespaceKindRootEntityStat',
'NamespaceKindStat',
'NamespacePropertyTypeStat',
'NamespaceStat',
'PropertyTypeStat',
]
class BaseStatistic(model.Model):
"""Base Statistic Model class.
Attributes:
bytes: the total number of bytes taken up in the datastore for the
statistic instance.
    count: the total number of occurrences of the statistic
in the datastore.
timestamp: the time the statistic instance was written to the datastore.
"""
# This is necessary for the _get_kind() classmethod override.
STORED_KIND_NAME = '__BaseStatistic__'
# The number of bytes that is taken up.
bytes = model.IntegerProperty()
# The number of entity records.
count = model.IntegerProperty()
# When this statistic was inserted into the datastore.
timestamp = model.DateTimeProperty()
@classmethod
def _get_kind(cls):
"""Kind name override."""
return cls.STORED_KIND_NAME
class BaseKindStatistic(BaseStatistic):
"""Base Statistic Model class for stats associated with kinds.
Attributes:
kind_name: the name of the kind associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
"""
# This is necessary for the _get_kind() classmethod override.
STORED_KIND_NAME = '__BaseKindStatistic__'
# The name of the kind.
kind_name = model.StringProperty()
# The number of bytes that is taken up in entity table. entity_bytes does not
# reflect the storage allocated for indexes, either built-in or composite
# indexes.
entity_bytes = model.IntegerProperty(default=0L)
class GlobalStat(BaseStatistic):
"""An aggregate of all entities across the entire application.
This statistic only has a single instance in the datastore that contains the
total number of entities stored and the total number of bytes they take up.
Attributes:
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Total__'
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite indexes entries.
composite_index_count = model.IntegerProperty(default=0L)
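# Illustrative sketch (an assumption about usage, not defined in this module): the
# stat entities are written by App Engine itself and read back with ordinary
# queries, e.g.
#
#   stat = GlobalStat.query().get()
#   if stat is not None:
#       total_bytes, total_entities = stat.bytes, stat.count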
class NamespaceStat(BaseStatistic):
"""An aggregate of all entities across an entire namespace.
This statistic has one instance per namespace. The key_name is the
represented namespace. NamespaceStat entities will only be found
in the namespace "" (empty string). It contains the total
number of entities stored and the total number of bytes they take up.
Attributes:
subject_namespace: the namespace associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Namespace__'
# The namespace name this NamespaceStat refers to.
subject_namespace = model.StringProperty()
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite indexes entries.
composite_index_count = model.IntegerProperty(default=0L)
class KindStat(BaseKindStatistic):
"""An aggregate of all entities at the granularity of their Kind.
There is an instance of the KindStat for every Kind that is in the
application's datastore. This stat contains per-Kind statistics.
Attributes:
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
composite_index_bytes: the number of bytes taken up to store composite
index entries
composite_index_count: the number of composite index entries.
"""
STORED_KIND_NAME = '__Stat_Kind__'
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
# The number of bytes taken up for composite index entries.
composite_index_bytes = model.IntegerProperty(default=0L)
# The number of composite indexes entries.
composite_index_count = model.IntegerProperty(default=0L)
class KindRootEntityStat(BaseKindStatistic):
"""Statistics of the number of root entities in the datastore by Kind.
There is an instance of the KindRootEntityState for every Kind that is in the
application's datastore and has an instance that is a root entity. This stat
contains statistics regarding these root entity instances.
"""
STORED_KIND_NAME = '__Stat_Kind_IsRootEntity__'
class KindNonRootEntityStat(BaseKindStatistic):
"""Statistics of the number of non root entities in the datastore by Kind.
There is an instance of the KindNonRootEntityStat for every Kind that is in
the application's datastore that is a not a root entity. This stat contains
statistics regarding thse non root entity instances.
"""
STORED_KIND_NAME = '__Stat_Kind_NotRootEntity__'
class PropertyTypeStat(BaseStatistic):
"""An aggregate of all properties across the entire application by type.
There is an instance of the PropertyTypeStat for every property type
(google.appengine.api.datastore_types._PROPERTY_TYPES) in use by the
application in its datastore.
Attributes:
property_type: the property type associated with the statistic instance.
entity_bytes: the number of bytes taken up to store the statistic
in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType__'
# The name of the property_type.
property_type = model.StringProperty()
# The number of bytes that is taken up in entity storage.
entity_bytes = model.IntegerProperty(default=0L)
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyTypeStat(BaseKindStatistic):
"""Statistics on (kind, property_type) tuples in the app's datastore.
There is an instance of the KindPropertyTypeStat for every
(kind, property_type) tuple in the application's datastore.
Attributes:
property_type: the property type associated with the statistic instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType_Kind__'
# The name of the property_type.
property_type = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNameStat(BaseKindStatistic):
"""Statistics on (kind, property_name) tuples in the app's datastore.
There is an instance of the KindPropertyNameStat for every
(kind, property_name) tuple in the application's datastore.
Attributes:
property_name: the name of the property associated with the statistic
instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyName_Kind__'
# The name of the property.
property_name = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
"""Statistic on (kind, property_name, property_type) tuples in the datastore.
There is an instance of the KindPropertyNamePropertyTypeStat for every
(kind, property_name, property_type) tuple in the application's datastore.
Attributes:
property_type: the property type associated with the statistic instance.
property_name: the name of the property associated with the statistic
instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
index entries
builtin_index_count: the number of built-in index entries.
"""
STORED_KIND_NAME = '__Stat_PropertyType_PropertyName_Kind__'
# The name of the property type.
property_type = model.StringProperty()
# The name of the property.
property_name = model.StringProperty()
# The number of bytes taken up for built-in index entries.
builtin_index_bytes = model.IntegerProperty(default=0L)
# The number of built-in index entries.
builtin_index_count = model.IntegerProperty(default=0L)
class KindCompositeIndexStat(BaseStatistic):
"""Statistic on (kind, composite_index_id) tuples in the datastore.
There is an instance of the KindCompositeIndexStat for every unique
(kind, composite_index_id) tuple in the application's datastore indexes.
Attributes:
index_id: the id of the composite index associated with the statistic
instance.
kind_name: the name of the kind associated with the statistic instance.
"""
STORED_KIND_NAME = '__Stat_Kind_CompositeIndex__'
# The id of the composite index
index_id = model.IntegerProperty()
# The name of the kind.
kind_name = model.StringProperty()
# The following specify namespace-specific stats.
# These types are specific to the datastore namespace they are located
# within. These will only be produced if datastore entities exist
# in a namespace other than the empty namespace (i.e. namespace="").
class NamespaceGlobalStat(GlobalStat):
"""GlobalStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Total__'
class NamespaceKindStat(KindStat):
"""KindStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind__'
class NamespaceKindRootEntityStat(KindRootEntityStat):
"""KindRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_IsRootEntity__'
class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
"""KindNonRootEntityStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_NotRootEntity__'
class NamespacePropertyTypeStat(PropertyTypeStat):
"""PropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType__'
class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
"""KindPropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType_Kind__'
class NamespaceKindPropertyNameStat(KindPropertyNameStat):
"""KindPropertyNameStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyName_Kind__'
class NamespaceKindPropertyNamePropertyTypeStat(
KindPropertyNamePropertyTypeStat):
"""KindPropertyNamePropertyTypeStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_PropertyType_PropertyName_Kind__'
class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
"""KindCompositeIndexStat equivalent for a specific namespace.
These may be found in each specific namespace and represent stats for
that particular namespace.
"""
STORED_KIND_NAME = '__Stat_Ns_Kind_CompositeIndex__'
# Maps a datastore stat entity kind name to its respective model class.
# NOTE: Any new stats added to this module should also be added here.
_DATASTORE_STATS_CLASSES_BY_KIND = {
GlobalStat.STORED_KIND_NAME: GlobalStat,
NamespaceStat.STORED_KIND_NAME: NamespaceStat,
KindStat.STORED_KIND_NAME: KindStat,
KindRootEntityStat.STORED_KIND_NAME: KindRootEntityStat,
KindNonRootEntityStat.STORED_KIND_NAME: KindNonRootEntityStat,
PropertyTypeStat.STORED_KIND_NAME: PropertyTypeStat,
KindPropertyTypeStat.STORED_KIND_NAME: KindPropertyTypeStat,
KindPropertyNameStat.STORED_KIND_NAME: KindPropertyNameStat,
KindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
KindPropertyNamePropertyTypeStat,
KindCompositeIndexStat.STORED_KIND_NAME: KindCompositeIndexStat,
NamespaceGlobalStat.STORED_KIND_NAME: NamespaceGlobalStat,
NamespaceKindStat.STORED_KIND_NAME: NamespaceKindStat,
NamespaceKindRootEntityStat.STORED_KIND_NAME: NamespaceKindRootEntityStat,
NamespaceKindNonRootEntityStat.STORED_KIND_NAME:
NamespaceKindNonRootEntityStat,
NamespacePropertyTypeStat.STORED_KIND_NAME: NamespacePropertyTypeStat,
NamespaceKindPropertyTypeStat.STORED_KIND_NAME:
NamespaceKindPropertyTypeStat,
NamespaceKindPropertyNameStat.STORED_KIND_NAME:
NamespaceKindPropertyNameStat,
NamespaceKindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
NamespaceKindPropertyNamePropertyTypeStat,
NamespaceKindCompositeIndexStat.STORED_KIND_NAME:
NamespaceKindCompositeIndexStat,
}
| bsd-3-clause | -8,705,452,872,683,001,000 | 35.312639 | 79 | 0.749954 | false | 4.193854 | false | false | false |
CINPLA/expipe-dev | py-open-ephys/pyopenephys/OpenEphys.py | 1 | 17635 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 3 15:18:38 2014
@author: Dan Denman and Josh Siegle
Loads .continuous, .events, and .spikes files saved from the Open Ephys GUI
Usage:
import OpenEphys
data = OpenEphys.load(pathToFile) # returns a dict with data, timestamps, etc.
"""
import os
import numpy as np
import scipy.signal
import scipy.io
import time
import struct
from copy import deepcopy
# constants
NUM_HEADER_BYTES = 1024
SAMPLES_PER_RECORD = 1024
BYTES_PER_SAMPLE = 2
RECORD_SIZE = 4 + 8 + SAMPLES_PER_RECORD * BYTES_PER_SAMPLE + 10 # size of each continuous record in bytes
RECORD_MARKER = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
# constants for pre-allocating matrices:
MAX_NUMBER_OF_SPIKES = int(1e6)
MAX_NUMBER_OF_RECORDS = int(1e6)
MAX_NUMBER_OF_EVENTS = int(1e6)
def load(filepath):
# redirects to code for individual file types
if 'continuous' in filepath:
data = loadContinuous(filepath)
elif 'spikes' in filepath:
data = loadSpikes(filepath)
elif 'events' in filepath:
data = loadEvents(filepath)
else:
raise Exception("Not a recognized file type. Please input a .continuous, .spikes, or .events file")
return data
def loadFolder(folderpath, dtype=float, **kwargs):
# load all continuous files in a folder
data = {}
# load all continuous files in a folder
if 'channels' in kwargs.keys():
filelist = ['100_CH' + x + '.continuous' for x in map(str, kwargs['channels'])]
else:
filelist = os.listdir(folderpath)
t0 = time.time()
numFiles = 0
for i, f in enumerate(filelist):
if '.continuous' in f:
data[f.replace('.continuous', '')] = loadContinuous(os.path.join(folderpath, f), dtype=dtype)
numFiles += 1
print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
print(''.join(('Total Load Time: ', str((time.time() - t0)), ' sec')))
return data
def loadFolderToArray(folderpath, channels='all', chprefix='CH',
dtype=float, session='0', source='100'):
'''Load continuous files in specified folder to a single numpy array. By default all
    CH continuous files are loaded in numerical order; ordering can be specified with
optional channels argument which should be a list of channel numbers.'''
if channels == 'all':
channels = _get_sorted_channels(folderpath, chprefix, session, source)
if session == '0':
filelist = [source + '_' + chprefix + x + '.continuous' for x in map(str, channels)]
else:
filelist = [source + '_' + chprefix + x + '_' + session + '.continuous' for x in map(str, channels)]
t0 = time.time()
numFiles = 1
channel_1_data = loadContinuous(os.path.join(folderpath, filelist[0]), dtype)['data']
n_samples = len(channel_1_data)
n_channels = len(filelist)
data_array = np.zeros([n_samples, n_channels], dtype)
data_array[:, 0] = channel_1_data
for i, f in enumerate(filelist[1:]):
data_array[:, i + 1] = loadContinuous(os.path.join(folderpath, f), dtype)['data']
numFiles += 1
print(''.join(('Avg. Load Time: ', str((time.time() - t0) / numFiles), ' sec')))
print(''.join(('Total Load Time: ', str((time.time() - t0)), ' sec')))
return data_array
def loadContinuous(filepath, dtype=float):
assert dtype in (float, np.int16), \
        'Invalid data type specified for loadContinuous, valid types are float and np.int16'
# print("Loading continuous data...")
ch = {}
# read in the data
f = open(filepath, 'rb')
fileLength = os.fstat(f.fileno()).st_size
# calculate number of samples
recordBytes = fileLength - NUM_HEADER_BYTES
if recordBytes % RECORD_SIZE != 0:
raise Exception("File size is not consistent with a continuous file: may be corrupt")
nrec = recordBytes // RECORD_SIZE
nsamp = nrec * SAMPLES_PER_RECORD
# pre-allocate samples
samples = np.zeros(nsamp, dtype)
timestamps = np.zeros(nrec)
recordingNumbers = np.zeros(nrec)
indices = np.arange(0, nsamp + 1, SAMPLES_PER_RECORD, np.dtype(np.int64))
header = readHeader(f)
recIndices = np.arange(0, nrec)
for recordNumber in recIndices:
timestamps[recordNumber] = np.fromfile(f, np.dtype('<i8'), 1) # little-endian 64-bit signed integer
N = np.fromfile(f, np.dtype('<u2'), 1)[0] # little-endian 16-bit unsigned integer
# print index
if N != SAMPLES_PER_RECORD:
raise Exception('Found corrupted record in block ' + str(recordNumber))
recordingNumbers[recordNumber] = (np.fromfile(f, np.dtype('>u2'), 1)) # big-endian 16-bit unsigned integer
if dtype == float: # Convert data to float array and convert bits to voltage.
data = np.fromfile(f, np.dtype('>i2'), N) * float(
header['bitVolts']) # big-endian 16-bit signed integer, multiplied by bitVolts
else: # Keep data in signed 16 bit integer format.
data = np.fromfile(f, np.dtype('>i2'), N) # big-endian 16-bit signed integer
samples[indices[recordNumber]:indices[recordNumber + 1]] = data
marker = f.read(10) # dump
# print recordNumber
# print index
ch['header'] = header
ch['timestamps'] = timestamps
ch['data'] = samples # OR use downsample(samples,1), to save space
ch['recordingNumber'] = recordingNumbers
f.close()
return ch
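# Illustrative usage sketch (the file name is an assumption, and header keys can
# vary with the recording software version):
#
#   ch = loadContinuous('100_CH1.continuous')
#   rate = float(ch['header']['sampleRate'])
#   trace = ch['data']   # in microvolts when dtype=float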
def loadSpikes(filepath):
'''
Loads spike waveforms and timestamps from filepath (should be .spikes file)
'''
data = {}
# print('loading spikes...')
f = open(filepath, 'rb')
header = readHeader(f)
if float(header[' version']) < 0.4:
raise Exception('Loader is only compatible with .spikes files with version 0.4 or higher')
data['header'] = header
numChannels = int(header['num_channels'])
numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**
spikes = np.zeros((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))
timestamps = np.zeros(MAX_NUMBER_OF_SPIKES)
source = np.zeros(MAX_NUMBER_OF_SPIKES)
gain = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
thresh = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
sortedId = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
recNum = np.zeros(MAX_NUMBER_OF_SPIKES)
currentSpike = 0
while f.tell() < os.fstat(f.fileno()).st_size:
eventType = np.fromfile(f, np.dtype('<u1'), 1) # always equal to 4, discard
timestamps[currentSpike] = np.fromfile(f, np.dtype('<i8'), 1)
software_timestamp = np.fromfile(f, np.dtype('<i8'), 1)
source[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
numChannels = int(np.fromfile(f, np.dtype('<u2'), 1))
numSamples = int(np.fromfile(f, np.dtype('<u2'), 1))
sortedId[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
electrodeId = np.fromfile(f, np.dtype('<u2'), 1)
channel = np.fromfile(f, np.dtype('<u2'), 1)
color = np.fromfile(f, np.dtype('<u1'), 3)
pcProj = np.fromfile(f, np.float32, 2)
sampleFreq = np.fromfile(f, np.dtype('<u2'), 1)
waveforms = np.fromfile(f, np.dtype('<u2'), numChannels * numSamples)
gain[currentSpike, :] = np.fromfile(f, np.float32, numChannels)
thresh[currentSpike, :] = np.fromfile(f, np.dtype('<u2'), numChannels)
recNum[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
waveforms_reshaped = np.reshape(waveforms, (numChannels, numSamples))
waveforms_reshaped = waveforms_reshaped.astype(float)
waveforms_uv = waveforms_reshaped
for ch in range(numChannels):
waveforms_uv[ch, :] -= 32768
waveforms_uv[ch, :] /= gain[currentSpike, ch] * 1000
spikes[currentSpike] = waveforms_uv.T
currentSpike += 1
data['spikes'] = spikes[:currentSpike, :, :]
data['timestamps'] = timestamps[:currentSpike]
data['source'] = source[:currentSpike]
data['gain'] = gain[:currentSpike, :]
data['thresh'] = thresh[:currentSpike, :]
data['recordingNumber'] = recNum[:currentSpike]
data['sortedId'] = sortedId[:currentSpike]
return data
def loadEvents(filepath):
data = {}
# print('loading events...')
f = open(filepath, 'rb')
header = readHeader(f)
if float(header[' version']) < 0.4:
raise Exception('Loader is only compatible with .events files with version 0.4 or higher')
data['header'] = header
index = -1
channel = np.zeros(MAX_NUMBER_OF_EVENTS)
timestamps = np.zeros(MAX_NUMBER_OF_EVENTS)
sampleNum = np.zeros(MAX_NUMBER_OF_EVENTS)
nodeId = np.zeros(MAX_NUMBER_OF_EVENTS)
eventType = np.zeros(MAX_NUMBER_OF_EVENTS)
eventId = np.zeros(MAX_NUMBER_OF_EVENTS)
recordingNumber = np.zeros(MAX_NUMBER_OF_EVENTS)
while f.tell() < os.fstat(f.fileno()).st_size:
index += 1
timestamps[index] = np.fromfile(f, np.dtype('<i8'), 1)
sampleNum[index] = np.fromfile(f, np.dtype('<i2'), 1)
eventType[index] = np.fromfile(f, np.dtype('<u1'), 1)
nodeId[index] = np.fromfile(f, np.dtype('<u1'), 1)
eventId[index] = np.fromfile(f, np.dtype('<u1'), 1)
channel[index] = np.fromfile(f, np.dtype('<u1'), 1)
recordingNumber[index] = np.fromfile(f, np.dtype('<u2'), 1)
data['channel'] = channel[:index]
data['timestamps'] = timestamps[:index]
data['eventType'] = eventType[:index]
data['nodeId'] = nodeId[:index]
data['eventId'] = eventId[:index]
data['recordingNumber'] = recordingNumber[:index]
data['sampleNum'] = sampleNum[:index]
return data
def readHeader(f):
header = {}
h = f.read(1024).decode().replace('\n', '').replace('header.', '')
for i, item in enumerate(h.split(';')):
if '=' in item:
header[item.split(' = ')[0]] = item.split(' = ')[1]
return header
def downsample(trace, down):
downsampled = scipy.signal.resample(trace, np.shape(trace)[0] / down)
return downsampled
def pack(folderpath, source='100', **kwargs):
# convert single channel open ephys channels to a .dat file for compatibility with the KlustaSuite, Neuroscope and Klusters
# should not be necessary for versions of open ephys which write data into HDF5 format.
# loads .continuous files in the specified folder and saves a .DAT in that folder
# optional arguments:
# source: string name of the source that openephys uses as the prefix. is usually 100, if the headstage is the first source added, but can specify something different
#
# data: pre-loaded data to be packed into a .DAT
# dref: int specifying a channel # to use as a digital reference. is subtracted from all channels.
    #   order: the order in which the .continuous files are packed into the .DAT. should be a list of .continuous channel numbers. length must equal total channels.
# suffix: appended to .DAT filename, which is openephys.DAT if no suffix provided.
# load the openephys data into memory
if 'data' not in kwargs.keys():
if 'channels' not in kwargs.keys():
data = loadFolder(folderpath, dtype=np.int16)
else:
data = loadFolder(folderpath, dtype=np.int16, channels=kwargs['channels'])
else:
data = kwargs['data']
# if specified, do the digital referencing
if 'dref' in kwargs.keys():
ref = load(os.path.join(folderpath, ''.join((source, '_CH', str(kwargs['dref']), '.continuous'))))
for i, channel in enumerate(data.keys()):
data[channel]['data'] = data[channel]['data'] - ref['data']
# specify the order the channels are written in
if 'order' in kwargs.keys():
order = kwargs['order']
else:
order = list(data)
# add a suffix, if one was specified
if 'suffix' in kwargs.keys():
suffix = kwargs['suffix']
else:
suffix = ''
# make a file to write the data back out into .dat format
outpath = os.path.join(folderpath, ''.join(('openephys', suffix, '.dat')))
out = open(outpath, 'wb')
# go through the data and write it out in the .dat format
# .dat format specified here: http://neuroscope.sourceforge.net/UserManual/data-files.html
channelOrder = []
print(''.join(('...saving .dat to ', outpath, '...')))
random_datakey = next(iter(data))
bar = ProgressBar(len(data[random_datakey]['data']))
for i in range(len(data[random_datakey]['data'])):
for j in range(len(order)):
if source in random_datakey:
ch = data[order[j]]['data']
else:
ch = data[''.join(('CH', str(order[j]).replace('CH', '')))]['data']
out.write(struct.pack('h', ch[i])) # signed 16-bit integer
# figure out which order this thing packed the channels in. only do this once.
if i == 0:
channelOrder.append(order[j])
        # update the progress bar as we go
if i % (len(data[random_datakey]['data']) / 100) == 0:
bar.animate(i)
out.close()
print(''.join(('order: ', str(channelOrder))))
print(''.join(('.dat saved to ', outpath)))
# **********************************************************
# progress bar class used to show progress of pack()
# stolen from some post on stack overflow
import sys
try:
from IPython.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 40
self.__update_amount(0)
if have_ipython:
self.animate = self.animate_ipython
else:
self.animate = self.animate_noipython
def animate_ipython(self, iter):
print('\r', self, )
sys.stdout.flush()
self.update_iteration(iter + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
# *************************************************************
def pack_2(folderpath, filename='', channels='all', chprefix='CH',
dref=None, session='0', source='100'):
'''Alternative version of pack which uses numpy's tofile function to write data.
pack_2 is much faster than pack and avoids quantization noise incurred in pack due
    to conversion of data to float voltages during loadContinuous followed by rounding
back to integers for packing.
filename: Name of the output file. By default, it follows the same layout of continuous files,
but without the channel number, for example, '100_CHs_3.dat' or '100_ADCs.dat'.
    channels:  List of channel numbers specifying order in which channels are packed. By default
               all CH continuous files are packed in numerical order.
chprefix: String name that defines if channels from headstage, auxiliary or ADC inputs
will be loaded.
dref: Digital referencing - either supply a channel number or 'ave' to reference to the
average of packed channels.
source: String name of the source that openephys uses as the prefix. It is usually 100,
if the headstage is the first source added, but can specify something different.
'''
data_array = loadFolderToArray(folderpath, channels, chprefix, np.int16, session, source)
if dref:
if dref == 'ave':
print('Digital referencing to average of all channels.')
reference = np.mean(data_array, 1)
else:
print('Digital referencing to channel ' + str(dref))
if channels == 'all':
channels = _get_sorted_channels(folderpath, chprefix, session, source)
reference = deepcopy(data_array[:, channels.index(dref)])
for i in range(data_array.shape[1]):
data_array[:, i] = data_array[:, i] - reference
if session == '0':
session = ''
else:
session = '_' + session
if not filename: filename = source + '_' + chprefix + 's' + session + '.dat'
print('Packing data to file: ' + filename)
data_array.tofile(os.path.join(folderpath, filename))
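# Illustrative usage sketch (the folder path is an assumption):
#
#   pack_2('/data/2019-09-01_12-00-00', channels=[1, 2, 3], dref='ave')
#
# which would write '100_CHs.dat' next to the .continuous files.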
def _get_sorted_channels(folderpath, chprefix='CH', session='0', source='100'):
Files = [f for f in os.listdir(folderpath) if '.continuous' in f
and '_' + chprefix in f
and source in f]
if session == '0':
Files = [f for f in Files if len(f.split('_')) == 2]
Chs = sorted([int(f.split('_' + chprefix)[1].split('.')[0]) for f in Files])
else:
Files = [f for f in Files if len(f.split('_')) == 3
and f.split('.')[0].split('_')[2] == session]
Chs = sorted([int(f.split('_' + chprefix)[1].split('_')[0]) for f in Files])
return (Chs) | gpl-3.0 | -8,029,719,130,456,929,000 | 36.364407 | 172 | 0.617522 | false | 3.586537 | false | false | false |
Reilithion/xmms2-reilithion | wafadmin/Environment.py | 1 | 4349 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""Environment representation
There is one gotcha: getitem returns [] if the contents evaluate to False
This means env['foo'] = {}; print env['foo'] will print [] not {}
"""
import os, copy, re
import Logs, Options
from Constants import *
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class Environment(object):
"""A safe-to-use dictionary, but do not attach functions to it please (break cPickle)
An environment instance can be stored into a file and loaded easily
"""
__slots__ = ("table", "parent")
def __init__(self, filename=None):
self.table={}
#self.parent = None <- set only if necessary
if Options.commands['configure']:
# set the prefix once and for everybody on creation (configuration)
self.table['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix))
if filename:
self.load(filename)
def __contains__(self, key):
if key in self.table: return True
try: return self.parent.__contains__(key)
except AttributeError: return False # parent may not exist
def __str__(self):
keys = set()
cur = self
while cur:
keys.update(cur.table.keys())
cur = getattr(cur, 'parent', None)
keys = list(keys)
keys.sort()
return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys])
def set_variant(self, name):
self.table[VARIANT] = name
def variant(self):
env = self
while 1:
try:
return env.table[VARIANT]
except KeyError:
try: env = env.parent
except AttributeError: return DEFAULT
def copy(self):
newenv = Environment()
if Options.commands['configure']:
if self['PREFIX']: del newenv.table['PREFIX']
newenv.parent = self
return newenv
def __getitem__(self, key):
x = self.table.get(key, None)
if not x is None: return x
try:
u = self.parent
except AttributeError:
return []
else:
return u[key]
def __setitem__(self, key, value):
self.table[key] = value
def get_flat(self, key):
s = self[key]
if not s: return ''
elif isinstance(s, list): return ' '.join(s)
else: return s
def _get_list_value_for_modification(self, key):
"""Gets a value that must be a list for further modification. The
list may be modified inplace and there is no need to
"self.table[var] = value" afterwards.
"""
try:
value = self.table[key]
except KeyError:
try: value = self.parent[key]
except AttributeError: value = []
if isinstance(value, list):
value = copy.copy(value)
else:
value = [value]
else:
if not isinstance(value, list):
value = [value]
self.table[key] = value
return value
def append_value(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
current_value.extend(value)
else:
current_value.append(value)
def prepend_value(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
current_value = value + current_value
# a new list: update the dictionary entry
self.table[var] = current_value
else:
current_value.insert(0, value)
# prepend unique would be ambiguous
def append_unique(self, var, value):
current_value = self._get_list_value_for_modification(var)
if isinstance(value, list):
for value_item in value:
if value_item not in current_value:
current_value.append(value_item)
else:
if value not in current_value:
current_value.append(value)
def store(self, filename):
"Write the variables into a file"
file = open(filename, 'w')
# compute a merged table
table_list = []
env = self
while 1:
table_list.insert(0, env.table)
try: env = env.parent
except AttributeError: break
merged_table = {}
for table in table_list:
merged_table.update(table)
keys = merged_table.keys()
keys.sort()
for k in keys: file.write('%s = %r\n' % (k, merged_table[k]))
file.close()
def load(self, filename):
"Retrieve the variables from a file"
tbl = self.table
file = open(filename, 'r')
code = file.read()
file.close()
for m in re_imp.finditer(code):
g = m.group
tbl[g(2)] = eval(g(3))
Logs.debug('env: %s' % str(self.table))
def get_destdir(self):
"return the destdir, useful for installing"
if self.__getitem__('NOINSTALL'): return ''
return Options.options.destdir
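# Illustrative usage sketch (only meaningful inside a waf run; the flag names are
# assumptions):
#
#   env = Environment()
#   env.append_unique('CXXFLAGS', ['-O2', '-Wall'])
#   env.store('cache.env')
#   restored = Environment('cache.env')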
| lgpl-2.1 | 3,419,586,826,123,011,600 | 24.582353 | 86 | 0.663831 | false | 3.097578 | false | false | false |
numbas/editor | editor/email_notification.py | 1 | 4221 | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import get_template
from accounts.email import unsubscribe_token
class NotificationEmail(object):
plain_template = ''
html_template = ''
def __init__(self,notification):
self.notification = notification
def get_context_data(self):
site = Site.objects.get_current()
context = {
'notification': self.notification,
'site': site,
'domain': 'http://{}'.format(site.domain),
'unsubscribe_token': unsubscribe_token(self.notification.recipient)
}
return context
def can_email(self):
if not getattr(settings,'EMAIL_ABOUT_NOTIFICATIONS',False):
return False
recipient = self.notification.recipient
return not recipient.userprofile.never_email
def send(self):
if not self.can_email():
return
subject = self.get_subject()
context = self.get_context_data()
plain_content = get_template(self.plain_template).render(context)
html_content = get_template(self.html_template).render(context)
from_email = '{title} <{email}>'.format(title=settings.SITE_TITLE, email=settings.DEFAULT_FROM_EMAIL)
recipient = self.notification.recipient
recipient_email = '{name} <{email}>'.format(name=recipient.get_full_name(), email=recipient.email)
send_mail(subject, plain_content, html_message=html_content, from_email=from_email, recipient_list=(recipient_email,))
class EditorItemNotificationEmail(NotificationEmail):
def __init__(self, *args, **kwargs):
super().__init__(*args,**kwargs)
self.editoritem = self.notification.target
self.project = self.editoritem.project
def get_subject(self):
return "[{project}] {user} {verb} \"{item}\"".format(project=self.project.name, user=self.notification.actor.get_full_name(), verb=self.notification.verb, item=self.editoritem.name)
def get_context_data(self):
context = super().get_context_data()
context.update({
'editoritem': self.editoritem,
'project': self.project,
})
return context
class StampNotificationEmail(EditorItemNotificationEmail):
plain_template = 'notifications/email/stamp.txt'
html_template = 'notifications/email/stamp.html'
def get_context_data(self):
stamp = self.notification.action_object
context = super().get_context_data()
context.update({
'stamp': stamp,
})
return context
def can_email(self):
recipient = self.notification.recipient
if not recipient.userprofile.email_about_stamps:
return False
return super().can_email()
class CommentNotificationEmailMixin:
def get_context_data(self):
comment = self.notification.action_object
context = super().get_context_data()
context.update({
'comment': comment,
})
return context
def can_email(self):
recipient = self.notification.recipient
if not recipient.userprofile.email_about_comments:
return False
return super().can_email()
class EditorItemCommentNotificationEmail(CommentNotificationEmailMixin,EditorItemNotificationEmail):
plain_template = 'notifications/email/editoritem_comment.txt'
html_template = 'notifications/email/editoritem_comment.html'
class ProjectCommentNotificationEmail(CommentNotificationEmailMixin,NotificationEmail):
plain_template = 'notifications/email/project_comment.txt'
html_template = 'notifications/email/project_comment.html'
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.project = self.notification.target
def get_subject(self):
return "[{project}] Comment by {user}".format(project=self.project.name, user=self.notification.actor.get_full_name())
def get_context_data(self):
context = super().get_context_data()
context.update({
'project': self.project,
})
return context
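# Illustrative sketch (how a caller would use these classes is an assumption):
#
#   email = StampNotificationEmail(notification)
#   email.send()   # no-op unless EMAIL_ABOUT_NOTIFICATIONS and the user's preferences allow it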
| apache-2.0 | -2,600,545,200,062,274,600 | 33.884298 | 189 | 0.660981 | false | 4.237952 | false | false | false |
creimers/graphene-auth-examples | src/config/base_settings.py | 1 | 4743 | """
Django settings for art_app project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ov4p%2ls7+tmi&@qt@=3_n+px*oxqk#+%jeza93j!1p!-cr$n9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
CORS_ORIGIN_WHITELIST = (
'localhost:3000'
)
SITE_ID = 1
AUTH_USER_MODEL = 'account.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'rest_framework',
'rest_framework_jwt',
'djoser',
'graphene_django',
'apps.account',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'apps.account.middleware.JWTAuthenticationMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf',
'django.template.context_processors.static',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'HOST': os.getenv('DB_HOST'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'PORT': os.getenv('DB_PORT'),
},
}
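# Illustrative note: the values above are read from the environment via os.getenv,
# e.g. (variable names as used above)
#
#   export DB_NAME=app DB_HOST=localhost DB_USER=app DB_PASSWORD=secret DB_PORT=5432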
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'de'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'collected_static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
MEDIA_URL = "/media/"
LANGUAGES = [
('de', 'German'),
]
GRAPHENE = {
'SCHEMA': 'config.schema.schema'
}
DJOSER = {
'DOMAIN': os.environ.get('DJANGO_DJOSER_DOMAIN', 'localhost:3000'),
'SITE_NAME': os.environ.get('DJANGO_DJOSER_SITE_NAME', 'my site'),
'PASSWORD_RESET_CONFIRM_URL': '?action=set-new-password&uid={uid}&token={token}',
'ACTIVATION_URL': 'activate?uid={uid}&token={token}',
'SEND_ACTIVATION_EMAIL': True,
}
JWT_AUTH = {
'JWT_ALLOW_REFRESH': True,
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| mit | -1,530,724,595,500,124,000 | 25.06044 | 91 | 0.669197 | false | 3.414687 | false | false | false |
hiunnhue/libchewing | contrib/python/chewing.py | 10 | 1836 | from ctypes import *
from functools import partial
import sys
_libchewing = None
if sys.platform == "win32": # Windows
import os.path
# find in current dir first
dll_path = os.path.join(os.path.dirname(__file__), "chewing.dll")
if not os.path.exists(dll_path):
dll_path = "chewing.dll" # search in system path
_libchewing = CDLL(dll_path)
else: # UNIX-like systems
_libchewing = CDLL('libchewing.so.3')
_libchewing.chewing_commit_String.restype = c_char_p
_libchewing.chewing_buffer_String.restype = c_char_p
_libchewing.chewing_cand_String.restype = c_char_p
_libchewing.chewing_zuin_String.restype = c_char_p
_libchewing.chewing_aux_String.restype = c_char_p
_libchewing.chewing_get_KBString.restype = c_char_p
def Init(datadir, userdir):
return _libchewing.chewing_Init(datadir, userdir)
class ChewingContext:
def __init__(self, **kwargs):
if not kwargs:
self.ctx = _libchewing.chewing_new()
else:
syspath = kwargs.get("syspath", None)
userpath = kwargs.get("userpath", None)
self.ctx = _libchewing.chewing_new2(
syspath,
userpath,
None,
None)
def __del__(self):
_libchewing.chewing_delete(self.ctx)
def __getattr__(self, name):
func = 'chewing_' + name
if hasattr(_libchewing, func):
wrap = partial(getattr(_libchewing, func), self.ctx)
setattr(self, name, wrap)
return wrap
else:
raise AttributeError(name)
def Configure(self, cpp, maxlen, direction, space, kbtype):
self.set_candPerPage(cpp)
self.set_maxChiSymbolLen(maxlen)
self.set_addPhraseDirection(direction)
self.set_spaceAsSelection(space)
self.set_KBType(kbtype)
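# A minimal usage sketch (kept in comments so importing this module stays
# side-effect free). The library/data paths are placeholders, not part of the
# original code; attribute lookups such as ``buffer_String`` are resolved
# lazily by ``__getattr__`` to the matching ``chewing_*`` C functions:
#
#   ctx = ChewingContext(syspath=b'/usr/share/libchewing',
#                        userpath=b'/home/user/.chewing')
#   ctx.Configure(10, 20, 0, 1, 0)   # cand/page, max len, direction, space, KB type
#   print(ctx.buffer_String())       # calls chewing_buffer_String(ctx.ctx)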
| lgpl-2.1 | -6,890,735,222,982,853,000 | 30.655172 | 69 | 0.626362 | false | 3.170984 | false | false | false |
eonpatapon/contrail-controller | src/config/utils/provision_analytics_node.py | 3 | 6327 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import time
import argparse
import ConfigParser
from vnc_api.vnc_api import *
from vnc_admin_api import VncApiAdmin
from cfgm_common.exceptions import *
class AnalyticsNodeProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
connected = False
tries = 0
while not connected:
try:
self._vnc_lib = VncApiAdmin(
self._args.use_admin_api,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
auth_host=self._args.openstack_ip,
api_server_use_ssl=self._args.api_server_use_ssl)
connected = True
except ResourceExhaustionError: # haproxy throws 503
if tries < 10:
tries += 1
time.sleep(3)
else:
raise
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
self._global_system_config_obj = gsc_obj
if self._args.oper == 'add':
self.add_analytics_node()
elif self._args.oper == 'del':
self.del_analytics_node()
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_analytics_node.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--oper <add | del>
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': 'add',
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of analytics node", required=True)
parser.add_argument("--host_ip", help="IP address of analytics node", required=True)
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--oper", default='add',
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--openstack_ip", help="IP address of openstack node")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--api_server_ip", help="IP address of api server",
nargs='+', type=str)
group.add_argument("--use_admin_api",
default=False,
help = "Connect to local api-server on admin port",
action="store_true")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def add_analytics_node(self):
gsc_obj = self._global_system_config_obj
analytics_node_obj = AnalyticsNode(
self._args.host_name, gsc_obj,
analytics_node_ip_address=self._args.host_ip)
analytics_node_exists = True
try:
analytics_node_obj = self._vnc_lib.analytics_node_read(
fq_name=analytics_node_obj.get_fq_name())
except NoIdError:
analytics_node_exists = False
if analytics_node_exists:
self._vnc_lib.analytics_node_update(analytics_node_obj)
else:
try:
self._vnc_lib.analytics_node_create(analytics_node_obj)
except RefsExistError:
print "Already created!"
# end add_analytics_node
def del_analytics_node(self):
gsc_obj = self._global_system_config_obj
analytics_node_obj = AnalyticsNode(self._args.host_name, gsc_obj)
self._vnc_lib.analytics_node_delete(
fq_name=analytics_node_obj.get_fq_name())
# end del_analytics_node
# end class AnalyticsNodeProvisioner
def main(args_str=None):
AnalyticsNodeProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
| apache-2.0 | 2,034,345,302,968,062,200 | 34.948864 | 92 | 0.544492 | false | 4.124511 | true | false | false |
GaretJax/ipd | ipd/libvirt/types.py | 1 | 5162 | import xdrlib
import struct
from collections import namedtuple
class TypeBase(object):
def pack(self, stream, value):
raise NotImplementedError()
def unpack(self, stream):
raise NotImplementedError()
class Type(TypeBase):
def __init__(self, packer, unpacker):
self.pack = packer
self.unpack = unpacker
class TypeFactoryMeta(type):
pass
class TypeFactory(TypeBase):
__metaclass__ = TypeFactoryMeta
class CustomSimpleType(TypeBase):
def __init__(self, fmt):
self.fmt = fmt
self.length = struct.calcsize(fmt)
def pack(self, stream, value):
stream.get_buffer().write(struct.pack(self.fmt, value))
def unpack(self, stream):
i = stream.get_position()
j = i + self.length
stream.set_position(j)
data = stream.get_buffer()[i:j]
if len(data) < self.length:
raise EOFError
return struct.unpack(self.fmt, data)[0]
def make_xdr_type(name):
packer = getattr(xdrlib.Packer, 'pack_{}'.format(name))
unpacker = getattr(xdrlib.Unpacker, 'unpack_{}'.format(name))
return Type(packer, unpacker)
class FixedLengthString(TypeFactory):
def __init__(self, length):
self.length = length
def pack(self, stream, s):
stream.pack_fstring(self.length, s)
def unpack(self, stream):
return stream.unpack_fstring(self.length)
class FixedLengthData(TypeFactory):
def __init__(self, length):
self.length = length
def pack(self, stream, s):
stream.pack_fopaque(self.length, s)
def unpack(self, stream):
return stream.unpack_fopaque(self.length)
class ComplexType(TypeFactory):
def __init__(self, name, fields):
self.name = name
self.fields = fields
self.model = namedtuple(name, [f[0] for f in fields])
def __repr__(self):
return '{}({})'.format(self.name, ', '.join(f[0] for f in self.fields))
def __str__(self):
return self.name
def unpack(self, stream):
values = (type.unpack(stream) for _, type in self.fields)
return self.model(*values)
def pack(self, stream, value):
for name, type in self.fields:
type.pack(stream, getattr(value, name))
class FixedLengthArray(TypeFactory):
def __init__(self, items_type, length):
self.items_type = items_type
self.length = length
def pack(self, stream, items):
packer = lambda item: self.items_type.pack(stream, item)
stream.pack_farray(self.length, items, packer)
def unpack(self, stream):
unpacker = lambda: self.items_type.unpack(stream)
return stream.unpack_farray(self.length, unpacker)
class VariableLengthArray(TypeFactory):
def __init__(self, items_type, maxlength):
self.maxlength = maxlength
self.items_type = items_type
def pack(self, stream, items):
packer = lambda item: self.items_type.pack(stream, item)
stream.pack_array(items, packer)
def unpack(self, stream):
unpacker = lambda: self.items_type.unpack(stream)
return stream.unpack_array(unpacker)
class Optional(TypeFactory):
def __init__(self, type):
self.type = type
def pack(self, stream, v):
if v:
stream.pack_bool(True)
self.type.pack(stream, v)
else:
stream.pack_bool(False)
def unpack(self, stream):
if stream.unpack_bool():
return self.type.unpack(stream)
else:
return None
class Enum(TypeFactory):
def __init__(self, name, values):
self.name = name
self.values = values
self.ids = set([v[1] for v in values])
self.keys = set([v[0] for v in values])
for k, v in self.values:
setattr(self, k, v)
self._id_to_key = {v: k for k, v in values}
self._key_to_id = {k: v for k, v in values}
def __str__(self):
return self.name
def key(self, id):
return self._id_to_key[id]
def id(self, key):
return self._key_to_id[key]
def pack(self, stream, v):
if isinstance(v, int):
assert v in self.ids
else:
v = self.id(v)
return stream.pack_enum(v)
def unpack(self, stream):
v = stream.unpack_enum()
assert v in self.ids
return v
def __iter__(self):
return iter(self.values)
int = make_xdr_type('int')
uint = make_xdr_type('uint')
hyper = make_xdr_type('hyper')
uhyper = make_xdr_type('uhyper')
char = CustomSimpleType('>b')
uchar = CustomSimpleType('>B')
short = CustomSimpleType('>h')
ushort = CustomSimpleType('>H')
string = make_xdr_type('string')
opaque = make_xdr_type('opaque')
fstring = FixedLengthString
fopaque = FixedLengthData
farray = FixedLengthArray
array = VariableLengthArray
not_implemented = TypeBase
compound = ComplexType
enum = Enum
optional = Optional
def istype(k, v):
return k.islower() and isinstance(v, (TypeBase, TypeFactoryMeta))
__all__ = [k for k, v in locals().items() if istype(k, v)]
TYPES = {k: v for k, v in locals().items() if istype(k, v)}
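# A small illustrative sketch (not from the original module) of how these
# factories compose; the record/enum names and fields below are made up, and
# ``int``/``string`` refer to the XDR type objects defined above:
#
#   error_type = compound('error_type', [('code', int), ('message', string)])
#   job_state = enum('job_state', [('NONE', 0), ('RUNNING', 1), ('DONE', 2)])
#
#   p = xdrlib.Packer()
#   error_type.pack(p, error_type.model(code=1, message=b'boom'))
#   u = xdrlib.Unpacker(p.get_buffer())
#   print(error_type.unpack(u))   # -> error_type(code=1, message=b'boom')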
| mit | -3,772,138,830,028,306,000 | 24.428571 | 79 | 0.612941 | false | 3.53078 | false | false | false |
xantage/code | vilya/models/git/diff/diff.py | 1 | 2142 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from itertools import groupby
from vilya.models.utils.decorators import cached_property
from vilya.models.git.diff.patch import Patch
from vilya.models.git.diff.delta import Delta
class Diff(object):
def __init__(self, repo, diff, linecomments=[]):
self.repo = repo
self.raw_diff = diff
self.old_ref = None
self.new_ref = None
self.old_sha = diff['old_sha']
self.new_sha = diff['new_sha']
self._additions = 0
self._deletions = 0
self._length = 0
        # instantiate Patch objects
# line comments groupby path
keyfunc_path = lambda x: x.old_path
linecomments_by_path = {}
if linecomments:
linecomments.sort(key=keyfunc_path)
linecomments_by_path = dict(
(k, list(v)) for k, v in groupby(linecomments,
key=keyfunc_path))
self._linecomments_by_path = linecomments_by_path
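        # Note: itertools.groupby only groups *consecutive* items, which is
        # why the comments are sorted with the same key function first.
        # Illustrative shape of the result (paths are made-up examples):
        #   {'a.py': [<comment>, <comment>], 'b.py': [<comment>]}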
# TODO: MAX_DIFF_PATCHES
@property
def additions(self):
if self._additions:
return self._additions
for p in self.patches:
self._additions += p.additions
return self._additions
@property
def deletions(self):
if self._deletions:
return self._deletions
for p in self.patches:
self._deletions += p.deletions
return self._deletions
@property
def length(self):
if self._length:
return self._length
self._length = len(self.patches)
return self._length
@cached_property
def deltas(self):
repo = self.repo
diff = self.raw_diff
return [Delta(repo, self, p)
for p in diff['patches']]
@cached_property
def patches(self):
repo = self.repo
diff = self.raw_diff
linecomments_by_path = self._linecomments_by_path
# TODO: use generator
return [Patch(repo, self, p, linecomments_by_path.get(p['old_file_path'], []))
for p in diff['patches']]
| bsd-3-clause | -6,475,592,780,976,578,000 | 28.260274 | 86 | 0.568352 | false | 3.926471 | false | false | false |
mikjo/bigitr | bigitr/gitmerge.py | 1 | 2736 | #
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigitr import errhandler
from bigitr import util
class Merger(object):
def __init__(self, ctx):
self.ctx = ctx
self.err = errhandler.Errors(ctx)
def mergeBranches(self, repository, Git, requestedBranch=None):
onerror = self.ctx.getMergeError()
try:
for gitbranch in sorted(self.ctx.getMergeBranchMaps(repository).keys()):
if requestedBranch is None or gitbranch == requestedBranch:
self.mergeBranch(repository, Git, gitbranch)
except Exception as e:
self.err(repository, onerror)
@util.saveDir
def mergeBranch(self, repository, Git, gitbranch):
Git.initializeGitRepository(create=False)
self.mergeFrom(repository, Git, gitbranch)
def mergeFrom(self, repository, Git, gitbranch):
success = True
# try to merge downstream branches even if there was nothing to
# commit, because a merge conflict might have been resolved
if not self.merge(repository, Git, gitbranch):
success = False
# Status can report clean with .gitignored files existing
# Remove any .gitignored files added by the "cvs export"
Git.pristine()
if not success:
raise RuntimeError('merge failed for branch %s: see %s' %(
gitbranch, Git.log.thiserr))
def merge(self, repository, Git, gitbranch):
success = True
Git.pristine()
for target in self.ctx.getMergeBranchMaps(repository
).get(gitbranch, set()):
Git.checkout(target)
Git.mergeFastForward('origin/' + target)
mergeMsg = "Automated merge '%s' into '%s'" %(gitbranch, target)
rc = Git.mergeDefault(gitbranch, mergeMsg)
if rc != 0:
Git.log.mailLastOutput(mergeMsg)
success = False
else:
Git.push('origin', target, target)
Git.runImpPostHooks(target)
rc = self.merge(repository, Git, target)
if not rc:
success = False
return success
| apache-2.0 | 19,449,266,310,826,400 | 35.972973 | 84 | 0.62902 | false | 4.255054 | false | false | false |
broadinstitute/pywdl | wdl/spec.py | 1 | 1596 | import subprocess
import re
import os
import tempfile
import toc
def run(command, cwd=None):
proc = subprocess.Popen(
command,
shell=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=cwd
)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout.strip(' \n'), stderr.strip(' \n'))
def write_and_close(contents):
(fd, path) = tempfile.mkstemp()
with os.fdopen(fd, 'w') as fp:
        fp.write(contents)
return path
with open('SPEC.md') as fp:
contents = fp.read()
toc.modify_and_write("SPEC.md")
source_regex = re.compile(r'```wdl(.*?)```', re.DOTALL)
count = 0
wdl_lines = 0
def lines(string, index=None):
string = string[:index] if index else string
return sum([1 for c in string if c == '\n']) + 1
for match in source_regex.finditer(contents):
count += 1
wdl_source = match.group(1)
wdl_lines += lines(wdl_source)
line = lines(contents, match.start(1))
wdl_file = write_and_close(wdl_source)
cmd = 'java -jar ../cromwell/target/scala-2.11/cromwell-0.9.jar parse ' + wdl_file
(rc, stdout, stderr) = run(cmd)
if rc != 0:
print("Line {}: Failure".format(line))
print(" rc: " + str(rc))
print(" stdout: " + write_and_close(stdout))
print(" stderr: " + write_and_close(stderr))
print(" WDL: " + wdl_file)
print(" Command: " + cmd)
else:
print("Line {}: Success".format(line))
os.unlink(wdl_file)
print('Total: {}'.format(wdl_lines))
| apache-2.0 | -7,772,061,834,200,016,000 | 27 | 86 | 0.599624 | false | 3.179283 | false | false | false |
daeilkim/refinery | refinery/bnpy/bnpy-dev/bnpy/init/FromScratchBernRel.py | 1 | 1534 | '''
FromScratchBernRel.py
Initialize params of HModel for relational (stochastic block model) data from scratch.
'''
import numpy as np
from scipy.special import digamma
from scipy.cluster import vq
hasRexAvailable = True
try:
import KMeansRex
except ImportError:
hasRexAvailable = False
def init_global_params(hmodel, Data, initname='randexamples',
seed=0, K=0, initarg=None, **kwargs):
''' Initialize hmodel's global parameters in-place.
Returns
-------
Nothing. hmodel is updated in place.
    Global Parameters are:
lamA, lamB = K x K stochastic block matrix
theta = N x K matrix of community membership probabilities
'''
PRNG = np.random.RandomState(seed)
N = Data.nNodeTotal
if initname == 'randexamples':
# Generate a sparse matrix given observed positive edges
#Data.to_sparse_matrix()
# Create assortative stochastic block matrix
lamA = np.zeros( K ) + (Data.nPosEdges / K) # assortative ( K x 1 ) vs. (K x K)
lamB = np.zeros( K ) + (Data.nAbsEdges / (K*K)) # assortative
        # Create theta: per-node community membership probabilities
theta = np.zeros( (N,K) )
alpha = np.ones(K) / K
for ii in xrange(N):
theta[ii, :] = PRNG.dirichlet(alpha)
# Initialize global stick-breaking weights beta to be 1/K (uniform)
beta = np.ones(K) / K
# Set the global parameters for the hmodel
hmodel.set_global_params(K=K, beta=beta, lamA=lamA, lamB=lamB, theta=theta)
return
else:
raise NotImplementedError('Unrecognized initname ' + initname)
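# A shape-only sketch (not part of the original module; the counts below are
# made up) of what this initializer hands to hmodel.set_global_params for K
# communities and N nodes:
#
#   K, N = 5, 100
#   lamA = np.zeros(K) + 40.0 / K            # assortative within-block rates, shape (K,)
#   lamB = np.zeros(K) + 960.0 / (K * K)     # assortative between-block rates, shape (K,)
#   theta = np.vstack([np.random.dirichlet(np.ones(K) / K) for _ in range(N)])  # (N, K)
#   beta = np.ones(K) / K                     # uniform stick-breaking weights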
| mit | 1,792,730,737,028,571,600 | 31.638298 | 83 | 0.664928 | false | 3.502283 | false | false | false |
josebamartos/np | nplib/pinger.py | 1 | 2341 | '''
Pinger class in nplib library for the np (Network Ping)
Copyright (C) 2015
Joseba Martos <[email protected]>
This file is part of np (Network Ping)
Web site: http://otzarri.net/np
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import netaddr
import subprocess
import threading
try:
import queue
except ImportError:
import Queue as queue # lint:ok
class Pinger:
'''Pinger object'''
hosts_up = list()
hosts_down = list()
def __init__(self, localnetaddr):
self.localnet = netaddr.IPNetwork(localnetaddr)
self.pingqueue = queue.Queue()
self.count = '1'
self.hosts_up = list()
self.hosts_down = list()
def pinger(self,):
'''Sends ping'''
while True:
ip = str(self.pingqueue.get())
retcode = subprocess.call("ping -c %s %s" % (self.count, ip),
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if retcode == 0:
self.hosts_up.append(netaddr.IPAddress(ip))
Pinger.hosts_up.append(netaddr.IPAddress(ip))
else:
self.hosts_down.append(netaddr.IPAddress(ip))
Pinger.hosts_down.append(netaddr.IPAddress(ip))
self.pingqueue.task_done()
def run(self):
thread_num = self.localnet.size - 2
for i in range(thread_num):
worker = threading.Thread(target=self.pinger)
worker.daemon = True
worker.start()
for ip in self.localnet.iter_hosts():
self.pingqueue.put(ip)
self.pingqueue.join()
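# A minimal usage sketch (commented out; the network below is an example value,
# not part of the original module):
#
#   p = Pinger('192.168.1.0/24')
#   p.run()                      # blocks until every host address has been pinged
#   print(sorted(p.hosts_up))    # per-instance results for this run
#   print(len(p.hosts_down))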
| gpl-3.0 | -2,401,171,831,415,528,000 | 30.213333 | 73 | 0.599744 | false | 4.128748 | false | false | false |
arkanister/django-flickr-gallery | django_flickr_gallery/admin/photoset.py | 1 | 1194 | from django.contrib import admin
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django_flickr_gallery.admin.forms import PhotosetAdminForm
from django_flickr_gallery.utils.date import parse_unix_datetime
def display_attr(func, short_description=None, boolean=False):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
wrap.short_description = short_description
wrap.boolean = boolean
return wrap
class PhotosetAdmin(admin.ModelAdmin):
list_display = ['primary', 'title', 'description', 'count', 'last_update']
form = PhotosetAdminForm
primary = display_attr(
lambda self, x: mark_safe('<img src="%s" width="48px" height="48px" />' % x.primary.small_square_url),
short_description=_('cover'))
title = display_attr(lambda self, x: x.title, short_description=_('title'))
description = display_attr(lambda self, x: x.description, short_description=_('description'))
count = display_attr(lambda self, x: x.count, short_description=_('photos'))
last_update = display_attr(lambda self, x: parse_unix_datetime(x.date_update), short_description=_('last update'))
| bsd-3-clause | 834,874,264,519,688,200 | 41.642857 | 118 | 0.71273 | false | 3.802548 | false | false | false |
Atush/py_learning | fixture/contact.py | 1 | 6990 | from model.contact import Contact
from selenium.webdriver.common.by import By
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# go to create contact page
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
#self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
#self.change_field_value("nickname", contact.nickname)
#self.change_field_value("title", contact.title)
#self.change_field_value("company", contact.company)
#self.change_field_value("address", contact.address)
#self.change_field_value("home", contact.homephone)
#self.change_field_value("mobile", contact.mobile)
#self.change_field_value("work", contact.workphone)
#self.change_field_value("fax", contact.fax)
#self.change_field_value("email", contact.email)
#self.change_field_value("email2", contact.email2)
#self.change_field_value("email3", contact.email3)
#self.change_field_value("homepage", contact.homepage)
#Select(wd.find_element_by_name('bday')).select_by_visible_text(contact.bday)
#Select(wd.find_element_by_name('bmonth')).select_by_visible_text(contact.bmonth)
#self.change_field_value("byear", contact.byear)
#Select(wd.find_element_by_name('aday')).select_by_visible_text(contact.aday)
#Select(wd.find_element_by_name('amonth')).select_by_visible_text(contact.amonth)
#self.change_field_value("ayear", contact.ayear)
#self.change_field_value("address2", contact.address2)
#self.change_field_value("phone2", contact.phone2)
#self.change_field_value("notes", contact.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_name("entry")[index].find_elements(By.TAG_NAME, "td")[7].click()
def open_contact_to_view_by_index(self, index):
wd = self.app.wd
self.open_home_page()
wd.find_elements_by_name("entry")[index].find_elements(By.TAG_NAME, "td")[6].click()
def edit_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.contact_cache = None
def edit_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
#self.select_contact_by_id(id)
wd.find_element_by_xpath("//a[@href='edit.php?id=%s']" % id).click()
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_home_page()
# check index contact
wd.find_elements_by_name("selected[]")[index].click()
# init deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.open_home_page()
# check index contact
self.select_contact_by_id(id)
# init deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def open_home_page(self):
wd = self.app.wd
if not wd.current_url.endswith("/index.php"):
wd.find_element_by_link_text("home").click()
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
lastname = cells[1].text
firstname = cells[2].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cache.append(Contact(id = id, firstname = firstname, lastname = lastname, all_phones_from_home_page = all_phones, all_emails_from_home_page = all_emails))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(id=id, firstname=firstname, lastname=lastname, homephone=homephone, mobile=mobile, workphone=workphone, phone2=phone2, email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_to_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
mobile = re.search("M: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(homephone=homephone, mobile=mobile, workphone=workphone, phone2=phone2) | apache-2.0 | 1,759,000,431,022,635,000 | 43.814103 | 184 | 0.621173 | false | 3.362193 | false | false | false |
sony/nnabla | python/src/nnabla/experimental/parametric_function_class/convolution.py | 1 | 5159 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from nnabla.parameter import get_parameter_or_create, get_parameter
from nnabla.initializer import (
calc_uniform_lim_glorot,
ConstantInitializer, NormalInitializer, UniformInitializer)
from .module import Module
class Convolution(Module):
"""N-D Convolution with a bias term.
For Dilated Convolution (a.k.a. Atrous Convolution), refer to:
- Chen et al., DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs. https://arxiv.org/abs/1606.00915
- Yu et al., Multi-Scale Context Aggregation by Dilated Convolutions. https://arxiv.org/abs/1511.07122
Note:
Convolution is a computationally intensive operation that
should preferably be run with the `cudnn` backend. NNabla
then uses CuDNN library functions to determine and cache the
fastest algorithm for the given set of convolution parameters,
which results in additional memory consumption which may pose
a problem for GPUs with insufficient memory size. In that
case, the `NNABLA_CUDNN_WORKSPACE_LIMIT` environment variable
can be used to restrict the choice of algorithms to those that
fit the given workspace memory limit, expressed in bytes. In
some cases it may also be desired to restrict the automatic
        search to algorithms that produce deterministic (reproducible)
        results. This can be requested by setting the environment
variable `NNABLA_CUDNN_DETERMINISTIC` to a non-zero value.
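        For example (illustrative values only), both variables can be set
        from Python before the CUDA/cuDNN extension is loaded::

            import os
            os.environ['NNABLA_CUDNN_WORKSPACE_LIMIT'] = str(512 * 1024 * 1024)
            os.environ['NNABLA_CUDNN_DETERMINISTIC'] = '1'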
Args:
        inp (~nnabla.Variable): N-D array passed to ``__call__``.
        inmaps (int): Number of input channels (feature maps) of the input.
        outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.convolution` for the output shape.
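    Example:
        A minimal sketch (the shapes and layer sizes are illustrative only)::

            import nnabla as nn

            x = nn.Variable((8, 3, 32, 32))            # NCHW input batch
            conv = Conv2d(3, 16, (3, 3), pad=(1, 1))   # 3 -> 16 maps, 3x3 kernel
            h = conv(x)                                # h.shape == (8, 16, 32, 32)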
"""
def __init__(self, inmaps, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True):
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w_shape = (outmaps, inmaps // group) + tuple(kernel)
w = nn.Variable.from_numpy_array(
w_init(w_shape)).apply(need_grad=not fix_parameters)
b = None
if with_bias:
b_shape = (outmaps, )
b = nn.Variable.from_numpy_array(
b_init(b_shape)).apply(need_grad=not fix_parameters)
self.W = w
self.b = b
self.base_axis = base_axis
self.pad = pad
self.stride = stride
self.dilation = dilation
self.group = group
def __call__(self, inp):
return F.convolution(inp, self.W, self.b, self.base_axis,
self.pad, self.stride, self.dilation, self.group)
Conv1d = Convolution
Conv2d = Convolution
Conv3d = Convolution
ConvNd = Convolution
| apache-2.0 | 2,791,790,847,225,436,000 | 48.133333 | 271 | 0.686179 | false | 3.932165 | false | false | false |
glatard/nipype | nipype/interfaces/freesurfer/preprocess.py | 8 | 61456 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various commands provided by FreeSurfer
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
import os.path as op
from glob import glob
#import itertools
import numpy as np
from nibabel import load
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.io import FreeSurferSource
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import (TraitedSpec, File, traits,
Directory, InputMultiPath,
OutputMultiPath, CommandLine,
CommandLineInputSpec, isdefined)
from ... import logging
iflogger = logging.getLogger('interface')
class ParseDICOMDirInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, argstr='--d %s', mandatory=True,
desc='path to siemens dicom directory')
dicom_info_file = File('dicominfo.txt', argstr='--o %s', usedefault=True,
desc='file to which results are written')
sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers')
summarize = traits.Bool(argstr='--summarize',
desc='only print out info for run leaders')
class ParseDICOMDirOutputSpec(TraitedSpec):
dicom_info_file = File(exists=True,
desc='text file containing dicom information')
class ParseDICOMDir(FSCommand):
"""Uses mri_parse_sdcmdir to get information from dicom directories
Examples
--------
>>> from nipype.interfaces.freesurfer import ParseDICOMDir
>>> dcminfo = ParseDICOMDir()
>>> dcminfo.inputs.dicom_dir = '.'
>>> dcminfo.inputs.sortbyrun = True
>>> dcminfo.inputs.summarize = True
>>> dcminfo.cmdline
'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize'
"""
_cmd = 'mri_parse_sdcmdir'
input_spec = ParseDICOMDirInputSpec
output_spec = ParseDICOMDirOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.dicom_info_file):
outputs['dicom_info_file'] = os.path.join(os.getcwd(), self.inputs.dicom_info_file)
return outputs
class UnpackSDICOMDirInputSpec(FSTraitedSpec):
source_dir = Directory(exists=True, argstr='-src %s',
mandatory=True,
desc='directory with the DICOM files')
output_dir = Directory(argstr='-targ %s',
desc='top directory into which the files will be unpacked')
run_info = traits.Tuple(traits.Int, traits.Str, traits.Str, traits.Str,
mandatory=True,
argstr='-run %d %s %s %s',
xor=('run_info', 'config', 'seq_config'),
desc='runno subdir format name : spec unpacking rules on cmdline')
config = File(exists=True, argstr='-cfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules in file')
seq_config = File(exists=True, argstr='-seqcfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules based on sequence')
dir_structure = traits.Enum('fsfast', 'generic', argstr='-%s',
desc='unpack to specified directory structures')
no_info_dump = traits.Bool(argstr='-noinfodump',
desc='do not create infodump file')
scan_only = File(exists=True, argstr='-scanonly %s',
desc='only scan the directory and put result in file')
log_file = File(exists=True, argstr='-log %s',
                    desc='explicitly set log file')
spm_zeropad = traits.Int(argstr='-nspmzeropad %d',
desc='set frame number zero padding width for SPM')
no_unpack_err = traits.Bool(argstr='-no-unpackerr',
desc='do not try to unpack runs with errors')
class UnpackSDICOMDir(FSCommand):
"""Use unpacksdcmdir to convert dicom files
Call unpacksdcmdir -help from the command line to see more information on
using this command.
Examples
--------
>>> from nipype.interfaces.freesurfer import UnpackSDICOMDir
>>> unpack = UnpackSDICOMDir()
>>> unpack.inputs.source_dir = '.'
>>> unpack.inputs.output_dir = '.'
>>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct')
>>> unpack.inputs.dir_structure = 'generic'
>>> unpack.cmdline
'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .'
"""
_cmd = 'unpacksdcmdir'
input_spec = UnpackSDICOMDirInputSpec
class MRIConvertInputSpec(FSTraitedSpec):
read_only = traits.Bool(argstr='--read_only',
desc='read the input volume')
no_write = traits.Bool(argstr='--no_write',
desc='do not write output')
in_info = traits.Bool(argstr='--in_info',
desc='display input info')
out_info = traits.Bool(argstr='--out_info',
desc='display output info')
in_stats = traits.Bool(argstr='--in_stats',
desc='display input stats')
out_stats = traits.Bool(argstr='--out_stats',
desc='display output stats')
in_matrix = traits.Bool(argstr='--in_matrix',
desc='display input matrix')
out_matrix = traits.Bool(argstr='--out_matrix',
desc='display output matrix')
in_i_size = traits.Int(argstr='--in_i_size %d',
desc='input i size')
in_j_size = traits.Int(argstr='--in_j_size %d',
desc='input j size')
in_k_size = traits.Int(argstr='--in_k_size %d',
desc='input k size')
force_ras = traits.Bool(argstr='--force_ras_good',
desc='use default when orientation info absent')
in_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
_orientations = ['LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR']
#_orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])]
in_orientation = traits.Enum(_orientations,
argstr='--in_orientation %s',
desc='specify the input orientation')
in_center = traits.List(traits.Float, maxlen=3,
argstr='--in_center %s',
desc='<R coordinate> <A coordinate> <S coordinate>')
sphinx = traits.Bool(argstr='--sphinx',
desc='change orientation info to sphinx')
out_i_count = traits.Int(argstr='--out_i_count %d',
desc='some count ?? in i direction')
out_j_count = traits.Int(argstr='--out_j_count %d',
desc='some count ?? in j direction')
out_k_count = traits.Int(argstr='--out_k_count %d',
desc='some count ?? in k direction')
vox_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-voxsize %f %f %f',
desc='<size_x> <size_y> <size_z> specify the size (mm) - useful for upsampling or downsampling')
out_i_size = traits.Int(argstr='--out_i_size %d',
desc='output i size')
out_j_size = traits.Int(argstr='--out_j_size %d',
desc='output j size')
out_k_size = traits.Int(argstr='--out_k_size %d',
desc='output k size')
out_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_orientation = traits.Enum(_orientations,
argstr='--out_orientation %s',
desc='specify the output orientation')
out_center = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_center %f %f %f',
desc='<R coordinate> <A coordinate> <S coordinate>')
out_datatype = traits.Enum('uchar', 'short', 'int', 'float',
argstr='--out_data_type %s',
desc='output data type <uchar|short|int|float>')
resample_type = traits.Enum('interpolate', 'weighted', 'nearest', 'sinc', 'cubic',
argstr='--resample_type %s',
desc='<interpolate|weighted|nearest|sinc|cubic> (default is interpolate)')
no_scale = traits.Bool(argstr='--no_scale 1',
                           desc='do not rescale values for COR')
no_change = traits.Bool(argstr='--nochange',
desc="don't change type of input to that of template")
autoalign_matrix = File(exists=True, argstr='--autoalign %s',
desc='text file with autoalign matrix')
unwarp_gradient = traits.Bool(argstr='--unwarp_gradient_nonlinearity',
desc='unwarp gradient nonlinearity')
apply_transform = File(exists=True, argstr='--apply_transform %s',
desc='apply xfm file')
apply_inv_transform = File(exists=True, argstr='--apply_inverse_transform %s',
desc='apply inverse transformation xfm file')
devolve_transform = traits.Str(argstr='--devolvexfm %s',
desc='subject id')
crop_center = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--crop %d %d %d',
desc='<x> <y> <z> crop to 256 around center (x, y, z)')
crop_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--cropsize %d %d %d',
desc='<dx> <dy> <dz> crop to size <dx, dy, dz>')
cut_ends = traits.Int(argstr='--cutends %d',
desc='remove ncut slices from the ends')
slice_crop = traits.Tuple(traits.Int, traits.Int,
argstr='--slice-crop %d %d',
desc='s_start s_end : keep slices s_start to s_end')
slice_reverse = traits.Bool(argstr='--slice-reverse',
desc='reverse order of slices, update vox2ras')
slice_bias = traits.Float(argstr='--slice-bias %f',
desc='apply half-cosine bias field')
fwhm = traits.Float(argstr='--fwhm %f',
desc='smooth input volume by fwhm mm')
_filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
_infiletypes = ['ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom']
in_type = traits.Enum(_filetypes + _infiletypes, argstr='--in_type %s',
desc='input file type')
out_type = traits.Enum(_filetypes, argstr='--out_type %s',
desc='output file type')
ascii = traits.Bool(argstr='--ascii',
desc='save output as ascii col>row>slice>frame')
reorder = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--reorder %d %d %d',
desc='olddim1 olddim2 olddim3')
invert_contrast = traits.Float(argstr='--invert_contrast %f',
                                   desc='threshold for inverting contrast')
in_file = File(exists=True, mandatory=True,
position=-2,
argstr='--input_volume %s',
desc='File to read/convert')
out_file = File(argstr='--output_volume %s',
position=-1, genfile=True,
desc='output filename or True to generate one')
conform = traits.Bool(argstr='--conform',
desc='conform to 256^3')
conform_min = traits.Bool(argstr='--conform_min',
desc='conform to smallest size')
conform_size = traits.Float(argstr='--conform_size %s',
desc='conform to size_in_mm')
parse_only = traits.Bool(argstr='--parse_only',
desc='parse input only')
subject_name = traits.Str(argstr='--subject_name %s',
desc='subject name ???')
reslice_like = File(exists=True, argstr='--reslice_like %s',
desc='reslice output to match file')
template_type = traits.Enum(_filetypes + _infiletypes,
argstr='--template_type %s',
desc='template file type')
split = traits.Bool(argstr='--split',
desc='split output frames into separate output files.')
frame = traits.Int(argstr='--frame %d',
desc='keep only 0-based frame number')
midframe = traits.Bool(argstr='--mid-frame',
desc='keep only the middle frame')
skip_n = traits.Int(argstr='--nskip %d',
desc='skip the first n frames')
drop_n = traits.Int(argstr='--ndrop %d',
desc='drop the last n frames')
frame_subsample = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--fsubsample %d %d %d',
desc='start delta end : frame subsampling (end = -1 for end)')
in_scale = traits.Float(argstr='--scale %f',
desc='input intensity scale factor')
out_scale = traits.Float(argstr='--out-scale %d',
desc='output intensity scale factor')
in_like = File(exists=True, argstr='--in_like %s',
desc='input looks like')
fill_parcellation = traits.Bool(argstr='--fill_parcellation',
desc='fill parcellation')
smooth_parcellation = traits.Bool(argstr='--smooth_parcellation',
desc='smooth parcellation')
zero_outlines = traits.Bool(argstr='--zero_outlines',
desc='zero outlines')
color_file = File(exists=True, argstr='--color_file %s',
desc='color file')
no_translate = traits.Bool(argstr='--no_translate',
desc='???')
status_file = File(argstr='--status %s',
desc='status file for DICOM conversion')
sdcm_list = File(exists=True, argstr='--sdcmlist %s',
desc='list of DICOM files for conversion')
template_info = traits.Bool('--template_info',
desc='dump info about template')
crop_gdf = traits.Bool(argstr='--crop_gdf',
desc='apply GDF cropping')
zero_ge_z_offset = traits.Bool(argstr='--zero_ge_z_offset',
desc='zero ge z offset ???')
class MRIConvertOutputSpec(TraitedSpec):
out_file = OutputMultiPath(File(exists=True), desc='converted output file')
class MRIConvert(FSCommand):
"""use fs mri_convert to manipulate files
.. note::
Adds niigz as an output type option
Examples
--------
>>> mc = MRIConvert()
>>> mc.inputs.in_file = 'structural.nii'
>>> mc.inputs.out_file = 'outfile.mgz'
>>> mc.inputs.out_type = 'mgz'
>>> mc.cmdline
'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz'
"""
_cmd = 'mri_convert'
input_spec = MRIConvertInputSpec
output_spec = MRIConvertOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
if name in ['in_type', 'out_type', 'template_type']:
if value == 'niigz':
return spec.argstr % 'nii'
return super(MRIConvert, self)._format_arg(name, spec, value)
def _get_outfilename(self):
outfile = self.inputs.out_file
if not isdefined(outfile):
if isdefined(self.inputs.out_type):
suffix = '_out.' + self.filemap[self.inputs.out_type]
else:
suffix = '_out.nii.gz'
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix=suffix,
use_ext=False)
return os.path.abspath(outfile)
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self._get_outfilename()
if isdefined(self.inputs.split) and self.inputs.split:
size = load(self.inputs.in_file).get_shape()
if len(size) == 3:
tp = 1
else:
tp = size[-1]
if outfile.endswith('.mgz'):
stem = outfile.split('.mgz')[0]
ext = '.mgz'
elif outfile.endswith('.nii.gz'):
stem = outfile.split('.nii.gz')[0]
ext = '.nii.gz'
else:
stem = '.'.join(outfile.split('.')[:-1])
ext = '.' + outfile.split('.')[-1]
outfile = []
for idx in range(0, tp):
outfile.append(stem + '%04d' % idx + ext)
if isdefined(self.inputs.out_type):
if self.inputs.out_type in ['spm', 'analyze']:
# generate all outputs
size = load(self.inputs.in_file).get_shape()
if len(size) == 3:
tp = 1
else:
tp = size[-1]
# have to take care of all the frame manipulations
raise Exception('Not taking frame manipulations into account- please warn the developers')
outfiles = []
outfile = self._get_outfilename()
for i in range(tp):
outfiles.append(fname_presuffix(outfile,
suffix='%03d' % (i + 1)))
outfile = outfiles
outputs['out_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._get_outfilename()
return None
class DICOMConvertInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, mandatory=True,
desc='dicom directory from which to convert dicom files')
base_output_dir = Directory(mandatory=True,
desc='directory in which subject directories are created')
subject_dir_template = traits.Str('S.%04d', usedefault=True,
desc='template for subject directory name')
subject_id = traits.Any(desc='subject identifier to insert into template')
file_mapping = traits.List(traits.Tuple(traits.Str, traits.Str),
desc='defines the output fields of interface')
out_type = traits.Enum('niigz', MRIConvertInputSpec._filetypes,
usedefault=True,
desc='defines the type of output file produced')
dicom_info = File(exists=True,
desc='File containing summary information from mri_parse_sdcmdir')
seq_list = traits.List(traits.Str,
requires=['dicom_info'],
desc='list of pulse sequence names to be converted.')
ignore_single_slice = traits.Bool(requires=['dicom_info'],
desc='ignore volumes containing a single slice')
class DICOMConvert(FSCommand):
"""use fs mri_convert to convert dicom files
Examples
--------
>>> from nipype.interfaces.freesurfer import DICOMConvert
>>> cvt = DICOMConvert()
>>> cvt.inputs.dicom_dir = 'dicomdir'
>>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')]
"""
_cmd = 'mri_convert'
input_spec = DICOMConvertInputSpec
def _get_dicomfiles(self):
"""validate fsl bet options
if set to None ignore
"""
return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir,
'*-1.dcm')))
def _get_outdir(self):
"""returns output directory"""
subjid = self.inputs.subject_id
if not isdefined(subjid):
path, fname = os.path.split(self._get_dicomfiles()[0])
subjid = int(fname.split('-')[0])
if isdefined(self.inputs.subject_dir_template):
subjid = self.inputs.subject_dir_template % subjid
basedir = self.inputs.base_output_dir
if not isdefined(basedir):
basedir = os.path.abspath('.')
outdir = os.path.abspath(os.path.join(basedir, subjid))
return outdir
def _get_runs(self):
"""Returns list of dicom series that should be converted.
Requires a dicom info summary file generated by ``DicomDirInfo``
"""
seq = np.genfromtxt(self.inputs.dicom_info, dtype=object)
runs = []
for s in seq:
if self.inputs.seq_list:
if self.inputs.ignore_single_slice:
if (int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
if any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
runs.append(int(s[2]))
return runs
def _get_filelist(self, outdir):
"""Returns list of files to be converted"""
filemap = {}
for f in self._get_dicomfiles():
head, fname = os.path.split(f)
fname, ext = os.path.splitext(fname)
fileparts = fname.split('-')
runno = int(fileparts[1])
out_type = MRIConvert.filemap[self.inputs.out_type]
outfile = os.path.join(outdir, '.'.join(('%s-%02d' % (fileparts[0],
runno),
out_type)))
filemap[runno] = (f, outfile)
if self.inputs.dicom_info:
files = [filemap[r] for r in self._get_runs()]
else:
files = [filemap[r] for r in filemap.keys()]
return files
@property
def cmdline(self):
""" `command` plus any arguments (args)
validates arguments and generates command line"""
self._check_mandatory_inputs()
outdir = self._get_outdir()
cmd = []
if not os.path.exists(outdir):
cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir
cmd.extend([cmdstr])
infofile = os.path.join(outdir, 'shortinfo.txt')
if not os.path.exists(infofile):
cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir,
infofile)
cmd.extend([cmdstr])
files = self._get_filelist(outdir)
for infile, outfile in files:
if not os.path.exists(outfile):
single_cmd = '%s %s %s' % (self.cmd, infile,
os.path.join(outdir, outfile))
cmd.extend([single_cmd])
return '; '.join(cmd)
class ResampleInputSpec(FSTraitedSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True,
desc='file to resample', position=-2)
resampled_file = File(argstr='-o %s', desc='output filename', genfile=True,
position=-1)
voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-vs %.2f %.2f %.2f', desc='triplet of output voxel sizes',
mandatory=True)
class ResampleOutputSpec(TraitedSpec):
resampled_file = File(exists=True,
desc='output filename')
class Resample(FSCommand):
"""Use FreeSurfer mri_convert to up or down-sample image files
Examples
--------
>>> from nipype.interfaces import freesurfer
>>> resampler = freesurfer.Resample()
>>> resampler.inputs.in_file = 'structural.nii'
>>> resampler.inputs.resampled_file = 'resampled.nii'
>>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1)
>>> resampler.cmdline
'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii'
"""
_cmd = 'mri_convert'
input_spec = ResampleInputSpec
output_spec = ResampleOutputSpec
def _get_outfilename(self):
if isdefined(self.inputs.resampled_file):
outfile = self.inputs.resampled_file
else:
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='_resample')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['resampled_file'] = self._get_outfilename()
return outputs
def _gen_filename(self, name):
if name == 'resampled_file':
return self._get_outfilename()
return None
class ReconAllInputSpec(CommandLineInputSpec):
subject_id = traits.Str("recon_all", argstr='-subjid %s',
desc='subject name', usedefault=True)
directive = traits.Enum('all', 'autorecon1', 'autorecon2', 'autorecon2-cp',
'autorecon2-wm', 'autorecon2-inflate1',
'autorecon2-perhemi', 'autorecon3', 'localGI',
'qcache', argstr='-%s', desc='process directive',
usedefault=True, position=0)
hemi = traits.Enum('lh', 'rh', desc='hemisphere to process',
argstr="-hemi %s")
T1_files = InputMultiPath(File(exists=True), argstr='-i %s...',
desc='name of T1 file to process')
T2_file = File(exists=True, argstr="-T2 %s", min_ver='5.3.0',
desc='Use a T2 image to refine the cortical surface')
openmp = traits.Int(argstr="-openmp %d",
desc="Number of processors to use in parallel")
subjects_dir = Directory(exists=True, argstr='-sd %s', hash_files=False,
desc='path to subjects directory', genfile=True)
flags = traits.Str(argstr='%s', desc='additional parameters')
class ReconAllIOutputSpec(FreeSurferSource.output_spec):
subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory.')
subject_id = traits.Str(desc='Subject name for whom to retrieve data')
class ReconAll(CommandLine):
"""Uses recon-all to generate surfaces and parcellations of structural data
from anatomical images of a subject.
Examples
--------
>>> from nipype.interfaces.freesurfer import ReconAll
>>> reconall = ReconAll()
>>> reconall.inputs.subject_id = 'foo'
>>> reconall.inputs.directive = 'all'
>>> reconall.inputs.subjects_dir = '.'
>>> reconall.inputs.T1_files = 'structural.nii'
>>> reconall.cmdline
'recon-all -all -i structural.nii -subjid foo -sd .'
"""
_cmd = 'recon-all'
_additional_metadata = ['loc', 'altkey']
input_spec = ReconAllInputSpec
output_spec = ReconAllIOutputSpec
_can_resume = True
_steps = [
#autorecon1
('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz']),
('talairach', ['mri/transforms/talairach.auto.xfm',
'mri/transforms/talairach.xfm']),
('nuintensitycor', ['mri/nu.mgz']),
('normalization', ['mri/T1.mgz']),
('skullstrip',
['mri/brainmask.auto.mgz',
'mri/brainmask.mgz']),
#autorecon2
('gcareg', ['mri/transforms/talairach.lta']),
('canorm', ['mri/norm.mgz']),
('careg', ['mri/transforms/talairach.m3z']),
('careginv', ['mri/transforms/talairach.m3z.inv.x.mgz',
'mri/transforms/talairach.m3z.inv.y.mgz',
'mri/transforms/talairach.m3z.inv.z.mgz']),
('rmneck', ['mri/nu_noneck.mgz']),
('skull-lta', ['mri/transforms/talairach_with_skull_2.lta']),
('calabel',
['mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', 'mri/aseg.mgz']),
('normalization2', ['mri/brain.mgz']),
('maskbfs', ['mri/brain.finalsurfs.mgz']),
('segmentation', ['mri/wm.asegedit.mgz', 'mri/wm.mgz']),
('fill', ['mri/filled.mgz']),
('tessellate', ['surf/lh.orig.nofix', 'surf/rh.orig.nofix']),
('smooth1', ['surf/lh.smoothwm.nofix', 'surf/rh.smoothwm.nofix']),
('inflate1', ['surf/lh.inflated.nofix', 'surf/rh.inflated.nofix']),
('qsphere', ['surf/lh.qsphere.nofix', 'surf/rh.qsphere.nofix']),
('fix', ['surf/lh.orig', 'surf/rh.orig']),
('white',
['surf/lh.white',
'surf/rh.white',
'surf/lh.curv',
'surf/rh.curv',
'surf/lh.area',
'surf/rh.area',
'label/lh.cortex.label',
'label/rh.cortex.label']),
('smooth2', ['surf/lh.smoothwm', 'surf/rh.smoothwm']),
('inflate2',
['surf/lh.inflated',
'surf/rh.inflated',
'surf/lh.sulc',
'surf/rh.sulc',
'surf/lh.inflated.H',
'surf/rh.inflated.H',
'surf/lh.inflated.K',
'surf/rh.inflated.K']),
#autorecon3
('sphere', ['surf/lh.sphere', 'surf/rh.sphere']),
('surfreg', ['surf/lh.sphere.reg', 'surf/rh.sphere.reg']),
('jacobian_white', ['surf/lh.jacobian_white',
'surf/rh.jacobian_white']),
('avgcurv', ['surf/lh.avg_curv', 'surf/rh.avg_curv']),
('cortparc', ['label/lh.aparc.annot', 'label/rh.aparc.annot']),
('pial',
['surf/lh.pial',
'surf/rh.pial',
'surf/lh.curv.pial',
'surf/rh.curv.pial',
'surf/lh.area.pial',
'surf/rh.area.pial',
'surf/lh.thickness',
'surf/rh.thickness']),
('cortparc2', ['label/lh.aparc.a2009s.annot',
'label/rh.aparc.a2009s.annot']),
('parcstats2',
['stats/lh.aparc.a2009s.stats',
'stats/rh.aparc.a2009s.stats',
'stats/aparc.annot.a2009s.ctab']),
('cortribbon', ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz',
'mri/ribbon.mgz']),
('segstats', ['stats/aseg.stats']),
('aparc2aseg', ['mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz']),
('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats']),
('balabels', ['BA.ctab', 'BA.thresh.ctab']),
('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label',
'label/rh.entorhinal_exvivo.label'])]
def _gen_subjects_dir(self):
return os.getcwd()
def _gen_filename(self, name):
if name == 'subjects_dir':
return self._gen_subjects_dir()
return None
def _list_outputs(self):
"""
See io.FreeSurferSource.outputs for the list of outputs returned
"""
if isdefined(self.inputs.subjects_dir):
subjects_dir = self.inputs.subjects_dir
else:
subjects_dir = self._gen_subjects_dir()
if isdefined(self.inputs.hemi):
hemi = self.inputs.hemi
else:
hemi = 'both'
outputs = self._outputs().get()
outputs.update(FreeSurferSource(subject_id=self.inputs.subject_id,
subjects_dir=subjects_dir,
hemi=hemi)._list_outputs())
outputs['subject_id'] = self.inputs.subject_id
outputs['subjects_dir'] = subjects_dir
return outputs
def _is_resuming(self):
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id,
'mri')):
return True
return False
def _format_arg(self, name, trait_spec, value):
if name == 'T1_files':
if self._is_resuming():
return ''
return super(ReconAll, self)._format_arg(name, trait_spec, value)
@property
def cmdline(self):
cmd = super(ReconAll, self).cmdline
if not self._is_resuming():
return cmd
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
#cmd = cmd.replace(' -all ', ' -make all ')
iflogger.info('Overriding recon-all directive')
flags = []
directive = 'all'
for idx, step in enumerate(self._steps):
step, outfiles = step
if all([os.path.exists(os.path.join(subjects_dir,
self.inputs.subject_id,f)) for
f in outfiles]):
flags.append('-no%s'%step)
                # Check the later milestone first; otherwise the
                # 'autorecon3' branch can never be reached.
                if idx > 23:
                    directive = 'autorecon3'
                elif idx > 4:
                    directive = 'autorecon2'
else:
flags.append('-%s'%step)
cmd = cmd.replace(' -%s ' % self.inputs.directive, ' -%s ' % directive)
cmd += ' ' + ' '.join(flags)
iflogger.info('resume recon-all : %s' % cmd)
return cmd
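# A rough sketch of the resume behaviour implemented above (subject name,
# input file and the exact flag list are hypothetical, not output of a real
# run): on a fresh subject the directive is passed through unchanged, while on
# a partially processed subject the '-i' argument is dropped, completed steps
# get '-no<step>' flags and the directive is rewritten, e.g.
#
#     recon-all -all -i structural.nii -subjid foo -sd .
#     recon-all -autorecon2 -subjid foo -sd . -nomotioncor -notalairach \
#         -nonuintensitycor -nonormalization -noskullstrip -gcareg -canorm ...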
class BBRegisterInputSpec(FSTraitedSpec):
subject_id = traits.Str(argstr='--s %s',
desc='freesurfer subject id',
mandatory=True)
source_file = File(argstr='--mov %s',
desc='source file to be registered',
mandatory=True, copyfile=False)
init = traits.Enum('spm', 'fsl', 'header', argstr='--init-%s',
mandatory=True, xor=['init_reg_file'],
desc='initialize registration spm, fsl, header')
init_reg_file = File(exists=True, argstr='--init-reg %s',
desc='existing registration file',
xor=['init'], mandatory=True)
contrast_type = traits.Enum('t1', 't2', argstr='--%s',
desc='contrast type of image',
mandatory=True)
intermediate_file = File(exists=True, argstr="--int %s",
desc="Intermediate image, e.g. in case of partial FOV")
reg_frame = traits.Int(argstr="--frame %d", xor=["reg_middle_frame"],
desc="0-based frame index for 4D source file")
reg_middle_frame = traits.Bool(argstr="--mid-frame", xor=["reg_frame"],
desc="Register middle frame of 4D source file")
out_reg_file = File(argstr='--reg %s',
desc='output registration file',
genfile=True)
spm_nifti = traits.Bool(argstr="--spm-nii",
desc="force use of nifti rather than analyze with SPM")
epi_mask = traits.Bool(argstr="--epi-mask",
desc="mask out B0 regions in stages 1 and 2")
out_fsl_file = traits.Either(traits.Bool, File, argstr="--fslmat %s",
desc="write the transformation matrix in FSL FLIRT format")
registered_file = traits.Either(traits.Bool, File, argstr='--o %s',
desc='output warped sourcefile either True or filename')
class BBRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc='Output registration file')
out_fsl_file = File(desc='Output FLIRT-style registration file')
min_cost_file = File(exists=True, desc='Output registration minimum cost file')
registered_file = File(desc='Registered and resampled source file')
class BBRegister(FSCommand):
"""Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical.
This program performs within-subject, cross-modal registration using a
boundary-based cost function. The registration is constrained to be 6
DOF (rigid). It is required that you have an anatomical scan of the
subject that has already been recon-all-ed using freesurfer.
Examples
--------
>>> from nipype.interfaces.freesurfer import BBRegister
>>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2')
>>> bbreg.cmdline
'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me'
"""
_cmd = 'bbregister'
input_spec = BBRegisterInputSpec
output_spec = BBRegisterOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
_in = self.inputs
if isdefined(_in.out_reg_file):
outputs['out_reg_file'] = op.abspath(_in.out_reg_file)
elif _in.source_file:
suffix = '_bbreg_%s.dat' % _in.subject_id
outputs['out_reg_file'] = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
if isdefined(_in.registered_file):
if isinstance(_in.registered_file, bool):
outputs['registered_file'] = fname_presuffix(_in.source_file,
suffix='_bbreg')
else:
outputs['registered_file'] = op.abspath(_in.registered_file)
if isdefined(_in.out_fsl_file):
if isinstance(_in.out_fsl_file, bool):
suffix='_bbreg_%s.mat' % _in.subject_id
out_fsl_file = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
outputs['out_fsl_file'] = out_fsl_file
else:
outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file)
outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost'
return outputs
def _format_arg(self, name, spec, value):
if name in ['registered_file', 'out_fsl_file']:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
return super(BBRegister, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'out_reg_file':
return self._list_outputs()[name]
return None
class ApplyVolTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, argstr='--mov %s',
copyfile=False, mandatory=True,
desc='Input volume you wish to transform')
transformed_file = File(desc='Output volume', argstr='--o %s', genfile=True)
_targ_xor = ('target_file', 'tal', 'fs_target')
target_file = File(exists=True, argstr='--targ %s', xor=_targ_xor,
desc='Output template volume', mandatory=True)
tal = traits.Bool(argstr='--tal', xor=_targ_xor, mandatory=True,
desc='map to a sub FOV of MNI305 (with --reg only)')
tal_resolution = traits.Float(argstr="--talres %.10f",
desc="Resolution to sample when using tal")
fs_target = traits.Bool(argstr='--fstarg', xor=_targ_xor, mandatory=True,
requires=['reg_file'],
desc='use orig.mgz from subject in regfile as target')
_reg_xor = ('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')
reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s',
mandatory=True,
desc='tkRAS-to-tkRAS matrix (tkregister2 format)')
fsl_reg_file = File(exists=True, xor=_reg_xor, argstr='--fsl %s',
mandatory=True,
desc='fslRAS-to-fslRAS matrix (FSL format)')
xfm_reg_file = File(exists=True, xor=_reg_xor, argstr='--xfm %s',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix (MNI format)')
reg_header = traits.Bool(xor=_reg_xor, argstr='--regheader',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix = identity')
subject = traits.Str(xor=_reg_xor, argstr='--s %s',
mandatory=True,
desc='set matrix = identity and use subject for any templates')
inverse = traits.Bool(desc='sample from target to source',
argstr='--inv')
interp = traits.Enum('trilin', 'nearest', 'cubic', argstr='--interp %s',
desc='Interpolation method (<trilin> or nearest)')
no_resample = traits.Bool(desc='Do not resample; just change vox2ras matrix',
argstr='--no-resample')
m3z_file = File(argstr="--m3z %s",
desc=('This is the morph to be applied to the volume. '
'Unless the morph is in mri/transforms (eg.: for '
'talairach.m3z computed by reconall), you will need '
'to specify the full path to this morph and use the '
'--noDefM3zPath flag.'))
no_ded_m3z_path = traits.Bool(argstr="--noDefM3zPath",
requires=['m3z_file'],
desc=('To be used with the m3z flag. '
                                              'Instructs the code not to look for the '
'm3z morph in the default location '
'(SUBJECTS_DIR/subj/mri/transforms), '
'but instead just use the path '
'indicated in --m3z.'))
invert_morph = traits.Bool(argstr="--inv-morph",
requires=['m3z_file'],
desc=('Compute and use the inverse of the '
'non-linear morph to resample the input '
'volume. To be used by --m3z.'))
class ApplyVolTransformOutputSpec(TraitedSpec):
transformed_file = File(exists=True, desc='Path to output file if used normally')
class ApplyVolTransform(FSCommand):
"""Use FreeSurfer mri_vol2vol to apply a transform.
Examples
--------
>>> from nipype.interfaces.freesurfer import ApplyVolTransform
>>> applyreg = ApplyVolTransform()
>>> applyreg.inputs.source_file = 'structural.nii'
>>> applyreg.inputs.reg_file = 'register.dat'
>>> applyreg.inputs.transformed_file = 'struct_warped.nii'
>>> applyreg.inputs.fs_target = True
>>> applyreg.cmdline
'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii'
"""
_cmd = 'mri_vol2vol'
input_spec = ApplyVolTransformInputSpec
output_spec = ApplyVolTransformOutputSpec
def _get_outfile(self):
outfile = self.inputs.transformed_file
if not isdefined(outfile):
if self.inputs.inverse == True:
if self.inputs.fs_target == True:
src = 'orig.mgz'
else:
src = self.inputs.target_file
else:
src = self.inputs.source_file
outfile = fname_presuffix(src,
newpath=os.getcwd(),
suffix='_warped')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['transformed_file'] = os.path.abspath(self._get_outfile())
return outputs
def _gen_filename(self, name):
if name == 'transformed_file':
return self._get_outfile()
return None
class SmoothInputSpec(FSTraitedSpec):
in_file = File(exists=True, desc='source volume',
argstr='--i %s', mandatory=True)
reg_file = File(desc='registers volume to surface anatomical ',
argstr='--reg %s', mandatory=True,
exists=True)
smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True)
proj_frac_avg = traits.Tuple(traits.Float, traits.Float, traits.Float,
xor=['proj_frac'],
                                 desc='average along normal min max delta',
argstr='--projfrac-avg %.2f %.2f %.2f')
    proj_frac = traits.Float(desc='project frac of thickness along surface normal',
xor=['proj_frac_avg'],
argstr='--projfrac %s')
surface_fwhm = traits.Range(low=0.0, requires=['reg_file'],
mandatory=True, xor=['num_iters'],
desc='surface FWHM in mm', argstr='--fwhm %f')
num_iters = traits.Range(low=1, xor=['surface_fwhm'],
mandatory=True, argstr='--niters %d',
desc='number of iterations instead of fwhm')
vol_fwhm = traits.Range(low=0.0, argstr='--vol-fwhm %f',
desc='volume smoothing outside of surface')
class SmoothOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed input volume')
class Smooth(FSCommand):
"""Use FreeSurfer mris_volsmooth to smooth a volume
This function smoothes cortical regions on a surface and non-cortical
regions in volume.
.. note::
Cortical voxels are mapped to the surface (3D->2D) and then the
smoothed values from the surface are put back into the volume to fill
the cortical ribbon. If data is smoothed with this algorithm, one has to
be careful about how further processing is interpreted.
Examples
--------
>>> from nipype.interfaces.freesurfer import Smooth
>>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6)
>>> smoothvol.cmdline
'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000'
"""
_cmd = 'mris_volsmooth'
input_spec = SmoothInputSpec
output_spec = SmoothOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.smoothed_file
if not isdefined(outfile):
outfile = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'smoothed_file':
return self._list_outputs()[name]
return None
class RobustRegisterInputSpec(FSTraitedSpec):
source_file = File(mandatory=True, argstr='--mov %s',
desc='volume to be registered')
target_file = File(mandatory=True, argstr='--dst %s',
desc='target volume for the registration')
out_reg_file = File(genfile=True, argstr='--lta %s',
desc='registration file to write')
registered_file = traits.Either(traits.Bool, File, argstr='--warp %s',
desc='registered image; either True or filename')
weights_file = traits.Either(traits.Bool, File, argstr='--weights %s',
desc='weights image to write; either True or filename')
est_int_scale = traits.Bool(argstr='--iscale',
desc='estimate intensity scale (recommended for unnormalized images)')
trans_only = traits.Bool(argstr='--transonly',
desc='find 3 parameter translation only')
in_xfm_file = File(exists=True, argstr='--transform',
desc='use initial transform on source')
half_source = traits.Either(traits.Bool, File, argstr='--halfmov %s',
desc="write source volume mapped to halfway space")
half_targ = traits.Either(traits.Bool, File, argstr="--halfdst %s",
desc="write target volume mapped to halfway space")
half_weights = traits.Either(traits.Bool, File, argstr="--halfweights %s",
desc="write weights volume mapped to halfway space")
half_source_xfm = traits.Either(traits.Bool, File, argstr="--halfmovlta %s",
desc="write transform from source to halfway space")
half_targ_xfm = traits.Either(traits.Bool, File, argstr="--halfdstlta %s",
desc="write transform from target to halfway space")
auto_sens = traits.Bool(argstr='--satit', xor=['outlier_sens'], mandatory=True,
desc='auto-detect good sensitivity')
outlier_sens = traits.Float(argstr='--sat %.4f', xor=['auto_sens'], mandatory=True,
desc='set outlier sensitivity explicitly')
least_squares = traits.Bool(argstr='--leastsquares',
desc='use least squares instead of robust estimator')
no_init = traits.Bool(argstr='--noinit', desc='skip transform init')
init_orient = traits.Bool(argstr='--initorient',
desc='use moments for initial orient (recommended for stripped brains)')
max_iterations = traits.Int(argstr='--maxit %d',
desc='maximum # of times on each resolution')
high_iterations = traits.Int(argstr='--highit %d',
desc='max # of times on highest resolution')
iteration_thresh = traits.Float(argstr='--epsit %.3f',
desc='stop iterations when below threshold')
subsample_thresh = traits.Int(argstr='--subsample %d',
desc='subsample if dimension is above threshold size')
outlier_limit = traits.Float(argstr='--wlimit %.3f',
desc='set maximal outlier limit in satit')
write_vo2vox = traits.Bool(argstr='--vox2vox',
desc='output vox2vox matrix (default is RAS2RAS)')
no_multi = traits.Bool(argstr='--nomulti', desc='work on highest resolution')
mask_source = File(exists=True, argstr='--maskmov %s',
desc='image to mask source volume with')
mask_target = File(exists=True, argstr='--maskdst %s',
desc='image to mask target volume with')
force_double = traits.Bool(argstr='--doubleprec', desc='use double-precision intensities')
force_float = traits.Bool(argstr='--floattype', desc='use float intensities')
class RobustRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc="output registration file")
registered_file = File(desc="output image with registration applied")
weights_file = File(desc="image of weights used")
half_source = File(desc="source image mapped to halfway space")
half_targ = File(desc="target image mapped to halfway space")
half_weights = File(desc="weights image mapped to halfway space")
half_source_xfm = File(desc="transform file to map source image to halfway space")
half_targ_xfm = File(desc="transform file to map target image to halfway space")
class RobustRegister(FSCommand):
"""Perform intramodal linear registration (translation and rotation) using robust statistics.
Examples
--------
>>> from nipype.interfaces.freesurfer import RobustRegister
>>> reg = RobustRegister()
>>> reg.inputs.source_file = 'structural.nii'
>>> reg.inputs.target_file = 'T1.nii'
>>> reg.inputs.auto_sens = True
>>> reg.inputs.init_orient = True
>>> reg.cmdline
'mri_robust_register --satit --initorient --lta structural_robustreg.lta --mov structural.nii --dst T1.nii'
References
----------
Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse Consistent Registration:
A Robust Approach. Neuroimage 53(4) 1181-96.
"""
_cmd = 'mri_robust_register'
input_spec = RobustRegisterInputSpec
output_spec = RobustRegisterOutputSpec
def _format_arg(self, name, spec, value):
for option in ["registered_file", "weights_file", "half_source", "half_targ",
"half_weights", "half_source_xfm", "half_targ_xfm"]:
if name == option:
if isinstance(value, bool):
fname = self._list_outputs()[name]
else:
fname = value
return spec.argstr % fname
return super(RobustRegister, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_reg_file'] = self.inputs.out_reg_file
if not isdefined(self.inputs.out_reg_file) and self.inputs.source_file:
outputs['out_reg_file'] = fname_presuffix(self.inputs.source_file,
suffix='_robustreg.lta', use_ext=False)
prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file)
suffices = dict(registered_file=("src", "_robustreg", True),
weights_file=("src", "_robustweights", True),
half_source=("src", "_halfway", True),
half_targ=("trg", "_halfway", True),
half_weights=("src", "_halfweights", True),
half_source_xfm=("src", "_robustxfm.lta", False),
half_targ_xfm=("trg", "_robustxfm.lta", False))
for name, sufftup in suffices.items():
value = getattr(self.inputs, name)
if isdefined(value):
if isinstance(value, bool):
outputs[name] = fname_presuffix(prefices[sufftup[0]],
suffix=sufftup[1],
newpath=os.getcwd(),
use_ext=sufftup[2])
else:
outputs[name] = value
return outputs
def _gen_filename(self, name):
if name == 'out_reg_file':
return self._list_outputs()[name]
return None
class FitMSParamsInputSpec(FSTraitedSpec):
in_files = traits.List(File(exists=True), argstr="%s", position=-2, mandatory=True,
desc="list of FLASH images (must be in mgh format)")
tr_list = traits.List(traits.Int, desc="list of TRs of the input files (in msec)")
te_list = traits.List(traits.Float, desc="list of TEs of the input files (in msec)")
flip_list = traits.List(traits.Int, desc="list of flip angles of the input files")
xfm_list = traits.List(File(exists=True),
desc="list of transform files to apply to each FLASH image")
out_dir = Directory(argstr="%s", position=-1, genfile=True,
desc="directory to store output in")
class FitMSParamsOutputSpec(TraitedSpec):
t1_image = File(exists=True, desc="image of estimated T1 relaxation values")
pd_image = File(exists=True, desc="image of estimated proton density values")
t2star_image = File(exists=True, desc="image of estimated T2* values")
class FitMSParams(FSCommand):
"""Estimate tissue paramaters from a set of FLASH images.
Examples
--------
>>> from nipype.interfaces.freesurfer import FitMSParams
>>> msfit = FitMSParams()
>>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz']
>>> msfit.inputs.out_dir = 'flash_parameters'
>>> msfit.cmdline
'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters'
"""
_cmd = "mri_ms_fitparms"
input_spec = FitMSParamsInputSpec
output_spec = FitMSParamsOutputSpec
def _format_arg(self, name, spec, value):
if name == "in_files":
cmd = ""
for i, file in enumerate(value):
if isdefined(self.inputs.tr_list):
cmd = " ".join((cmd, "-tr %.1f" % self.inputs.tr_list[i]))
if isdefined(self.inputs.te_list):
cmd = " ".join((cmd, "-te %.3f" % self.inputs.te_list[i]))
if isdefined(self.inputs.flip_list):
cmd = " ".join((cmd, "-fa %.1f" % self.inputs.flip_list[i]))
if isdefined(self.inputs.xfm_list):
cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i]))
cmd = " ".join((cmd, file))
return cmd
return super(FitMSParams, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_dir):
out_dir = self._gen_filename("out_dir")
else:
out_dir = self.inputs.out_dir
outputs["t1_image"] = os.path.join(out_dir, "T1.mgz")
outputs["pd_image"] = os.path.join(out_dir, "PD.mgz")
outputs["t2star_image"] = os.path.join(out_dir, "T2star.mgz")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
return None
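# A rough sketch of the interleaving done in _format_arg above (TR/TE/flip
# values are hypothetical): with tr_list=[20, 20], te_list=[3.0, 3.0] and
# flip_list=[5, 30], the generated command would look roughly like
#
#     mri_ms_fitparms -tr 20.0 -te 3.000 -fa 5.0 flash_05.mgz \
#         -tr 20.0 -te 3.000 -fa 30.0 flash_30.mgz flash_parameters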
class SynthesizeFLASHInputSpec(FSTraitedSpec):
fixed_weighting = traits.Bool(position=1, argstr="-w",
desc="use a fixed weighting to generate optimal gray/white contrast")
tr = traits.Float(mandatory=True, position=2, argstr="%.2f",
desc="repetition time (in msec)")
flip_angle = traits.Float(mandatory=True, position=3, argstr="%.2f",
desc="flip angle (in degrees)")
te = traits.Float(mandatory=True, position=4, argstr="%.3f",
desc="echo time (in msec)")
t1_image = File(exists=True, mandatory=True, position=5, argstr="%s",
desc="image of T1 values")
pd_image = File(exists=True, mandatory=True, position=6, argstr="%s",
desc="image of proton density values")
out_file = File(genfile=True, argstr="%s", desc="image to write")
class SynthesizeFLASHOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="synthesized FLASH acquisition")
class SynthesizeFLASH(FSCommand):
"""Synthesize a FLASH acquisition from T1 and proton density maps.
Examples
--------
>>> from nipype.interfaces.freesurfer import SynthesizeFLASH
>>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30)
>>> syn.inputs.t1_image = 'T1.mgz'
>>> syn.inputs.pd_image = 'PD.mgz'
>>> syn.inputs.out_file = 'flash_30syn.mgz'
>>> syn.cmdline
'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz'
"""
_cmd = "mri_synthesize"
input_spec = SynthesizeFLASHInputSpec
output_spec = SynthesizeFLASHOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs["out_file"] = self.inputs.out_file
else:
outputs["out_file"] = self._gen_fname("synth-flash_%02d.mgz" % self.inputs.flip_angle,
suffix="")
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
| bsd-3-clause | 2,728,094,450,304,617,000 | 44.489267 | 356 | 0.548246 | false | 3.887652 | false | false | false |
teamtaverna/core | app/timetables/migrations/0003_auto_20171107_1103.py | 1 | 1250 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-07 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetables', '0002_auto_20171005_2209'),
]
operations = [
migrations.AlterField(
model_name='course',
name='name',
field=models.CharField(help_text='Example: appetizer, main course, dessert', max_length=150, verbose_name='Course Name'),
),
migrations.AlterField(
model_name='dish',
name='name',
field=models.CharField(max_length=255, verbose_name='Dish Name'),
),
migrations.AlterField(
model_name='meal',
name='name',
field=models.CharField(max_length=60, verbose_name='Meal Name'),
),
migrations.AlterField(
model_name='timetable',
name='name',
field=models.CharField(max_length=255, verbose_name='Timetable Name'),
),
migrations.AlterField(
model_name='vendor',
name='name',
field=models.CharField(max_length=255, verbose_name='Vendor Name'),
),
]
| mit | 7,383,553,580,154,259,000 | 30.25 | 133 | 0.5696 | false | 4.180602 | false | false | false |
danpoland/pyramid-restful-framework | pyramid_restful/pagination/linkheader.py | 1 | 1989 | import math
from pyramid.response import Response
from .pagenumber import PageNumberPagination
from .utilities import replace_query_param
__all__ = ['LinkHeaderPagination']
class LinkHeaderPagination(PageNumberPagination):
"""
Add a header field to responses called Link. The value of the Link header contains information about
traversing the paginated resource. For more information about link header pagination checkout
githhub's great explanation: https://developer.github.com/v3/guides/traversing-with-pagination/
"""
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
first_url = self.get_first_link()
last_url = self.get_last_link()
link = ''
if next_url is not None and previous_url is not None:
link = '<{next_url}>; rel="next", <{previous_url}>; rel="prev"'
elif next_url is not None:
link = '<{next_url}>; rel="next"'
elif previous_url is not None:
link = '<{previous_url}>; rel="prev"'
if link:
link += ', <{first_url}>; rel="first", <{last_url}>; rel="last"'
response = Response(json=data) # todo, support renderer, should not hard code json
link = link.format(next_url=next_url, previous_url=previous_url, first_url=first_url, last_url=last_url)
if link:
response.headers['Link'] = link
response.headers['X-Total-Count'] = str(self.page.paginator.count)
return response
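    # Illustrative response headers (URL and page numbers are hypothetical):
    # for page 2 of 5 with '?page=<n>' pagination the client would receive
    # something like
    #
    #   Link: <http://example.com/items?page=3>; rel="next",
    #         <http://example.com/items?page=1>; rel="prev",
    #         <http://example.com/items?page=1>; rel="first",
    #         <http://example.com/items?page=5>; rel="last"
    #   X-Total-Count: <total object count>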
def get_first_link(self):
url = self.get_url_root()
return replace_query_param(url, self.page_query_param, 1)
def get_last_link(self):
url = self.get_url_root()
count = self.page.paginator.count
page_size = self.get_page_size(self.request)
total_pages = int(math.ceil(count / float(page_size)))
return replace_query_param(url, self.page_query_param, total_pages)
| bsd-2-clause | -675,978,130,135,537,900 | 33.894737 | 112 | 0.639517 | false | 3.636197 | false | false | false |
braoru/check-openshift | check_nodes_openshift.py | 1 | 14336 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013:
# Sébastien Pasche, [email protected]
# Benoit Chalut, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
author = "Sebastien Pasche"
maintainer = "Sebastien Pasche"
version = "0.0.1"
import sys
import optparse
import os
import traceback
import json
from pprint import pprint
#TODO : Move to asyncio_mongo
try:
import paramiko
except ImportError:
print("ERROR : this plugin needs the python-paramiko module. Please install it")
sys.exit(2)
#Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
from openshift_checks import MongoDBHelper, OutputFormatHelpers, SSHHelper
except ImportError:
print("ERROR : this plugin needs the local openshift_checks lib. Please install it")
sys.exit(2)
#DEFAULT LIMITS
#--------------
DEFAULT_WARNING = 2
DEFAULT_CRITICAL = 3
def is_node_mco_ping(
        client,
        node_identity,
        debug=False
):
    """
    :param client: connected paramiko SSH client pointing at the broker
    :param node_identity: mcollective identity of the node to ping
    :param debug: print the executed command and the raw JSON output
    :return: True if the node answered the mco ping with an OK status
    """
    cmd = "oo-mco rpc rpcutil ping -j -I {i}".format(
        i=node_identity
    )
if debug:
print("Command to execute")
print(cmd)
stdin, stdout, stderr = client.exec_command(
cmd,
get_pty=True
)
lines = [line for line in stdout]
json_raw = ''.join(lines)
json_array = json.loads(json_raw)
if debug:
print("JSON mco ping output")
pprint(json_array)
if len(json_array) == 1:
mco_ping_status = json.loads(json_raw)[0]
if mco_ping_status:
if mco_ping_status['statusmsg'] == 'OK':
return True
return False
def nodes_mco_ping_status(
client,
mongo_district_dict,
debug=False
):
"""
:param client:
:param mongo_district_dict:
:return:
"""
servers_ping = {
server['name']: is_node_mco_ping(
client,
server['name'],
debug
)
for server in mongo_district_dict['servers']
}
servers_status = {
server_name: {
'unresponsive': not mco_ping,
'active': mco_ping
} for server_name, mco_ping in servers_ping.items()
}
return servers_status
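# Illustrative shape of the returned mapping (node names are hypothetical):
#
#     {
#         'node01.example.org': {'unresponsive': False, 'active': True},
#         'node02.example.org': {'unresponsive': True, 'active': False},
#     }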
def openshift_district(
mongodb_db_connection,
district_name,
debug=False
):
"""
:param mongodb_db_connection:
:param district_name:
:return:
"""
collection = mongodb_db_connection['districts']
if debug:
print("The db connection")
pprint(mongodb_db_connection)
print("The collection")
pprint(collection)
district = collection.find_one(
{
'name': district_name
},
{
'servers': 1
}
)
if debug:
print('The district')
pprint(district)
return district
def servers_status(
mongo_district_dict
):
"""
:param mongo_district_dict:
:return:
"""
servers_status = {
server['name']: {
'active': server['active'],
'unresponsive': server['unresponsive']
} for server in mongo_district_dict['servers']
}
return servers_status
def nb_unresponsive_servers(
servers_status_dict
):
"""
:param servers_status_dict:
:return:
"""
    return sum(
        status['unresponsive'] for server, status in servers_status_dict.items()
    )
def nb_active_servers(
servers_status_dict
):
"""
:param servers_status_dict:
:return:
"""
    return sum(
        status['active'] for server, status in servers_status_dict.items()
    )
# OPT parsing
# -----------
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + version)
#broker ssh param
parser.add_option('--broker-hostname', default='',
dest="broker_hostname", help='Broker to connect to')
parser.add_option('--broker-ssh-port',
dest="broker_ssh_port", type="int", default=22,
help='SSH port to connect to the broker. Default : 22')
parser.add_option('--broker-ssh-key', default=os.path.expanduser('~/.ssh/id_rsa'),
dest="broker_ssh_key_file", help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('--broker-ssh-user', default='shinken',
dest="broker_ssh_user", help='remote use to use. By default shinken.')
parser.add_option('--broker-passphrase', default='',
dest="broker_ssh_passphrase", help='SSH key passphrase. By default will use void')
#mongodb connection
parser.add_option('--mongo-hostname',
dest="mongo_hostnames",
help='space separated mongodb hostnames:port list to connect to. '
'Example : "server1:27017 server2:27017" ')
parser.add_option('--mongo-user',
dest="mongo_user", default="shinken",
                  help='remote user to use. By default shinken.')
parser.add_option('--mongo-password',
dest="mongo_password",
help='Password. By default will use void')
parser.add_option('--mongo-source-longon',
dest="mongo_source", default='admin',
help='Source where to log on. Default: admin')
parser.add_option('--mongo-replicaset',
dest="mongo_replicaset",
help='openshift current mongodb replicaset')
parser.add_option('--mongo-openshift-database-name',
dest="mongo_openshift_database",
help='openshift current database')
#openshift relative
parser.add_option('--openshift-district-name',
dest="openshift_district",
help='openshift district to query')
parser.add_option('-w', '--warning',
dest="warning", type="int",default=None,
help='Warning value for number of unresponsive nodes. Default : 2')
parser.add_option('-c', '--critical',
dest="critical", type="int",default=None,
help='Critical value for number of unresponsive nodes. Default : 3')
#generic
parser.add_option('--debug',
dest="debug", default=False, action="store_true",
help='Enable debug')
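# Illustrative invocation (hostnames, credentials and district name are
# hypothetical):
#
#     ./check_nodes_openshift.py \
#         --broker-hostname broker.example.org --broker-ssh-user shinken \
#         --mongo-hostname "mongo1.example.org:27017 mongo2.example.org:27017" \
#         --mongo-user shinken --mongo-password secret \
#         --mongo-replicaset openshift --mongo-openshift-database-name openshift_broker \
#         --openshift-district-name small -w 2 -c 3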
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
#Broker ssh args
#---------------
# get broker server list
if opts.broker_hostname is None:
raise Exception("You must specify a broker server")
# get broker ssh user
if opts.broker_ssh_user is None:
raise Exception("You must specify a broker ssh user")
broker_ssh_host = opts.broker_hostname
broker_ssh_port = opts.broker_ssh_port
broker_ssh_user = opts.broker_ssh_user
broker_ssh_key_path = opts.broker_ssh_key_file
broker_ssh_passphrase = opts.broker_ssh_passphrase
#MongpDB args
#------------
# get mongodb server list
if opts.mongo_hostnames is None:
raise Exception("You must specify a mongodb servers list")
# get mongodb user
if opts.mongo_user is None:
raise Exception("You must specify a mongodb user")
# get mongodb user password
if opts.mongo_password is None:
raise Exception("You must specify a mongodb user password")
# get mongodb source logon
if opts.mongo_source is None:
raise Exception("You must specify a mongodb source longon")
# get mongodb openshift database name
if opts.mongo_openshift_database is None:
raise Exception("You must specify a mongodb openshift database name")
# get mongodb database replicaset
if opts.mongo_replicaset is None:
raise Exception("You must specify a mongodb database replicaset name")
mongodb_hostnames_array = opts.mongo_hostnames.split(' ')
mongodb_user = opts.mongo_user
mongodb_password = opts.mongo_password
mongodb_logon_source = opts.mongo_source
mongodb_openshift_db = opts.mongo_openshift_database
mongodb_replicaset = opts.mongo_replicaset
#Openshift related args
#----------------------
#Get district name
if opts.openshift_district is None:
raise Exception("You must specify a openshift district name")
openshift_district_name = opts.openshift_district
# Try to get numeic warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
debug = opts.debug
    # Initialise before the try block so the finally clause can safely test
    # the variable even if the SSH or MongoDB connection fails early.
    mongodb_client = None
    try:
        # Open an SSH connection to the broker
client = SSHHelper.connect(
hostname=broker_ssh_host,
user=broker_ssh_user,
ssh_key_file=broker_ssh_key_path,
passphrase=broker_ssh_passphrase,
port=broker_ssh_port
)
#Connecto to MongoDB
#-------------------
mongodb_client = MongoDBHelper.get_mongodb_connection_to_db(
mongodb_servers=mongodb_hostnames_array,
replicaset=mongodb_replicaset
)
mongodb_db = MongoDBHelper.get_mongodb_auth_db(
mongodb_client=mongodb_client,
database_name=mongodb_openshift_db,
username=mongodb_user,
password=mongodb_password,
source=mongodb_logon_source
)
#get district
#------------
district = openshift_district(
mongodb_db_connection=mongodb_db,
district_name=openshift_district_name,
debug=debug
)
if debug:
pprint(district)
#get server db status
#--------------------
servers_db_status = servers_status(district)
if debug:
print("mongodb servers status")
pprint(servers_db_status)
#get unresponsive/active count from the db
db_nb_unresponsive_servers = nb_unresponsive_servers(servers_db_status)
db_nb_active_servers = nb_active_servers(servers_db_status)
#get mco ping responce
#---------------------
ssh_mco_servers_status = nodes_mco_ping_status(
client,
district,
debug
)
if debug:
print("mco servers status")
pprint(ssh_mco_servers_status)
#get unresponsive/active count from remote mco ping
nb_mco_ping_active_servers = nb_active_servers(ssh_mco_servers_status)
nb_mco_ping_unresponsive_servers = nb_unresponsive_servers(ssh_mco_servers_status)
#format perf data
db_active_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mongodb_active_nodes".format(d=openshift_district_name),
value=db_nb_active_servers,
)
db_unresponsive_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mongodb_unresponsive_servers".format(d=openshift_district_name),
value=db_nb_unresponsive_servers,
warn=s_warning,
crit=s_critical
)
mco_active_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mco_active_nodes".format(d=openshift_district_name),
value=nb_mco_ping_active_servers,
)
mco_unresponsive_servers_data_string = OutputFormatHelpers.perf_data_string(
label="{d}_mco_unresponsive_servers".format(d=openshift_district_name),
value=nb_mco_ping_unresponsive_servers,
warn=s_warning,
crit=s_critical
)
#check
nb_unresponsive_servers = max(db_nb_unresponsive_servers,nb_mco_ping_unresponsive_servers)
nb_active_servers = max(db_nb_active_servers,nb_mco_ping_active_servers)
status = "OK"
state = "active"
nb = nb_active_servers
if nb_unresponsive_servers >= s_warning:
status = "Warning"
state = "unresponsive"
nb = nb_unresponsive_servers
if nb_unresponsive_servers >= s_critical:
status = "Critical"
state = "unresponsive"
nb = nb_unresponsive_servers
#Format and print check result
message = "{nb} {state} openshift nodes".format(
nb=nb,
state=state
)
output = OutputFormatHelpers.check_output_string(
status,
message,
[
db_active_servers_data_string,
db_unresponsive_servers_data_string,
mco_active_servers_data_string,
mco_unresponsive_servers_data_string
]
)
print(output)
except Exception as e:
if debug:
print(e)
the_type, value, tb = sys.exc_info()
traceback.print_tb(tb)
print("Error: {m}".format(m=e))
sys.exit(2)
finally:
if mongodb_client is not None:
MongoDBHelper.close_mongodb_connection(mongodb_client)
if status == "Critical":
sys.exit(2)
if status == "Warning":
sys.exit(1)
sys.exit(0) | agpl-3.0 | -3,173,243,693,299,770,400 | 29.308668 | 110 | 0.604255 | false | 3.954483 | false | false | false |
mbox/django | django/contrib/gis/forms/fields.py | 74 | 4444 | from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
# While this couples the geographic forms to the GEOS library,
# it decouples from database (by not importing SpatialBackend).
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
"""
This is the basic form field for a Geometry. Any textual input that is
accepted by GEOSGeometry is accepted by this form. By default,
this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
"""
widget = OpenLayersWidget
geom_type = 'GEOMETRY'
default_error_messages = {
'required': _('No geometry value provided.'),
'invalid_geom': _('Invalid geometry value.'),
'invalid_geom_type': _('Invalid geometry type.'),
'transform_error': _('An error occurred when transforming the geometry '
'to the SRID of the geometry form field.'),
}
def __init__(self, **kwargs):
# Pop out attributes from the database field, or use sensible
# defaults (e.g., allow None).
self.srid = kwargs.pop('srid', None)
self.geom_type = kwargs.pop('geom_type', self.geom_type)
super(GeometryField, self).__init__(**kwargs)
self.widget.attrs['geom_type'] = self.geom_type
def to_python(self, value):
"""
Transforms the value to a Geometry object.
"""
if value in self.empty_values:
return None
if not isinstance(value, GEOSGeometry):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')
# Try to set the srid
if not value.srid:
try:
value.srid = self.widget.map_srid
except AttributeError:
if self.srid:
value.srid = self.srid
return value
def clean(self, value):
"""
Validates that the input value can be converted to a Geometry
object (which is returned). A ValidationError is raised if
the value cannot be instantiated as a Geometry.
"""
geom = super(GeometryField, self).clean(value)
if geom is None:
return geom
# Ensuring that the geometry is of the correct type (indicated
# using the OGC string label).
if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')
# Transforming the geometry if the SRID was set.
if self.srid and self.srid != -1 and self.srid != geom.srid:
try:
geom.transform(self.srid)
except GEOSException:
raise forms.ValidationError(
self.error_messages['transform_error'], code='transform_error')
return geom
def _has_changed(self, initial, data):
""" Compare geographic value of data with its initial value. """
try:
data = self.to_python(data)
initial = self.to_python(initial)
except forms.ValidationError:
return True
# Only do a geographic comparison if both values are available
if initial and data:
data.transform(initial.srid)
# If the initial value was not added by the browser, the geometry
# provided may be slightly different, the first time it is saved.
# The comparison is done with a very low tolerance.
return not initial.equals_exact(data, tolerance=0.000001)
else:
# Check for change of state of existence
return bool(initial) != bool(data)
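# A minimal usage sketch (form and field names are illustrative, not part of
# this module): the concrete field classes below behave like any other form
# field, e.g.
#
#     class LocationForm(forms.Form):
#         point = PointField()
#
#     form = LocationForm({'point': 'POINT(5 23)'})
#     form.is_valid()   # cleaned_data['point'] becomes a GEOSGeometry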
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
class PointField(GeometryField):
geom_type = 'POINT'
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
class PolygonField(GeometryField):
geom_type = 'POLYGON'
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
| bsd-3-clause | 2,887,019,537,537,169,400 | 33.184615 | 107 | 0.628038 | false | 4.497976 | false | false | false |
biswasvikrant/pairinteraction | gui/pairinteraction/loader.py | 1 | 3284 | # Copyright (c) 2016 Sebastian Weber, Henri Menke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy import sparse
import json
class BinaryLoader:
def __init__(self):
# types
self.typeIds = {1008: 'int8', 1016: 'int16', 1032: 'int32',
1064: 'int64', 1108: 'uint8', 1116: 'uint16',
                        1132: 'uint32', 1164: 'uint64', 2032: 'float32',
2064: 'float64'}
self.type_t = 'uint16'
# bit masks
self.csr_not_csc = 0x01 # xxx0: csc, xxx1: csr
self.complex_not_real = 0x02 # xx0x: real, xx1x: complex
def readNumber(self, f, sz=None):
datatype = self.typeIds[np.fromfile(
f, dtype=np.dtype(self.type_t), count=1)[0]]
if sz is None:
return np.fromfile(f, dtype=np.dtype(datatype), count=1)[0]
else:
return np.fromfile(f, dtype=np.dtype(datatype), count=sz)
def readVector(self, f):
size = self.readNumber(f)
return self.readNumber(f, size)
def readMatrix(self, f):
flags = self.readNumber(f)
rows = self.readNumber(f)
cols = self.readNumber(f)
if flags & self.complex_not_real:
data = self.readVector(f) + self.readVector(f) * 1j
else:
data = self.readVector(f)
indices = self.readVector(f)
indptr = np.append(self.readVector(f), len(data))
if flags & self.csr_not_csc:
return sparse.csr_matrix((data, indices, indptr), shape=(rows, cols))
else:
return sparse.csc_matrix((data, indices, indptr), shape=(rows, cols))
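# Illustrative use of this loader through the Eigensystem class defined below
# (the file prefix is hypothetical; it must point at matching .mat/.json files):
#
#     system = Eigensystem('output/pair_0000')
#     system.params      # settings parsed from the .json file
#     system.energies    # real part of the diagonal of the first stored matrix
#     system.basis       # second stored matrix as a scipy sparse matrix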
class Eigensystem(BinaryLoader):
def __init__(self, filename):
super().__init__()
self._filename = filename
self._shift = 0
self._params = None
self._energies = None
self._basis = None
@property
def params(self):
if self._params is None:
with open(self._filename + '.json', 'r') as f:
self._params = json.load(f)
return self._params
@property
def energies(self):
if self._energies is None:
with open(self._filename + '.mat', 'rb') as f:
self._energies = np.real(self.readMatrix(f).diagonal())
self._shift = f.tell()
return self._energies
@property
def basis(self):
if self._basis is None:
with open(self._filename + '.mat', 'rb') as f:
if self._shift > 0:
f.seek(self._shift, 0)
else:
self._energies = np.real(self.readMatrix(f).diagonal())
self._basis = self.readMatrix(f)
return self._basis | apache-2.0 | -7,418,713,437,166,621,000 | 32.520408 | 81 | 0.576127 | false | 3.698198 | false | false | false |
hobbe/notifry-o | appengine/model/UserMessages.py | 1 | 2803 | # Notifry - Google App Engine backend
#
# Copyright 2011 Daniel Foote
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from model.UserMessage import UserMessage
class UserMessages(db.Model):
messages = db.ListProperty(int)
owner = db.UserProperty()
def dict(self):
result = {
'type' : 'messages',
'owner': self.owner,
'messages': self.get_messages()
}
try:
result['key'] = self.key().name()
except db.NotSavedError, ex:
# Not saved yet, so it has no ID.
pass
return result
def get_messages(self):
return UserMessage.get_by_id(self.messages[-200:], self)
def get_messages_for_source(self, source):
final_messages = []
for message in self.get_messages():
if message.source.externalKey == source.externalKey:
final_messages.append(message)
return final_messages
def add_message(self, message):
id = message.key().id()
if self.messages:
if not id in self.messages:
self.messages.append(id)
else:
self.messages = []
self.messages.append(id)
# And cull off old messages.
if len(self.messages) > 500:
self.messages = self.messages[-500:]
def remove_message(self, message):
if self.messages:
try:
self.messages.remove(message.key().id())
except ValueError, ex:
# We don't have that device in the list.
pass
def delete_for_source(self, source):
messages = self.get_messages_for_source(source)
def transaction(collection, messages):
db.delete(messages)
for message in messages:
collection.remove_message(message)
collection.put()
db.run_in_transaction(transaction, self, messages)
@staticmethod
def key_for(owner):
return "messages:%s" % owner.nickname()
@staticmethod
def get_user_message_collection(owner):
return UserMessages.get_or_insert(UserMessages.key_for(owner), owner = owner)
@staticmethod
def get_user_message_collection_static(owner):
return UserMessages.get_by_key_name(UserMessages.key_for(owner))
@staticmethod
def get_user_messages(owner):
collection = UserMessages.get_user_message_collection(owner)
return collection.get_messages()
@staticmethod
def get_user_messages_for_source(source):
collection = UserMessages.get_user_message_collection(source.owner)
return collection.get_messages_for_source(source) | apache-2.0 | -6,807,122,138,421,897,000 | 27.612245 | 79 | 0.726365 | false | 3.344869 | false | false | false |
mhvk/astropy | astropy/utils/metadata.py | 5 | 18215 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""
from functools import wraps
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars
__all__ = ['MergeConflictError', 'MergeConflictWarning', 'MERGE_STRATEGIES',
'common_dtype', 'MergePlus', 'MergeNpConcatenate', 'MergeStrategy',
'MergeStrategyMeta', 'enable_merge_strategies', 'merge', 'MetaData',
'MetaAttribute']
class MergeConflictError(TypeError):
pass
class MergeConflictWarning(AstropyWarning):
pass
MERGE_STRATEGIES = []
def common_dtype(arrs):
"""
Use numpy to find the common dtype for a list of ndarrays.
Only allow arrays within the following fundamental numpy data types:
``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``
Parameters
----------
arrs : list of ndarray
Arrays for which to find the common dtype
Returns
-------
dtype_str : str
        String representation of dtype (dtype ``str`` attribute)
"""
def dtype(arr):
return getattr(arr, 'dtype', np.dtype('O'))
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types)
for arr in arrs)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [dtype(arr).name for arr in arrs]
tme = MergeConflictError(f'Arrays have incompatible types {incompat_types}')
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for i, arr in enumerate(arrs):
if arr.dtype.kind in ('S', 'U'):
arrs[i] = [('0' if arr.dtype.kind == 'U' else b'0') *
dtype_bytes_or_chars(arr.dtype)]
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
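# Illustrative behaviour (the exact byte-order prefix depends on the platform):
# mixing integer and floating point arrays yields the common float dtype,
#
#     >>> common_dtype([np.array([1, 2]), np.array([3.0])])
#     '<f8'
#
# while mixing, e.g., a string array with a numeric one raises
# MergeConflictError.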
class MergeStrategyMeta(type):
"""
Metaclass that registers MergeStrategy subclasses into the
MERGE_STRATEGIES registry.
"""
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Wrap ``merge`` classmethod to catch any exception and re-raise as
# MergeConflictError.
if 'merge' in members and isinstance(members['merge'], classmethod):
orig_merge = members['merge'].__func__
@wraps(orig_merge)
def merge(cls, left, right):
try:
return orig_merge(cls, left, right)
except Exception as err:
raise MergeConflictError(err)
cls.merge = classmethod(merge)
# Register merging class (except for base MergeStrategy class)
if 'types' in members:
types = members['types']
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
return cls
class MergeStrategy(metaclass=MergeStrategyMeta):
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
class MergePlus(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using the plus operator. This
merge strategy is globally enabled by default.
"""
types = [(list, list), (tuple, tuple)]
enabled = True
@classmethod
def merge(cls, left, right):
return left + right
class MergeNpConcatenate(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using np.concatenate. This
merge strategy is globally enabled by default.
This will upcast a list or tuple to np.ndarray and the output is
always ndarray.
"""
types = [(np.ndarray, np.ndarray),
(np.ndarray, (list, tuple)),
((list, tuple), np.ndarray)]
enabled = True
@classmethod
def merge(cls, left, right):
left, right = np.asanyarray(left), np.asanyarray(right)
common_dtype([left, right]) # Ensure left and right have compatible dtype
return np.concatenate([left, right])
def _both_isinstance(left, right, cls):
return isinstance(left, cls) and isinstance(right, cls)
def _not_equal(left, right):
try:
return bool(left != right)
except Exception:
return True
class _EnableMergeStrategies:
def __init__(self, *merge_strategies):
self.merge_strategies = merge_strategies
self.orig_enabled = {}
for left_type, right_type, merge_strategy in MERGE_STRATEGIES:
if issubclass(merge_strategy, merge_strategies):
self.orig_enabled[merge_strategy] = merge_strategy.enabled
merge_strategy.enabled = True
def __enter__(self):
pass
def __exit__(self, type, value, tb):
for merge_strategy, enabled in self.orig_enabled.items():
merge_strategy.enabled = enabled
def enable_merge_strategies(*merge_strategies):
"""
Context manager to temporarily enable one or more custom metadata merge
strategies.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), # left side types
... (int, float)) # right side types
... @classmethod
... def merge(cls, left, right):
... return [left, right]
By defining this class the merge strategy is automatically registered to be
available for use in merging. However, by default new merge strategies are
*not enabled*. This prevents inadvertently changing the behavior of
unrelated code that is performing metadata merge operations.
In order to use the new merge strategy, use this context manager as in the
following example::
>>> from astropy.table import Table, vstack
>>> from astropy.utils.metadata import enable_merge_strategies
>>> t1 = Table([[1]], names=['a'])
>>> t2 = Table([[2]], names=['a'])
>>> t1.meta = {'m': 1}
>>> t2.meta = {'m': 2}
>>> with enable_merge_strategies(MergeNumbersAsList):
... t12 = vstack([t1, t2])
>>> t12.meta['m']
[1, 2]
One can supply further merge strategies as additional arguments to the
context manager.
As a convenience, the enabling operation is actually done by checking
whether the registered strategies are subclasses of the context manager
arguments. This means one can define a related set of merge strategies and
then enable them all at once by enabling the base class. As a trivial
example, *all* registered merge strategies can be enabled with::
>>> with enable_merge_strategies(MergeStrategy):
... t12 = vstack([t1, t2])
Parameters
----------
*merge_strategies : `~astropy.utils.metadata.MergeStrategy`
Merge strategies that will be enabled.
"""
return _EnableMergeStrategies(*merge_strategies)
def _warn_str_func(key, left, right):
out = ('Cannot merge meta key {0!r} types {1!r}'
' and {2!r}, choosing {0}={3!r}'
.format(key, type(left), type(right), right))
return out
def _error_str_func(key, left, right):
out = f'Cannot merge meta key {key!r} types {type(left)!r} and {type(right)!r}'
return out
def merge(left, right, merge_func=None, metadata_conflicts='warn',
warn_str_func=_warn_str_func,
error_str_func=_error_str_func):
"""
Merge the ``left`` and ``right`` metadata objects.
This is a simplistic and limited implementation at this point.
"""
if not _both_isinstance(left, right, dict):
raise MergeConflictError('Can only merge two dict-based objects')
out = deepcopy(left)
for key, val in right.items():
# If no conflict then insert val into out dict and continue
if key not in out:
out[key] = deepcopy(val)
continue
# There is a conflict that must be resolved
if _both_isinstance(left[key], right[key], dict):
out[key] = merge(left[key], right[key], merge_func,
metadata_conflicts=metadata_conflicts)
else:
try:
if merge_func is None:
for left_type, right_type, merge_cls in MERGE_STRATEGIES:
if not merge_cls.enabled:
continue
if (isinstance(left[key], left_type) and
isinstance(right[key], right_type)):
out[key] = merge_cls.merge(left[key], right[key])
break
else:
raise MergeConflictError
else:
out[key] = merge_func(left[key], right[key])
except MergeConflictError:
                # Pick the metadata item that is not None. If both are not None
                # and they are equal, there is no conflict; if they are
                # different, there is a conflict and we pick the one on the
                # right (or warn / raise, depending on metadata_conflicts).
if left[key] is None:
# This may not seem necessary since out[key] gets set to
# right[key], but not all objects support != which is
# needed for one of the if clauses.
out[key] = right[key]
elif right[key] is None:
out[key] = left[key]
elif _not_equal(left[key], right[key]):
if metadata_conflicts == 'warn':
warnings.warn(warn_str_func(key, left[key], right[key]),
MergeConflictWarning)
elif metadata_conflicts == 'error':
raise MergeConflictError(error_str_func(key, left[key], right[key]))
elif metadata_conflicts != 'silent':
raise ValueError('metadata_conflicts argument must be one '
'of "silent", "warn", or "error"')
out[key] = right[key]
else:
out[key] = right[key]
return out
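# --- Editor sketch (not in the original source): basic ``merge`` behaviour.
# Plain floats are not handled by any merge strategy here, so with
# ``metadata_conflicts='silent'`` the right-hand value wins on conflict.
# The example keys and values are hypothetical.
def _example_merge_usage():
    left = {'instrument': 'X', 'exposure': 30.0}
    right = {'instrument': 'X', 'exposure': 45.0}
    return merge(left, right, metadata_conflicts='silent')  # exposure -> 45.0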
class MetaData:
"""
A descriptor for classes that have a ``meta`` property.
This can be set to any valid `~collections.abc.Mapping`.
Parameters
----------
doc : `str`, optional
Documentation for the attribute of the class.
Default is ``""``.
.. versionadded:: 1.2
copy : `bool`, optional
        If ``True`` the value is deepcopied before setting, otherwise it is
        saved as a reference.
Default is ``True``.
.. versionadded:: 1.2
"""
def __init__(self, doc="", copy=True):
self.__doc__ = doc
self.copy = copy
def __get__(self, instance, owner):
if instance is None:
return self
if not hasattr(instance, '_meta'):
instance._meta = OrderedDict()
return instance._meta
def __set__(self, instance, value):
if value is None:
instance._meta = OrderedDict()
else:
if isinstance(value, Mapping):
if self.copy:
instance._meta = deepcopy(value)
else:
instance._meta = value
else:
raise TypeError("meta attribute must be dict-like")
class MetaAttribute:
"""
Descriptor to define custom attribute which gets stored in the object
``meta`` dict and can have a defined default.
This descriptor is intended to provide a convenient way to add attributes
to a subclass of a complex class such as ``Table`` or ``NDData``.
This requires that the object has an attribute ``meta`` which is a
dict-like object. The value of the MetaAttribute will be stored in a
new dict meta['__attributes__'] that is created when required.
Classes that define MetaAttributes are encouraged to support initializing
the attributes via the class ``__init__``. For example::
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, MetaAttribute):
setattr(self, attr, kwargs.pop(attr))
The name of a ``MetaAttribute`` cannot be the same as any of the following:
- Keyword argument in the owner class ``__init__``
- Method or attribute of the "parent class", where the parent class is
taken to be ``owner.__mro__[1]``.
:param default: default value
"""
def __init__(self, default=None):
self.default = default
def __get__(self, instance, owner):
# When called without an instance, return self to allow access
# to descriptor attributes.
if instance is None:
return self
# If default is None and value has not been set already then return None
        # without touching meta['__attributes__'] at all. This helps e.g.
# with the Table._hidden_columns attribute so it doesn't auto-create
# meta['__attributes__'] always.
if (self.default is None
and self.name not in instance.meta.get('__attributes__', {})):
return None
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
try:
value = attributes[self.name]
except KeyError:
if self.default is not None:
attributes[self.name] = deepcopy(self.default)
# Return either specified default or None
value = attributes.get(self.name)
return value
def __set__(self, instance, value):
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
attributes[self.name] = value
def __delete__(self, instance):
# Remove this attribute from meta['__attributes__'] if it exists.
if '__attributes__' in instance.meta:
attrs = instance.meta['__attributes__']
if self.name in attrs:
del attrs[self.name]
# If this was the last attribute then remove the meta key as well
if not attrs:
del instance.meta['__attributes__']
def __set_name__(self, owner, name):
import inspect
params = [param.name for param in inspect.signature(owner).parameters.values()
if param.kind not in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL)]
# Reject names from existing params or best guess at parent class
if name in params or hasattr(owner.__mro__[1], name):
            raise ValueError(f'{name} not allowed as {self.__class__.__name__} name')
self.name = name
def __repr__(self):
return f'<{self.__class__.__name__} name={self.name} default={self.default}>'
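# --- Editor sketch (not in the original source): defining and using a
# MetaAttribute. ``ExampleData`` and the ``unit`` attribute are hypothetical;
# the __init__ pattern follows the recommendation in the MetaAttribute docstring.
def _example_metaattribute_usage():
    class ExampleData:
        meta = MetaData()
        unit = MetaAttribute(default='m')
        def __init__(self, **kwargs):
            for attr in list(kwargs):
                descr = getattr(self.__class__, attr, None)
                if isinstance(descr, MetaAttribute):
                    setattr(self, attr, kwargs.pop(attr))
    d = ExampleData(unit='km')  # stored in d.meta['__attributes__']['unit']
    value = d.unit              # 'km'
    del d.unit                  # next access falls back to the default 'm'
    return value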
| bsd-3-clause | -8,522,740,677,389,237,000 | 35.284861 | 92 | 0.604063 | false | 4.373349 | false | false | false |
muodov/wildfire2 | sample.py | 1 | 1454 | import random
import sys
someconst = 3
def inc(x):
return x + 1
def very_understandable_function(x=5):
def get_eleet():
return x
import platform
print 'Hello, %s (%s)' % (platform.platform(), platform.architecture()[0])
r = 10
print 'I like doing stuff with number: %r' % (r % 42)
for i in range(r):
print i + get_eleet(), get_eleet()
if (r % 10):
print 'wUuUUt'
else:
print 'dont care!'
with open('success', 'w') as f:
f.write('yoooo seems to work bra!')
return 0xdeadbeef
#print 'aaa'
class NewStyleClass(object):
#print 'newstyle'
def __init__(self):
super(NewStyleClass, self).__init__()
def doit(self):
print 'i am new'
class NewStyleClassCustomInit(object):
#print 'newstyle'
def __init__(self):
pass
def doit(self):
print 'i am new'
#print 'between'
class OldStyleClass:
#print 'oldstyle'
def doit(self):
print 'i am old'
#print 'bbb'
def generate_random_strings():
"""Generate a random string"""
print 'ucucuga'
charset = map(chr, range(0, 0x100))
print 'ucucuga1'
return ''.join(random.choice(charset) for i in range(random.randint(10, 100)))
if __name__ == '__main__':
very_understandable_function(293)
NewStyleClass().doit()
OldStyleClass().doit()
for i in xrange(10):
print inc(i)
generate_random_strings()
print someconst
| gpl-2.0 | -1,341,155,777,023,481,600 | 21.369231 | 82 | 0.595598 | false | 3.312073 | false | false | false |
lmaurits/phyltr | tests/clades_tests.py | 1 | 1632 | from phyltr import build_pipeline
from phyltr.commands.clades import Clades
def test_init_from_args():
clades = Clades.init_from_args("")
assert clades.opts.frequency == 0.0
assert clades.opts.ages == False
clades = Clades.init_from_args("--ages")
assert clades.opts.ages == True
clades = Clades.init_from_args("-f 0.42")
assert clades.opts.frequency == 0.42
def test_clades(basictrees):
clades = Clades(ages=True)
# Spin through all trees
list(clades.consume(basictrees))
# Check that the computed probabilities agree
# with hand calculated equivalents
assert clades.cp.clade_probs["A B"] == 4.0 / 6.0
assert clades.cp.clade_probs["A C"] == 2.0 / 6.0
assert clades.cp.clade_probs["A B C"] == 5.0 / 6.0
assert clades.cp.clade_probs["E F"] == 3.0 / 6.0
assert clades.cp.clade_probs["A C"] == 2.0 / 6.0
assert clades.cp.clade_probs["D F"] == 1.0 / 6.0
assert clades.cp.clade_probs["D E"] == 1.0 / 6.0
assert clades.cp.clade_probs["C E"] == 1.0 / 6.0
assert clades.cp.clade_probs["D E F"] == 5.0 / 6.0
assert clades.cp.clade_probs["A B C D E F"] == 6.0 / 6.0
def test_degenerate_clades(treefilenewick):
clades = Clades(ages=True)
list(clades.consume(treefilenewick('single_taxon.trees')))
def test_categorical_annotation(treefilenewick):
    # This is just to make sure the clade probability calculator doesn't
# erroneously try to calculate means etc. of categorical annotations
list(build_pipeline(
"annotate -f tests/argfiles/categorical_annotation.csv -k taxon | clades",
treefilenewick('basic.trees')))
| gpl-3.0 | -6,552,524,085,940,536,000 | 38.804878 | 82 | 0.666667 | false | 2.961887 | false | false | false |
luhn/AutobahnPython | examples/twisted/websocket/echo_variants/client_with_proxy.py | 18 | 2286 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!".encode('utf8'))
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://localhost:9000")
sys.exit(1)
if len(sys.argv) < 3:
print("Need the Proxy, i.e. 192.168.1.100:8050")
sys.exit(1)
proxyHost, proxyPort = sys.argv[2].split(":")
proxy = {'host': proxyHost, 'port': int(proxyPort)}
if len(sys.argv) > 3 and sys.argv[3] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WebSocketClientFactory(sys.argv[1],
proxy = proxy,
debug = debug,
debugCodePaths = debug)
# uncomment to use Hixie-76 protocol
#factory.setProtocolOptions(allowHixie76 = True, version = 0)
factory.protocol = EchoClientProtocol
connectWS(factory)
reactor.run()
| apache-2.0 | -6,353,372,803,692,614,000 | 30.75 | 79 | 0.583552 | false | 4.217712 | false | false | false |
fffonion/MAClient | plugins/query_tool.py | 1 | 9270 | # coding:utf-8
from _prototype import plugin_prototype
import sys
import re
import os
from cross_platform import *
# start meta
__plugin_name__ = 'query infomation of player'
__author = 'fffonion'
__version__ = 0.40
hooks = {}
extra_cmd = {'q_item':'query_item', 'qi':'query_item', 'q_holo':'query_holo', 'qh':'query_holo', 'qgc':'query_guild_contribution','q_rank':'query_rank','qr':'query_rank'}
# end meta
# query item count
def query_item(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
if 'player' not in plugin_vals or not plugin_vals['player'].item.db:
logger.error('玩家信息未初始化,请随便执行一个操作再试')
return
print(du8('%-17s%s' % ('物品', '个数')))
print('-' * 30)
for (i, [n, j]) in plugin_vals['player'].item.db.items():
if j > 0: # has
# calc utf-8 length
l1 = len(n) # ascii length
n = raw_du8(n)
l2 = len(n) # char count
print(safestr('%s%s%s' % (n, ' ' * int(15 - l2 - (l1 - l2) / 2), j)))
return do
# query holo cards
def query_holo(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
if 'player' not in plugin_vals or not plugin_vals['player'].item.db:
logger.error('玩家信息未初始化,请随便执行一个操作再试')
return
print(du8('%s' % ('当前拥有以下闪卡')))
print('-' * 30)
_player = plugin_vals['player']
cache = []
for c in _player.card.cards:
if c.holography == 1:
ca = _player.card.db[c.master_card_id]
cache.append((ca[0], ca[1], c.lv, c.hp, c.power))
cache = sorted(cache, key = lambda l:(l[1], l[2]))
print('\n'.join(map(lambda x:du8('[%s] ☆%d Lv%d HP:%d ATK:%d' % x), cache)))
return do
def query_guild_contribution(plugin_vals):
def do(*args):
lines = []
if plugin_vals['loc'][:2] == 'cn':
lines += open('events_cn.log').read().split('\n')
elif plugin_vals['loc'] == 'tw':
lines += open('events_tw.log').read().split('\n')
else:
print(du8('不支持%s的查询'%plugin_vals['loc']))
return
if os.path.exists('.IGF.log'):
lines += open('.IGF.log').read().split('\n')
pname, total = plugin_vals['player'].item.db[8001]
cnt = 0
for l in lines:
c = re.findall(pname+'\]\:\+(\d+)\(',l)
if c:
cnt += int(c[0])
print(du8('公会贡献: %d/ %s'%(cnt,total or '?')))
return do
def query_rank(plugin_vals):
def do(*args):
logger = plugin_vals['logger']
loc = plugin_vals['loc']
if loc[:2] not in ['cn','tw']:
logger.error('排位查询不支持日服和韩服')
return
if loc == 'tw':
import _query_rank_tw_lib as _lib
import re
import urllib
if PYTHON3:
import urllib.request as urllib2
opener = urllib2.build_opener(urllib2.ProxyHandler(urllib.request.getproxies()))
else:
import urllib2
opener = urllib2.build_opener(urllib2.ProxyHandler(urllib.getproxies()))
def show_it(content):
strl = '\n%s\n%s\n' %(_lib.query_title(content),'-'*20)
for (k, v) in _lib.query_regex[_guild_mode + _coll_mode if not _country_mode else -1]:
try:
strl += '%s %s\n' % (k, v(content))
except IndexError:
pass
logger.info(strl)
_header = _lib.broswer_headers
_header['cookie'] = plugin_vals['cookie']
_header['User-Agent'] = plugin_vals['poster'].header['User-Agent']
_guild_mode = 2 if raw_inputd('查询个人排名(s)(默认)还是公会排名(g)> ') == 'g' else 0
_country_mode = 0
if not _guild_mode and _lib.query_country_id:#build country selection
ctotal = len(_lib.query_country_id)
while True:
print(du8('\n'.join(['%d.%s' % (i + 1, _lib.query_country_id[i][0]) for i in range(ctotal)])))
_sel = raw_input('> ')
if _sel.isdigit() and 0 < int(_sel) <= ctotal:
_country_mode = _lib.query_country_id[int(_sel) - 1][1]
break
while True:
_goto = raw_inputd('输入要查询的排名开始数,按回车显示自己所在区域> ')
if not _goto or (
_goto.isdigit() and \
((0<int(_goto)<=20000 and not _guild_mode) or (0<int(_goto)<=2000 and _guild_mode))):
break
logger.error('请输入%d以内0以上的数字' % (2000 if _guild_mode else 20000))
#automatically judge
if not _country_mode and ((_guild_mode and _lib.query_rev[2] and _lib.query_rev[3]) or \
(not _guild_mode and _lib.query_rev[0] and _lib.query_rev[1]) or _goto):
_coll_mode = 1 if raw_inputd('查询收集品排名(c)(默认)还是妖精加权排名(f)> ') != 'f' else 0
else:
_coll_mode = (1 if _lib.query_rev[3] else 0) if _guild_mode else \
(1 if _lib.query_rev[1] else 0)
#request
if _country_mode:
if _goto:
_gurl = _lib.query_goto[-1]
x = opener.open(urllib2.Request(_gurl % (_goto, _country_mode),headers = _header)).read()
else:
_gurl = _lib.query_country
x = opener.open(urllib2.Request(_gurl % _country_mode,headers = _header)).read()
elif _goto:
_gurl = _lib.query_goto[_guild_mode + _coll_mode]
x = opener.open(urllib2.Request(_gurl % _goto,headers = _header)).read()
# if True:
# x = open(r'z:/test.htm').read()
else:
_rev = _lib.query_rev[_guild_mode + _coll_mode]
if not _rev:
logger.error('版本不存在,可能是当前活动没有该排名\n请尝试升级_query_rank_lib,或指定排名区域查询')
return
if _lib.now >= _lib.query_lifetime:
logger.error('查询库已过期,请升级_query_rank_lib为新版本\n或指定排名区域查询')
return
_url = _lib.query_base % _rev
x = opener.open(urllib2.Request(_url, headers = _header)).read()
try:
show_it(_lib.pre(x))
except IndexError:
logger.warning('匹配失败,请重新登录;如果问题仍然存在,请更新插件')
else:#cn
from xml2dict import XML2Dict
po = plugin_vals['poster']
po.post('menu/menulist')
sel_rankid = 0
to_top = False
while True:
resp, ct = po.post('ranking/ranking', postdata='move=%d&ranktype_id=%d&top=%d' % (
1 if sel_rankid == 0 else 0, sel_rankid, 1 if to_top else 0))
ct = XML2Dict.fromstring(ct).response.body.ranking
ranktype_id = int(ct.ranktype_id)
allranks = ct.ranktype_list.ranktype
rank_name = allranks[ranktype_id - 1].title
try:
_user = ct.user_list.user
except KeyError:
logger.warning('暂未列入排行榜,请继续努力ww')
return
if not to_top:
me = [_i for _i in _user if _i.id == plugin_vals['player'].id][0]
logger.info(rank_name +
(not to_top and '\n排名:%s 点数:%s\n' % (me.rank, me.battle_event_point) or '\n') +
'可见区域内 Up:%s/%s Down:%s/%s' % (
_user[0].rank, _user[0].battle_event_point,
_user[-1].rank, _user[-1].battle_event_point)
)
while True:
_inp = raw_inputd('\n输入序号查询其他排行:(9.排名至顶 0.退出)\n%s\n> ' %
('\n'.join(map(lambda x : '%s.%s' % (x.id, x.title), allranks)))
) or '0'
if not _inp.isdigit():
continue
else:
if _inp == '0':
return
if _inp == '9':
to_top = True
else:
sel_rankid = int(_inp)
to_top = False
break
return do | gpl-3.0 | 6,578,374,010,661,083,000 | 42.902564 | 170 | 0.451108 | false | 3.327252 | false | false | false |
samitnuk/studentsdb | students/admin.py | 1 | 1802 | from django.contrib import admin
from django.core.urlresolvers import reverse
from django.forms import ModelForm, ValidationError
from .models import Student, Group, Exam, MonthJournal
class StudentAdminForm(ModelForm):
def clean_student_group(self):
"""Check if student is leader in any group.
If yes, then ensure it's the same as selected group."""
# get group where current student is leader
groups = Group.objects.filter(leader=self.instance)
if len(groups) > 0 and \
self.cleaned_data['student_group'] != groups[0]:
raise ValidationError("Студент є старостою іншої групи.",
code='invalid')
return self.cleaned_data['student_group']
class StudentAdmin(admin.ModelAdmin):
list_display = ['last_name', 'first_name', 'ticket', 'student_group']
list_display_links = ['last_name', 'first_name']
list_editable = ['student_group']
ordering = ['last_name']
list_filter = ['student_group']
list_per_page = 10
search_fields = ['last_name', 'first_name', 'middle_name', 'ticket',
'notes']
form = StudentAdminForm
def view_on_site(self, obj):
return reverse('students_edit', kwargs={'pk': obj.id})
class GroupAdmin(admin.ModelAdmin):
list_display = ['title', 'leader']
list_display_links = ['title']
list_editable = ['leader']
ordering = ['title']
# list_filter = ['title']
list_per_page = 10
search_fields = ['tile', 'leader']
def view_on_site(self, obj):
return reverse('groups_edit', kwargs={'pk': obj.id})
admin.site.register(Student, StudentAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Exam)
admin.site.register(MonthJournal)
| mit | -4,402,977,145,337,429,000 | 32.490566 | 73 | 0.642817 | false | 3.705637 | false | false | false |
drammock/mnefun | mnefun/_ssp.py | 3 | 26590 | """Preprocessing (SSP and filtering)."""
from collections import Counter
import os
import os.path as op
import warnings
import numpy as np
from mne import (concatenate_raws, compute_proj_evoked, compute_proj_epochs,
write_proj, pick_types, Epochs, compute_proj_raw, read_proj,
make_fixed_length_events, write_events)
from mne.preprocessing import find_ecg_events, find_eog_events
from mne.filter import filter_data
from mne.io import read_raw_fif
from mne.viz import plot_drop_log
from mne.utils import _pl
from ._epoching import _raise_bad_epochs
from ._paths import get_raw_fnames, get_bad_fname
from ._utils import (get_args, _fix_raw_eog_cals, _handle_dict, _safe_remove,
_get_baseline, _restrict_reject_flat, _get_epo_kwargs)
def _get_fir_kwargs(fir_design):
"""Get FIR kwargs in backward-compatible way."""
fir_kwargs = dict()
old_kwargs = dict()
if 'fir_design' in get_args(filter_data):
fir_kwargs.update(fir_design=fir_design)
old_kwargs.update(fir_design='firwin2')
elif fir_design != 'firwin2':
raise RuntimeError('cannot use fir_design=%s with old MNE'
% fir_design)
return fir_kwargs, old_kwargs
# noinspection PyPep8Naming
def _raw_LRFCP(raw_names, sfreq, l_freq, h_freq, n_jobs, n_jobs_resample,
projs, bad_file, disp_files=False, method='fir',
filter_length=32768, apply_proj=True, preload=True,
force_bads=False, l_trans=0.5, h_trans=0.5,
allow_maxshield=False, phase='zero-double', fir_window='hann',
fir_design='firwin2', pick=True,
skip_by_annotation=('bad', 'skip')):
"""Helper to load, filter, concatenate, then project raw files"""
from mne.io.proj import _needs_eeg_average_ref_proj
from ._sss import _read_raw_prebad
if isinstance(raw_names, str):
raw_names = [raw_names]
if disp_files:
print(f' Loading and filtering {len(raw_names)} '
f'file{_pl(raw_names)}.')
raw = list()
for ri, rn in enumerate(raw_names):
if isinstance(bad_file, tuple):
p, subj, kwargs = bad_file
r = _read_raw_prebad(p, subj, rn, disp=(ri == 0), **kwargs)
else:
r = read_raw_fif(rn, preload=True, allow_maxshield='yes')
r.load_bad_channels(bad_file, force=force_bads)
if pick:
r.pick_types(meg=True, eeg=True, eog=True, ecg=True, exclude=[])
if _needs_eeg_average_ref_proj(r.info):
r.set_eeg_reference(projection=True)
if sfreq is not None:
r.resample(sfreq, n_jobs=n_jobs_resample, npad='auto')
fir_kwargs = _get_fir_kwargs(fir_design)[0]
if l_freq is not None or h_freq is not None:
r.filter(l_freq=l_freq, h_freq=h_freq, picks=None,
n_jobs=n_jobs, method=method,
filter_length=filter_length, phase=phase,
l_trans_bandwidth=l_trans, h_trans_bandwidth=h_trans,
fir_window=fir_window, **fir_kwargs)
raw.append(r)
_fix_raw_eog_cals(raw)
raws_del = raw[1:]
raw = concatenate_raws(raw, preload=preload)
for r in raws_del:
del r
if disp_files and apply_proj and len(projs) > 0:
print(' Adding and applying projectors.')
raw.add_proj(projs)
if apply_proj:
raw.apply_proj()
return raw
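# --- Editor sketch (not in the original source): a minimal _raw_LRFCP call.
# File names, cutoff frequencies and job counts below are hypothetical.
def _example_raw_lrfcp():
    return _raw_LRFCP(
        raw_names=['run_01_raw_sss.fif', 'run_02_raw_sss.fif'],
        sfreq=None, l_freq=0.1, h_freq=40., n_jobs=1, n_jobs_resample=1,
        projs=[], bad_file=None)  # band-pass, concatenate, no projectors yet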
def compute_proj_wrap(epochs, average, **kwargs):
if average:
return compute_proj_evoked(epochs.average(), **kwargs)
else:
return compute_proj_epochs(epochs, **kwargs)
def _get_pca_dir(p, subj):
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
if not op.isdir(pca_dir):
os.mkdir(pca_dir)
return pca_dir
def _get_proj_kwargs(p):
proj_kwargs = dict()
p_sl = 1
if 'meg' not in get_args(compute_proj_raw):
if p.proj_meg != 'separate':
raise RuntimeError('MNE is too old for proj_meg option')
else:
proj_kwargs['meg'] = p.proj_meg
if p.proj_meg == 'combined':
p_sl = 2
return proj_kwargs, p_sl
def _compute_erm_proj(p, subj, projs, kind, bad_file, remove_existing=False,
disp_files=None):
disp_files = p.disp_files if disp_files is None else disp_files
assert kind in ('sss', 'raw')
proj_nums = _proj_nums(p, subj)
proj_kwargs, p_sl = _get_proj_kwargs(p)
empty_names = get_raw_fnames(p, subj, kind, 'only')
fir_kwargs, _ = _get_fir_kwargs(p.fir_design)
flat = _handle_dict(p.flat, subj)
raw = _raw_LRFCP(
raw_names=empty_names, sfreq=p.proj_sfreq,
l_freq=p.cont_hp, h_freq=p.cont_lp,
n_jobs=p.n_jobs_fir, apply_proj=not remove_existing,
n_jobs_resample=p.n_jobs_resample, projs=projs,
bad_file=bad_file, disp_files=disp_files, method='fir',
filter_length=p.filter_length, force_bads=True,
l_trans=p.cont_hp_trans, h_trans=p.cont_lp_trans,
phase=p.phase, fir_window=p.fir_window,
skip_by_annotation='edge', **fir_kwargs)
if remove_existing:
raw.del_proj()
raw.pick_types(meg=True, eeg=False, exclude=()) # remove EEG
use_reject, reject_kind = p.cont_reject, 'p.cont_reject'
if use_reject is None:
use_reject, reject_kind = p.reject, 'p.reject'
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(use_reject, subj), flat, raw)
bad = False
pr = []
try:
pr = compute_proj_raw(raw, duration=1, n_grad=proj_nums[2][0],
n_mag=proj_nums[2][1], n_eeg=proj_nums[2][2],
reject=use_reject, flat=use_flat,
n_jobs=p.n_jobs_mkl, **proj_kwargs)
except RuntimeError as exc:
if 'No good epochs' not in str(exc):
raise
bad = True
if bad:
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=0, tmax=1. - 1. / raw.info['sfreq'],
proj=False, baseline=None, reject=use_reject,
flat=use_flat).drop_bad()
_raise_bad_epochs(
raw, epochs, events,
f'1-sec empty room via {reject_kind} = {use_reject} (consider '
f'changing p.cont_reject)')
assert len(pr) == np.sum(proj_nums[2][::p_sl])
# When doing eSSS it's a bit weird to put this in pca_dir but why not
pca_dir = _get_pca_dir(p, subj)
cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
write_proj(cont_proj, pr)
return pr
def do_preprocessing_combined(p, subjects, run_indices):
"""Do preprocessing on all raw files together.
Calculates projection vectors to use to clean data.
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
drop_logs = list()
for si, subj in enumerate(subjects):
proj_nums = _proj_nums(p, subj)
ecg_channel = _handle_dict(p.ecg_channel, subj)
flat = _handle_dict(p.flat, subj)
if p.disp_files:
print(' Preprocessing subject %g/%g (%s).'
% (si + 1, len(subjects), subj))
pca_dir = _get_pca_dir(p, subj)
bad_file = get_bad_fname(p, subj, check_exists=False)
# Create SSP projection vectors after marking bad channels
raw_names = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
empty_names = get_raw_fnames(p, subj, 'sss', 'only')
for r in raw_names + empty_names:
if not op.isfile(r):
raise NameError('File not found (' + r + ')')
fir_kwargs, old_kwargs = _get_fir_kwargs(p.fir_design)
if isinstance(p.auto_bad, float):
print(' Creating post SSS bad channel file:\n'
' %s' % bad_file)
# do autobad
raw = _raw_LRFCP(raw_names, p.proj_sfreq, None, None, p.n_jobs_fir,
p.n_jobs_resample, list(), None, p.disp_files,
method='fir', filter_length=p.filter_length,
apply_proj=False, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window,
pick=True, skip_by_annotation='edge',
**fir_kwargs)
events = fixed_len_events(p, raw)
rtmin = p.reject_tmin \
if p.reject_tmin is not None else p.tmin
rtmax = p.reject_tmax \
if p.reject_tmax is not None else p.tmax
# do not mark eog channels bad
meg, eeg = 'meg' in raw, 'eeg' in raw
picks = pick_types(raw.info, meg=meg, eeg=eeg, eog=False,
exclude=[])
assert p.auto_bad_flat is None or isinstance(p.auto_bad_flat, dict)
assert p.auto_bad_reject is None or \
isinstance(p.auto_bad_reject, dict) or \
p.auto_bad_reject == 'auto'
if p.auto_bad_reject == 'auto':
print(' Auto bad channel selection active. '
'Will try using Autoreject module to '
'compute rejection criterion.')
try:
from autoreject import get_rejection_threshold
except ImportError:
raise ImportError(' Autoreject module not installed.\n'
' Noisy channel detection parameter '
' not defined. To use autobad '
' channel selection either define '
' rejection criteria or install '
' Autoreject module.\n')
print(' Computing thresholds.\n', end='')
temp_epochs = Epochs(
raw, events, event_id=None, tmin=rtmin, tmax=rtmax,
baseline=_get_baseline(p), proj=True, reject=None,
flat=None, preload=True, decim=1)
kwargs = dict()
if 'verbose' in get_args(get_rejection_threshold):
kwargs['verbose'] = False
reject = get_rejection_threshold(temp_epochs, **kwargs)
reject = {kk: vv for kk, vv in reject.items()}
elif p.auto_bad_reject is None and p.auto_bad_flat is None:
raise RuntimeError('Auto bad channel detection active. Noisy '
'and flat channel detection '
'parameters not defined. '
'At least one criterion must be defined.')
else:
reject = p.auto_bad_reject
if 'eog' in reject.keys():
reject.pop('eog', None)
epochs = Epochs(raw, events, None, tmin=rtmin, tmax=rtmax,
baseline=_get_baseline(p), picks=picks,
reject=reject, flat=p.auto_bad_flat,
proj=True, preload=True, decim=1,
reject_tmin=rtmin, reject_tmax=rtmax)
# channel scores from drop log
drops = Counter([ch for d in epochs.drop_log for ch in d])
# get rid of non-channel reasons in drop log
scores = {kk: vv for kk, vv in drops.items() if
kk in epochs.ch_names}
ch_names = np.array(list(scores.keys()))
# channel scores expressed as percentile and rank ordered
counts = (100 * np.array([scores[ch] for ch in ch_names], float) /
len(epochs.drop_log))
order = np.argsort(counts)[::-1]
# boolean array masking out channels with <= % epochs dropped
mask = counts[order] > p.auto_bad
badchs = ch_names[order[mask]]
if len(badchs) > 0:
# Make sure we didn't get too many bad MEG or EEG channels
for m, e, thresh in zip([True, False], [False, True],
[p.auto_bad_meg_thresh,
p.auto_bad_eeg_thresh]):
picks = pick_types(epochs.info, meg=m, eeg=e, exclude=[])
if len(picks) > 0:
ch_names = [epochs.ch_names[pp] for pp in picks]
n_bad_type = sum(ch in ch_names for ch in badchs)
if n_bad_type > thresh:
stype = 'meg' if m else 'eeg'
raise RuntimeError('Too many bad %s channels '
'found: %s > %s'
% (stype, n_bad_type, thresh))
print(' The following channels resulted in greater than '
'{:.0f}% trials dropped:\n'.format(p.auto_bad * 100))
print(badchs)
with open(bad_file, 'w') as f:
f.write('\n'.join(badchs))
if not op.isfile(bad_file):
print(' Clearing bad channels (no file %s)'
% op.sep.join(bad_file.split(op.sep)[-3:]))
bad_file = None
ecg_t_lims = _handle_dict(p.ecg_t_lims, subj)
ecg_f_lims = p.ecg_f_lims
ecg_eve = op.join(pca_dir, 'preproc_ecg-eve.fif')
ecg_epo = op.join(pca_dir, 'preproc_ecg-epo.fif')
ecg_proj = op.join(pca_dir, 'preproc_ecg-proj.fif')
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
get_projs_from = _handle_dict(p.get_projs_from, subj)
if get_projs_from is None:
get_projs_from = np.arange(len(raw_names))
pre_list = [r for ri, r in enumerate(raw_names)
if ri in get_projs_from]
projs = list()
raw_orig = _raw_LRFCP(
raw_names=pre_list, sfreq=p.proj_sfreq, l_freq=None, h_freq=None,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=p.disp_files,
method='fir', filter_length=p.filter_length, force_bads=False,
l_trans=p.hp_trans, h_trans=p.lp_trans, phase=p.phase,
fir_window=p.fir_window, pick=True, skip_by_annotation='edge',
**fir_kwargs)
# Apply any user-supplied extra projectors
if p.proj_extra is not None:
if p.disp_files:
print(' Adding extra projectors from "%s".' % p.proj_extra)
projs.extend(read_proj(op.join(pca_dir, p.proj_extra)))
proj_kwargs, p_sl = _get_proj_kwargs(p)
#
# Calculate and apply ERM projectors
#
if not p.cont_as_esss:
if any(proj_nums[2]):
assert proj_nums[2][2] == 0 # no EEG projectors for ERM
if len(empty_names) == 0:
raise RuntimeError('Cannot compute empty-room projectors '
'from continuous raw data')
if p.disp_files:
print(' Computing continuous projectors using ERM.')
# Use empty room(s), but processed the same way
projs.extend(
_compute_erm_proj(p, subj, projs, 'sss', bad_file))
else:
cont_proj = op.join(pca_dir, 'preproc_cont-proj.fif')
_safe_remove(cont_proj)
#
# Calculate and apply the ECG projectors
#
if any(proj_nums[0]):
if p.disp_files:
print(' Computing ECG projectors...', end='')
raw = raw_orig.copy()
raw.filter(ecg_f_lims[0], ecg_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
skip_by_annotation='edge', **old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
find_kwargs = dict()
if 'reject_by_annotation' in get_args(find_ecg_events):
find_kwargs['reject_by_annotation'] = True
elif len(raw.annotations) > 0:
print(' WARNING: ECG event detection will not make use of '
'annotations, please update MNE-Python')
# We've already filtered the data channels above, but this
# filters the ECG channel
ecg_events = find_ecg_events(
raw, 999, ecg_channel, 0., ecg_f_lims[0], ecg_f_lims[1],
qrs_threshold='auto', return_ecg=False, **find_kwargs)[0]
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(p.ssp_ecg_reject, subj), flat, raw)
ecg_epochs = Epochs(
raw, ecg_events, 999, ecg_t_lims[0], ecg_t_lims[1],
baseline=None, reject=use_reject, flat=use_flat, preload=True)
print(' obtained %d epochs from %d events.' % (len(ecg_epochs),
len(ecg_events)))
if len(ecg_epochs) >= 20:
write_events(ecg_eve, ecg_epochs.events)
ecg_epochs.save(ecg_epo, **_get_epo_kwargs())
desc_prefix = 'ECG-%s-%s' % tuple(ecg_t_lims)
pr = compute_proj_wrap(
ecg_epochs, p.proj_ave, n_grad=proj_nums[0][0],
n_mag=proj_nums[0][1], n_eeg=proj_nums[0][2],
desc_prefix=desc_prefix, **proj_kwargs)
assert len(pr) == np.sum(proj_nums[0][::p_sl])
write_proj(ecg_proj, pr)
projs.extend(pr)
else:
_raise_bad_epochs(raw, ecg_epochs, ecg_events, 'ECG')
del raw, ecg_epochs, ecg_events
else:
_safe_remove([ecg_proj, ecg_eve, ecg_epo])
#
# Next calculate and apply the EOG projectors
#
for idx, kind in ((1, 'EOG'), (3, 'HEOG'), (4, 'VEOG')):
_compute_add_eog(
p, subj, raw_orig, projs, proj_nums[idx], kind, pca_dir,
flat, proj_kwargs, old_kwargs, p_sl)
del proj_nums
# save the projectors
write_proj(all_proj, projs)
#
# Look at raw_orig for trial DQs now, it will be quick
#
raw_orig.filter(p.hp_cut, p.lp_cut, n_jobs=p.n_jobs_fir, method='fir',
filter_length=p.filter_length,
l_trans_bandwidth=p.hp_trans, phase=p.phase,
h_trans_bandwidth=p.lp_trans, fir_window=p.fir_window,
skip_by_annotation='edge', **fir_kwargs)
raw_orig.add_proj(projs)
raw_orig.apply_proj()
# now let's epoch with 1-sec windows to look for DQs
events = fixed_len_events(p, raw_orig)
reject = _handle_dict(p.reject, subj)
use_reject, use_flat = _restrict_reject_flat(reject, flat, raw_orig)
epochs = Epochs(raw_orig, events, None, p.tmin, p.tmax, preload=False,
baseline=_get_baseline(p), reject=use_reject,
flat=use_flat, proj=True)
try:
epochs.drop_bad()
except AttributeError: # old way
epochs.drop_bad_epochs()
drop_logs.append(epochs.drop_log)
del raw_orig
del epochs
if p.plot_drop_logs:
for subj, drop_log in zip(subjects, drop_logs):
plot_drop_log(drop_log, p.drop_thresh, subject=subj)
def _proj_nums(p, subj):
proj_nums = np.array(_handle_dict(p.proj_nums, subj), int)
if proj_nums.shape not in ((3, 3), (4, 3), (5, 3)):
raise ValueError('proj_nums for %s must be an array with shape '
'(3, 3), (4, 3), or (5, 3), got %s'
% (subj, proj_nums.shape))
proj_nums = np.pad(
proj_nums, ((0, 5 - proj_nums.shape[0]), (0, 0)), 'constant')
assert proj_nums.shape == (5, 3)
return proj_nums
def _compute_add_eog(p, subj, raw_orig, projs, eog_nums, kind, pca_dir,
flat, proj_kwargs, old_kwargs, p_sl):
assert kind in ('EOG', 'HEOG', 'VEOG')
bk = dict(EOG='blink').get(kind, kind.lower())
eog_eve = op.join(pca_dir, f'preproc_{bk}-eve.fif')
eog_epo = op.join(pca_dir, f'preproc_{bk}-epo.fif')
eog_proj = op.join(pca_dir, f'preproc_{bk}-proj.fif')
eog_t_lims = _handle_dict(getattr(p, f'{kind.lower()}_t_lims'), subj)
eog_f_lims = _handle_dict(getattr(p, f'{kind.lower()}_f_lims'), subj)
eog_channel = _handle_dict(getattr(p, f'{kind.lower()}_channel'), subj)
thresh = _handle_dict(getattr(p, f'{kind.lower()}_thresh'), subj)
if eog_channel is None and kind != 'EOG':
eog_channel = 'EOG061' if kind == 'HEOG' else 'EOG062'
if eog_nums.any():
if p.disp_files:
print(f' Computing {kind} projectors...', end='')
raw = raw_orig.copy()
raw.filter(eog_f_lims[0], eog_f_lims[1], n_jobs=p.n_jobs_fir,
method='fir', filter_length=p.filter_length,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_window='hann',
skip_by_annotation='edge', **old_kwargs)
raw.add_proj(projs)
raw.apply_proj()
eog_events = find_eog_events(
raw, ch_name=eog_channel, reject_by_annotation=True,
thresh=thresh)
use_reject, use_flat = _restrict_reject_flat(
_handle_dict(p.ssp_eog_reject, subj), flat, raw)
eog_epochs = Epochs(
raw, eog_events, 998, eog_t_lims[0], eog_t_lims[1],
baseline=None, reject=use_reject, flat=use_flat, preload=True)
print(' obtained %d epochs from %d events.' % (len(eog_epochs),
len(eog_events)))
del eog_events
if len(eog_epochs) >= 5:
write_events(eog_eve, eog_epochs.events)
eog_epochs.save(eog_epo, **_get_epo_kwargs())
desc_prefix = f'{kind}-%s-%s' % tuple(eog_t_lims)
pr = compute_proj_wrap(
eog_epochs, p.proj_ave, n_grad=eog_nums[0],
n_mag=eog_nums[1], n_eeg=eog_nums[2],
desc_prefix=desc_prefix, **proj_kwargs)
assert len(pr) == np.sum(eog_nums[::p_sl])
write_proj(eog_proj, pr)
projs.extend(pr)
else:
warnings.warn('Only %d usable EOG events!' % len(eog_epochs))
_safe_remove([eog_proj, eog_eve, eog_epo])
del raw, eog_epochs
else:
_safe_remove([eog_proj, eog_eve, eog_epo])
def apply_preprocessing_combined(p, subjects, run_indices):
"""Actually apply and save the preprocessing (projs, filtering)
Can only run after do_preprocessing_combined is done.
Filters data, adds projection vectors, and saves to disk
(overwriting old files).
Parameters
----------
p : instance of Parameters
Analysis parameters.
subjects : list of str
Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
run_indices : array-like | None
Run indices to include.
"""
# Now actually save some data
for si, subj in enumerate(subjects):
if p.disp_files:
print(' Applying processing to subject %g/%g.'
% (si + 1, len(subjects)))
pca_dir = op.join(p.work_dir, subj, p.pca_dir)
names_in = get_raw_fnames(p, subj, 'sss', False, False,
run_indices[si])
names_out = get_raw_fnames(p, subj, 'pca', False, False,
run_indices[si])
erm_in = get_raw_fnames(p, subj, 'sss', 'only')
erm_out = get_raw_fnames(p, subj, 'pca', 'only')
bad_file = get_bad_fname(p, subj)
all_proj = op.join(pca_dir, 'preproc_all-proj.fif')
projs = read_proj(all_proj)
fir_kwargs = _get_fir_kwargs(p.fir_design)[0]
if len(erm_in) > 0:
for ii, (r, o) in enumerate(zip(erm_in, erm_out)):
if p.disp_files:
print(' Processing erm file %d/%d.'
% (ii + 1, len(erm_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=True, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
for ii, (r, o) in enumerate(zip(names_in, names_out)):
if p.disp_files:
print(' Processing file %d/%d.'
% (ii + 1, len(names_in)))
raw = _raw_LRFCP(
raw_names=r, sfreq=None, l_freq=p.hp_cut, h_freq=p.lp_cut,
n_jobs=p.n_jobs_fir, n_jobs_resample=p.n_jobs_resample,
projs=projs, bad_file=bad_file, disp_files=False, method='fir',
apply_proj=False, filter_length=p.filter_length,
force_bads=False, l_trans=p.hp_trans, h_trans=p.lp_trans,
phase=p.phase, fir_window=p.fir_window, pick=False,
**fir_kwargs)
raw.save(o, overwrite=True, buffer_size_sec=None)
# look at raw_clean for ExG events
if p.plot_raw:
from ._viz import _viz_raw_ssp_events
_viz_raw_ssp_events(p, subj, run_indices[si])
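# --- Editor sketch (not in the original source): the intended call order of
# the two stages above. ``params`` and the subject list are hypothetical.
def _example_preprocessing_order(params, subjects=('subj_01',), run_indices=(None,)):
    do_preprocessing_combined(params, list(subjects), list(run_indices))
    apply_preprocessing_combined(params, list(subjects), list(run_indices))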
class FakeEpochs(object):
"""Make iterable epoch-like class, convenient for MATLAB transition"""
def __init__(self, data, ch_names, tmin=-0.2, sfreq=1000.0):
raise RuntimeError('Use mne.EpochsArray instead')
def fixed_len_events(p, raw):
"""Create fixed length trial events from raw object"""
dur = p.tmax - p.tmin
events = make_fixed_length_events(raw, 1, duration=dur)
return events
| bsd-3-clause | 6,273,652,446,083,029,000 | 44.298126 | 79 | 0.53396 | false | 3.418617 | false | false | false |
Harmon758/discord.py | discord/voice_client.py | 1 | 22914 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Some documentation to refer to:
- Our main web socket (mWS) sends opcode 4 with a guild ID and channel ID.
- The mWS receives VOICE_STATE_UPDATE and VOICE_SERVER_UPDATE.
- We pull the session_id from VOICE_STATE_UPDATE.
- We pull the token, endpoint and server_id from VOICE_SERVER_UPDATE.
- Then we initiate the voice web socket (vWS) pointing to the endpoint.
- We send opcode 0 with the user_id, server_id, session_id and token using the vWS.
- The vWS sends back opcode 2 with an ssrc, port, modes (array) and heartbeat_interval.
- We send a UDP discovery packet to endpoint:port and receive our IP and our port in LE.
- Then we send our IP and port via vWS with opcode 1.
- When that's all done, we receive opcode 4 from the vWS.
- Finally we can transmit data to endpoint:port.
"""
import asyncio
import socket
import logging
import struct
import threading
from typing import Any, Callable
from . import opus, utils
from .backoff import ExponentialBackoff
from .gateway import *
from .errors import ClientException, ConnectionClosed
from .player import AudioPlayer, AudioSource
try:
import nacl.secret
has_nacl = True
except ImportError:
has_nacl = False
__all__ = (
'VoiceProtocol',
'VoiceClient',
)
log = logging.getLogger(__name__)
class VoiceProtocol:
"""A class that represents the Discord voice protocol.
This is an abstract class. The library provides a concrete implementation
under :class:`VoiceClient`.
This class allows you to implement a protocol to allow for an external
method of sending voice, such as Lavalink_ or a native library implementation.
These classes are passed to :meth:`abc.Connectable.connect <VoiceChannel.connect>`.
.. _Lavalink: https://github.com/freyacodes/Lavalink
Parameters
------------
client: :class:`Client`
The client (or its subclasses) that started the connection request.
channel: :class:`abc.Connectable`
The voice channel that is being connected to.
"""
def __init__(self, client, channel):
self.client = client
self.channel = channel
async def on_voice_state_update(self, data):
"""|coro|
An abstract method that is called when the client's voice state
has changed. This corresponds to ``VOICE_STATE_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice state payload`__.
.. _voice_state_update_payload: https://discord.com/developers/docs/resources/voice#voice-state-object
__ voice_state_update_payload_
"""
raise NotImplementedError
async def on_voice_server_update(self, data):
"""|coro|
An abstract method that is called when initially connecting to voice.
This corresponds to ``VOICE_SERVER_UPDATE``.
Parameters
------------
data: :class:`dict`
The raw `voice server update payload`__.
.. _voice_server_update_payload: https://discord.com/developers/docs/topics/gateway#voice-server-update-voice-server-update-event-fields
__ voice_server_update_payload_
"""
raise NotImplementedError
async def connect(self, *, timeout: float, reconnect: bool):
"""|coro|
An abstract method called when the client initiates the connection request.
When a connection is requested initially, the library calls the constructor
under ``__init__`` and then calls :meth:`connect`. If :meth:`connect` fails at
some point then :meth:`disconnect` is called.
Within this method, to start the voice connection flow it is recommended to
use :meth:`Guild.change_voice_state` to start the flow. After which,
:meth:`on_voice_server_update` and :meth:`on_voice_state_update` will be called.
The order that these two are called is unspecified.
Parameters
------------
timeout: :class:`float`
The timeout for the connection.
reconnect: :class:`bool`
Whether reconnection is expected.
"""
raise NotImplementedError
async def disconnect(self, *, force: bool):
"""|coro|
An abstract method called when the client terminates the connection.
See :meth:`cleanup`.
Parameters
------------
force: :class:`bool`
Whether the disconnection was forced.
"""
raise NotImplementedError
def cleanup(self):
"""This method *must* be called to ensure proper clean-up during a disconnect.
It is advisable to call this from within :meth:`disconnect` when you are
completely done with the voice protocol instance.
This method removes it from the internal state cache that keeps track of
currently alive voice clients. Failure to clean-up will cause subsequent
connections to report that it's still connected.
"""
key_id, _ = self.channel._get_voice_client_key()
self.client._connection._remove_voice_client(key_id)
class VoiceClient(VoiceProtocol):
"""Represents a Discord voice connection.
You do not create these, you typically get them from
e.g. :meth:`VoiceChannel.connect`.
Warning
--------
In order to use PCM based AudioSources, you must have the opus library
installed on your system and loaded through :func:`opus.load_opus`.
Otherwise, your AudioSources must be opus encoded (e.g. using :class:`FFmpegOpusAudio`)
or the library will not be able to transmit audio.
Attributes
-----------
session_id: :class:`str`
The voice connection session ID.
token: :class:`str`
The voice connection token.
endpoint: :class:`str`
The endpoint we are connecting to.
channel: :class:`abc.Connectable`
The voice channel connected to.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the voice client is running on.
"""
def __init__(self, client, channel):
if not has_nacl:
raise RuntimeError("PyNaCl library needed in order to use voice")
super().__init__(client, channel)
state = client._connection
self.token = None
self.socket = None
self.loop = state.loop
self._state = state
# this will be used in the AudioPlayer thread
self._connected = threading.Event()
self._handshaking = False
self._potentially_reconnecting = False
self._voice_state_complete = asyncio.Event()
self._voice_server_complete = asyncio.Event()
self.mode = None
self._connections = 0
self.sequence = 0
self.timestamp = 0
self._runner = None
self._player = None
self.encoder = None
self._lite_nonce = 0
self.ws = None
warn_nacl = not has_nacl
supported_modes = (
'xsalsa20_poly1305_lite',
'xsalsa20_poly1305_suffix',
'xsalsa20_poly1305',
)
@property
def guild(self):
"""Optional[:class:`Guild`]: The guild we're connected to, if applicable."""
return getattr(self.channel, 'guild', None)
@property
def user(self):
""":class:`ClientUser`: The user connected to voice (i.e. ourselves)."""
return self._state.user
def checked_add(self, attr, value, limit):
val = getattr(self, attr)
if val + value > limit:
setattr(self, attr, 0)
else:
setattr(self, attr, val + value)
# connection related
async def on_voice_state_update(self, data):
self.session_id = data['session_id']
channel_id = data['channel_id']
if not self._handshaking or self._potentially_reconnecting:
# If we're done handshaking then we just need to update ourselves
# If we're potentially reconnecting due to a 4014, then we need to differentiate
# a channel move and an actual force disconnect
if channel_id is None:
# We're being disconnected so cleanup
await self.disconnect()
else:
guild = self.guild
self.channel = channel_id and guild and guild.get_channel(int(channel_id))
else:
self._voice_state_complete.set()
async def on_voice_server_update(self, data):
if self._voice_server_complete.is_set():
log.info('Ignoring extraneous voice server update.')
return
self.token = data.get('token')
self.server_id = int(data['guild_id'])
endpoint = data.get('endpoint')
if endpoint is None or self.token is None:
            log.warning('Awaiting endpoint... This requires waiting. '
                        'If a timeout occurs, consider raising the timeout and reconnecting.')
return
self.endpoint, _, _ = endpoint.rpartition(':')
if self.endpoint.startswith('wss://'):
# Just in case, strip it off since we're going to add it later
self.endpoint = self.endpoint[6:]
# This gets set later
self.endpoint_ip = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setblocking(False)
if not self._handshaking:
# If we're not handshaking then we need to terminate our previous connection in the websocket
await self.ws.close(4000)
return
self._voice_server_complete.set()
async def voice_connect(self):
await self.channel.guild.change_voice_state(channel=self.channel)
async def voice_disconnect(self):
log.info('The voice handshake is being terminated for Channel ID %s (Guild ID %s)', self.channel.id, self.guild.id)
await self.channel.guild.change_voice_state(channel=None)
def prepare_handshake(self):
self._voice_state_complete.clear()
self._voice_server_complete.clear()
self._handshaking = True
log.info('Starting voice handshake... (connection attempt %d)', self._connections + 1)
self._connections += 1
def finish_handshake(self):
log.info('Voice handshake complete. Endpoint found %s', self.endpoint)
self._handshaking = False
self._voice_server_complete.clear()
self._voice_state_complete.clear()
async def connect_websocket(self):
ws = await DiscordVoiceWebSocket.from_client(self)
self._connected.clear()
while ws.secret_key is None:
await ws.poll_event()
self._connected.set()
return ws
    async def connect(self, *, reconnect: bool, timeout: float):
log.info('Connecting to voice...')
self.timeout = timeout
for i in range(5):
self.prepare_handshake()
# This has to be created before we start the flow.
futures = [
self._voice_state_complete.wait(),
self._voice_server_complete.wait(),
]
# Start the connection flow
await self.voice_connect()
try:
await utils.sane_wait_for(futures, timeout=timeout)
except asyncio.TimeoutError:
await self.disconnect(force=True)
raise
self.finish_handshake()
try:
self.ws = await self.connect_websocket()
break
except (ConnectionClosed, asyncio.TimeoutError):
if reconnect:
log.exception('Failed to connect to voice... Retrying...')
await asyncio.sleep(1 + i * 2.0)
await self.voice_disconnect()
continue
else:
raise
if self._runner is None:
self._runner = self.loop.create_task(self.poll_voice_ws(reconnect))
async def potential_reconnect(self):
# Attempt to stop the player thread from playing early
self._connected.clear()
self.prepare_handshake()
self._potentially_reconnecting = True
try:
# We only care about VOICE_SERVER_UPDATE since VOICE_STATE_UPDATE can come before we get disconnected
await asyncio.wait_for(self._voice_server_complete.wait(), timeout=self.timeout)
except asyncio.TimeoutError:
self._potentially_reconnecting = False
await self.disconnect(force=True)
return False
self.finish_handshake()
self._potentially_reconnecting = False
try:
self.ws = await self.connect_websocket()
except (ConnectionClosed, asyncio.TimeoutError):
return False
else:
return True
@property
def latency(self):
""":class:`float`: Latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord Voice WebSocket latency and is
an analogue of user's voice latencies as seen in the Discord client.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.latency
@property
def average_latency(self):
""":class:`float`: Average of most recent 20 HEARTBEAT latencies in seconds.
.. versionadded:: 1.4
"""
ws = self.ws
return float("inf") if not ws else ws.average_latency
async def poll_voice_ws(self, reconnect):
backoff = ExponentialBackoff()
while True:
try:
await self.ws.poll_event()
except (ConnectionClosed, asyncio.TimeoutError) as exc:
if isinstance(exc, ConnectionClosed):
# The following close codes are undocumented so I will document them here.
# 1000 - normal closure (obviously)
# 4014 - voice channel has been deleted.
# 4015 - voice server has crashed
if exc.code in (1000, 4015):
log.info('Disconnecting from voice normally, close code %d.', exc.code)
await self.disconnect()
break
if exc.code == 4014:
log.info('Disconnected from voice by force... potentially reconnecting.')
successful = await self.potential_reconnect()
if not successful:
log.info('Reconnect was unsuccessful, disconnecting from voice normally...')
await self.disconnect()
break
else:
continue
if not reconnect:
await self.disconnect()
raise
retry = backoff.delay()
log.exception('Disconnected from voice... Reconnecting in %.2fs.', retry)
self._connected.clear()
await asyncio.sleep(retry)
await self.voice_disconnect()
try:
await self.connect(reconnect=True, timeout=self.timeout)
except asyncio.TimeoutError:
# at this point we've retried 5 times... let's continue the loop.
log.warning('Could not connect to voice... Retrying...')
continue
async def disconnect(self, *, force: bool = False):
"""|coro|
Disconnects this voice client from voice.
"""
if not force and not self.is_connected():
return
self.stop()
self._connected.clear()
try:
if self.ws:
await self.ws.close()
await self.voice_disconnect()
finally:
self.cleanup()
if self.socket:
self.socket.close()
async def move_to(self, channel):
"""|coro|
Moves you to a different voice channel.
Parameters
-----------
channel: :class:`abc.Snowflake`
The channel to move to. Must be a voice channel.
"""
await self.channel.guild.change_voice_state(channel=channel)
def is_connected(self):
"""Indicates if the voice client is connected to voice."""
return self._connected.is_set()
# audio related
def _get_voice_packet(self, data):
header = bytearray(12)
# Formulate rtp header
header[0] = 0x80
header[1] = 0x78
struct.pack_into('>H', header, 2, self.sequence)
struct.pack_into('>I', header, 4, self.timestamp)
struct.pack_into('>I', header, 8, self.ssrc)
encrypt_packet = getattr(self, '_encrypt_' + self.mode)
return encrypt_packet(header, data)
def _encrypt_xsalsa20_poly1305(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:12] = header
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext
def _encrypt_xsalsa20_poly1305_suffix(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
return header + box.encrypt(bytes(data), nonce).ciphertext + nonce
def _encrypt_xsalsa20_poly1305_lite(self, header, data):
box = nacl.secret.SecretBox(bytes(self.secret_key))
nonce = bytearray(24)
nonce[:4] = struct.pack('>I', self._lite_nonce)
self.checked_add('_lite_nonce', 1, 4294967295)
return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext + nonce[:4]
    def play(self, source: AudioSource, *, after: Callable[[Exception], Any] = None):
"""Plays an :class:`AudioSource`.
The finalizer, ``after`` is called after the source has been exhausted
or an error occurred.
If an error happens while the audio player is running, the exception is
caught and the audio player is then stopped. If no after callback is
passed, any caught exception will be displayed as if it were raised.
Parameters
-----------
source: :class:`AudioSource`
The audio source we're reading from.
after: Callable[[:class:`Exception`], Any]
The finalizer that is called after the stream is exhausted.
This function must have a single parameter, ``error``, that
denotes an optional exception that was raised during playing.
Raises
-------
ClientException
Already playing audio or not connected.
TypeError
Source is not a :class:`AudioSource` or after is not a callable.
OpusNotLoaded
Source is not opus encoded and opus is not loaded.
"""
if not self.is_connected():
raise ClientException('Not connected to voice.')
if self.is_playing():
raise ClientException('Already playing audio.')
if not isinstance(source, AudioSource):
            raise TypeError(f'source must be an AudioSource, not {source.__class__.__name__}')
if not self.encoder and not source.is_opus():
self.encoder = opus.Encoder()
self._player = AudioPlayer(source, self, after=after)
self._player.start()
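    # --- Editor sketch (not part of discord.py): typical ``play`` usage, assuming
    # ``vc`` came from ``await channel.connect()``; 'welcome.mp3' is a placeholder
    # and ``FFmpegPCMAudio`` lives in ``discord.player``:
    #
    #     vc.play(FFmpegPCMAudio('welcome.mp3'),
    #             after=lambda error: print('player finished:', error))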
def is_playing(self):
"""Indicates if we're currently playing audio."""
return self._player is not None and self._player.is_playing()
def is_paused(self):
"""Indicates if we're playing audio, but if we're paused."""
return self._player is not None and self._player.is_paused()
def stop(self):
"""Stops playing audio."""
if self._player:
self._player.stop()
self._player = None
def pause(self):
"""Pauses the audio playing."""
if self._player:
self._player.pause()
def resume(self):
"""Resumes the audio playing."""
if self._player:
self._player.resume()
@property
def source(self):
"""Optional[:class:`AudioSource`]: The audio source being played, if playing.
This property can also be used to change the audio source currently being played.
"""
return self._player.source if self._player else None
@source.setter
def source(self, value):
if not isinstance(value, AudioSource):
            raise TypeError(f'expected AudioSource, not {value.__class__.__name__}.')
if self._player is None:
raise ValueError('Not playing anything.')
self._player._set_source(value)
def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: :class:`bytes`
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: :class:`bool`
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
opus.OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', opus.Encoder.SAMPLES_PER_FRAME, 4294967295)
| mit | -3,993,753,975,391,410,000 | 34.470588 | 148 | 0.610282 | false | 4.425261 | false | false | false |
rockfruit/bika.lims | bika/lims/jsonapi/datamanagers.py | 1 | 4059 | # -*- coding: utf-8 -*-
from zope import interface
from AccessControl import Unauthorized
from AccessControl import getSecurityManager
from Products.CMFCore import permissions
from bika.lims.jsonapi.exceptions import APIError
from bika.lims.jsonapi.interfaces import IDataManager
from bika.lims.jsonapi.interfaces import IFieldManager
from bika.lims import logger
class BrainDataManager(object):
"""Adapter to get catalog brain attributes
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get(self, name):
"""Get the value by name
"""
# read the attribute
attr = getattr(self.context, name, None)
if callable(attr):
return attr()
return attr
def set(self, name, value, **kw):
"""Not used for catalog brains
"""
logger.warn("set attributes not allowed on catalog brains")
class PortalDataManager(object):
"""Adapter to set and get attributes of the Plone portal
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get(self, name):
"""Get the value by name
"""
# check read permission
sm = getSecurityManager()
permission = permissions.View
if not sm.checkPermission(permission, self.context):
raise Unauthorized("Not allowed to view the Plone portal")
# read the attribute
attr = getattr(self.context, name, None)
if callable(attr):
return attr()
        # XXX not really nice, but we want the portal to behave like an ordinary
        # content type. Therefore we need to inject the necessary data.
if name == "uid":
return 0
if name == "path":
return "/%s" % self.context.getId()
return attr
def set(self, name, value, **kw):
"""Set the attribute to the given value.
The keyword arguments represent the other attribute values
to integrate constraints to other values.
"""
# check write permission
sm = getSecurityManager()
permission = permissions.ManagePortal
if not sm.checkPermission(permission, self.context):
raise Unauthorized("Not allowed to modify the Plone portal")
# set the attribute
if not hasattr(self.context, name):
return False
self.context[name] = value
return True
class ATDataManager(object):
"""Adapter to set and get field values of AT Content Types
"""
interface.implements(IDataManager)
def __init__(self, context):
self.context = context
def get_schema(self):
"""Get the schema
"""
try:
return self.context.Schema()
except AttributeError:
raise APIError(400, "Can not get Schema of %r" % self.context)
def get_field(self, name):
"""Get the field by name
"""
field = self.context.getField(name)
return field
def set(self, name, value, **kw):
"""Set the field to the given value.
The keyword arguments represent the other field values
to integrate constraints to other values.
"""
# fetch the field by name
field = self.get_field(name)
# bail out if we have no field
if not field:
return None
# call the field adapter and set the value
fieldmanager = IFieldManager(field)
return fieldmanager.set(self.context, value, **kw)
def get(self, name, **kw):
"""Get the value of the field by name
"""
logger.debug("ATDataManager::get: fieldname=%s", name)
# fetch the field by name
field = self.get_field(name)
# bail out if we have no field
if not field:
return None
# call the field adapter and get the value
fieldmanager = IFieldManager(field)
return fieldmanager.get(self.context, **kw)
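# A rough usage sketch of the adapters above, assuming they are registered
# against their contexts (e.g. via ZCML) so a plain IDataManager lookup
# resolves to the matching class; ``obj`` and the field name are placeholders:
#
#     from bika.lims.jsonapi.interfaces import IDataManager
#
#     dm = IDataManager(obj)          # AT object, catalog brain or the portal
#     title = dm.get('title')         # read a field/attribute value
#     dm.set('title', 'New title')    # write it back (no-op for catalog brains)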
| agpl-3.0 | -2,641,320,435,018,969,000 | 27.1875 | 79 | 0.612959 | false | 4.480132 | false | false | false |
openspending/os-conductor | conductor/blueprints/package/blueprint.py | 2 | 3436 | import logging
import os
import json
from flask import Blueprint, Response, request, abort
from flask.ext.jsonpify import jsonpify
from werkzeug.contrib.cache import MemcachedCache, SimpleCache
from . import controllers
if 'OS_CONDUCTOR_CACHE' in os.environ:
cache = MemcachedCache([os.environ['OS_CONDUCTOR_CACHE']])
else:
cache = SimpleCache()
logging.info('CACHE=%r', cache)
def cache_get(key):
return cache.get(key)
def cache_set(key, value, timeout):
logging.info('CACHE[%s] <- %r', key, value)
return cache.set(key, value, timeout)
# Controller Proxies
def upload():
jwt = request.values.get('jwt')
datapackage = request.values.get('datapackage')
if datapackage is None:
abort(400)
if jwt is None:
abort(403)
ret = controllers.upload(datapackage, jwt, cache_get, cache_set)
return jsonpify(ret)
def upload_status():
datapackage = request.values.get('datapackage')
if datapackage is None:
abort(400)
ret = controllers.upload_status(datapackage, cache_get)
if ret is None:
abort(404)
return jsonpify(ret)
def toggle_publish():
id = request.values.get('id')
jwt = request.values.get('jwt')
if jwt is None:
abort(403)
value = request.values.get('publish', '')
value = value.lower()
toggle = None
publish = None
if value == 'toggle':
toggle = True
else:
if value in ['true', 'false']:
publish = json.loads(value)
if publish is None and toggle is None:
return Response(status=400)
return jsonpify(controllers.toggle_publish(id, jwt, toggle, publish))
def delete_package():
id = request.values.get('id')
jwt = request.values.get('jwt')
if jwt is None:
abort(403)
return jsonpify(controllers.delete_package(id, jwt))
def run_hooks():
id = request.values.get('id')
jwt = request.values.get('jwt')
pipeline = request.values.get('pipeline')
if jwt is None:
abort(403)
if pipeline is None or id is None:
abort(400)
return jsonpify(controllers.run_hooks(id, jwt, pipeline))
def stats():
return jsonpify(controllers.stats())
def update_params():
jwt = request.values.get('jwt')
datapackage = request.values.get('id')
params = request.get_json()
if 'params' not in params or not isinstance(params['params'], str):
abort(400, "No 'params' key or bad params value.")
if datapackage is None:
abort(400)
if jwt is None:
abort(403)
ret = controllers.update_params(datapackage, jwt, params)
return jsonpify(ret)
def create():
"""Create blueprint.
"""
# Create instance
blueprint = Blueprint('package', 'package')
# Register routes
blueprint.add_url_rule(
'upload', 'load', upload, methods=['POST'])
blueprint.add_url_rule(
'status', 'poll', upload_status, methods=['GET'])
blueprint.add_url_rule(
'publish', 'publish', toggle_publish, methods=['POST'])
blueprint.add_url_rule(
'delete', 'delete', delete_package, methods=['POST'])
blueprint.add_url_rule(
'run-hooks', 'run-hooks', run_hooks, methods=['POST'])
blueprint.add_url_rule(
'stats', 'stats', stats, methods=['GET'])
blueprint.add_url_rule(
'update_params', 'update_params', update_params, methods=['POST'])
# Return blueprint
return blueprint
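# A minimal wiring sketch for the blueprint factory above; the Flask app and
# url_prefix are illustrative assumptions rather than the package's real app
# factory configuration:
#
#     from flask import Flask
#     from conductor.blueprints.package.blueprint import create
#
#     app = Flask(__name__)
#     app.register_blueprint(create(), url_prefix='/package/')
#     # exposes POST upload, GET status, POST publish/delete/run-hooks, GET stats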
| mit | -518,462,762,546,354,400 | 25.229008 | 74 | 0.639115 | false | 3.571726 | false | false | false |
evernym/plenum | plenum/server/pool_manager.py | 2 | 17041 | import ipaddress
from abc import abstractmethod
from collections import OrderedDict
from typing import Optional
from typing import List
from common.exceptions import LogicError
from plenum.common.messages.internal_messages import VoteForViewChange
from plenum.server.suspicion_codes import Suspicions
from stp_core.common.log import getlogger
from stp_core.network.auth_mode import AuthMode
from stp_core.network.exceptions import RemoteNotFound
from stp_core.types import HA
from plenum.common.constants import NODE, TARGET_NYM, DATA, ALIAS, \
NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT, VERKEY, SERVICES, \
VALIDATOR, CLIENT_STACK_SUFFIX, BLS_KEY
from plenum.common.stack_manager import TxnStackManager
from plenum.common.txn_util import get_type, get_payload_data
logger = getlogger()
class PoolManager:
@abstractmethod
def getStackParamsAndNodeReg(self, name, keys_dir, nodeRegistry=None,
ha=None, cliname=None, cliha=None):
"""
Returns a tuple(nodestack, clientstack, nodeReg)
"""
@property
@abstractmethod
def merkleRootHash(self) -> str:
"""
"""
@property
@abstractmethod
def txnSeqNo(self) -> int:
"""
"""
@staticmethod
def _get_rank(needle_id: str, haystack_ids: List[str]):
# Return the rank of the node where rank is defined by the order in
# which node was added to the pool
try:
return haystack_ids.index(needle_id)
except ValueError:
return None
@property
@abstractmethod
def id(self):
"""
"""
@abstractmethod
def get_rank_of(self, node_id, node_reg, node_ids) -> Optional[int]:
"""Return node rank among active pool validators by id
:param node_id: node's id
:param node_reg: (optional) node registry to operate with. If not specified,
current one is used.
:return: rank of the node or None if not found
"""
@property
def rank(self) -> Optional[int]:
# Nodes have a total order defined in them, rank is the node's
# position in that order
return self.get_rank_of(self.id, self.nodeReg, self._ordered_node_ids)
@abstractmethod
def get_name_by_rank(self, rank, node_reg, node_ids) -> Optional[str]:
# Needed for communicating primary name to others and also nodeReg
# uses node names (alias) and not ids
# TODO: Should move to using node ids and not node names (alias)
"""Return node name (alias) by rank among active pool validators
:param rank: rank of the node
:param node_reg: (optional) node registry to operate with. If not specified,
current one is used.
:return: name of the node or None if not found
"""
class HasPoolManager:
# noinspection PyUnresolvedReferences, PyTypeChecker
def __init__(self, ledger, state, write_manager, ha=None, cliname=None, cliha=None):
self.poolManager = TxnPoolManager(self, ledger, state, write_manager,
ha=ha, cliname=cliname, cliha=cliha)
class TxnPoolManager(PoolManager, TxnStackManager):
def __init__(self, node, ledger, state, write_manager, ha=None, cliname=None, cliha=None):
self.node = node
self.name = node.name
self.config = node.config
self.genesis_dir = node.genesis_dir
self.keys_dir = node.keys_dir
self.ledger = ledger
self._id = None
TxnStackManager.__init__(
self, self.name, node.keys_dir, isNode=True)
self.state = state
self.write_manager = write_manager
self._load_nodes_order_from_ledger()
self.nstack, self.cstack, self.nodeReg, self.cliNodeReg = \
self.getStackParamsAndNodeReg(self.name, self.keys_dir, ha=ha,
cliname=cliname, cliha=cliha)
self._dataFieldsValidators = (
(NODE_IP, self._isIpAddressValid),
(CLIENT_IP, self._isIpAddressValid),
(NODE_PORT, self._isPortValid),
(CLIENT_PORT, self._isPortValid),
)
def __repr__(self):
return self.node.name
def getStackParamsAndNodeReg(self, name, keys_dir, nodeRegistry=None,
ha=None, cliname=None, cliha=None):
nodeReg, cliNodeReg, nodeKeys = self.parseLedgerForHaAndKeys(
self.ledger)
self.addRemoteKeysFromLedger(nodeKeys)
# If node name was not found in the pool transactions file
if not ha:
ha = nodeReg[name]
nstack = dict(name=name,
ha=HA(*ha),
main=True,
auth_mode=AuthMode.RESTRICTED.value,
queue_size=self.config.ZMQ_NODE_QUEUE_SIZE)
cliname = cliname or (name + CLIENT_STACK_SUFFIX)
if not cliha:
cliha = cliNodeReg[cliname]
cstack = dict(name=cliname or (name + CLIENT_STACK_SUFFIX),
ha=HA(*cliha),
main=True,
auth_mode=AuthMode.ALLOW_ANY.value,
queue_size=self.config.ZMQ_CLIENT_QUEUE_SIZE)
if keys_dir:
nstack['basedirpath'] = keys_dir
cstack['basedirpath'] = keys_dir
return nstack, cstack, nodeReg, cliNodeReg
def onPoolMembershipChange(self, txn) -> bool:
# `onPoolMembershipChange` method can be called only after txn added to ledger
if get_type(txn) != NODE:
return False
txn_data = get_payload_data(txn)
if DATA not in txn_data:
return False
nodeName = txn_data[DATA][ALIAS]
nodeNym = txn_data[TARGET_NYM]
self._set_node_ids_in_cache(nodeNym, nodeName)
def _updateNode(txn_data):
node_reg_changed = False
if SERVICES in txn_data[DATA]:
node_reg_changed = self.nodeServicesChanged(txn_data)
if txn_data[DATA][ALIAS] in self.node.nodeReg:
if {NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT}. \
intersection(set(txn_data[DATA].keys())):
self.nodeHaChanged(txn_data)
if VERKEY in txn_data:
self.nodeKeysChanged(txn_data)
if BLS_KEY in txn_data[DATA]:
self.node_blskey_changed(txn_data)
return node_reg_changed
node_reg_changed = False
        # If nodeNym was never added to self._ordered_node_services,
        # it has never been added to the ledger
if nodeNym not in self._ordered_node_services:
if VALIDATOR in txn_data[DATA].get(SERVICES, []):
self.addNewNodeAndConnect(txn_data)
node_reg_changed = True
self._set_node_services_in_cache(nodeNym, txn_data[DATA].get(SERVICES, []))
else:
node_reg_changed = _updateNode(txn_data)
self._set_node_services_in_cache(nodeNym, txn_data[DATA].get(SERVICES, None))
return node_reg_changed
def addNewNodeAndConnect(self, txn_data):
nodeName = txn_data[DATA][ALIAS]
if nodeName == self.name:
logger.debug("{} adding itself to node registry".
format(self.name))
self.node.nodeReg[nodeName] = HA(txn_data[DATA][NODE_IP],
txn_data[DATA][NODE_PORT])
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = \
HA(txn_data[DATA][CLIENT_IP],
txn_data[DATA][CLIENT_PORT])
else:
self.connectNewRemote(txn_data, nodeName, self.node, nodeName != self.name)
def node_about_to_be_disconnected(self, nodeName):
if self.node.master_primary_name == nodeName:
self.node.master_replica.internal_bus.send(
VoteForViewChange(Suspicions.PRIMARY_ABOUT_TO_BE_DISCONNECTED))
def nodeHaChanged(self, txn_data):
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
# TODO: Check if new HA is same as old HA and only update if
# new HA is different.
if nodeName == self.name:
# Update itself in node registry if needed
ha_changed = False
(ip, port) = self.node.nodeReg[nodeName]
if NODE_IP in txn_data[DATA] and ip != txn_data[DATA][NODE_IP]:
ip = txn_data[DATA][NODE_IP]
ha_changed = True
if NODE_PORT in txn_data[DATA] and port != txn_data[DATA][NODE_PORT]:
port = txn_data[DATA][NODE_PORT]
ha_changed = True
if ha_changed:
self.node.nodeReg[nodeName] = HA(ip, port)
ha_changed = False
(ip, port) = self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX]
if CLIENT_IP in txn_data[DATA] and ip != txn_data[DATA][CLIENT_IP]:
ip = txn_data[DATA][CLIENT_IP]
ha_changed = True
if CLIENT_PORT in txn_data[DATA] and port != txn_data[DATA][CLIENT_PORT]:
port = txn_data[DATA][CLIENT_PORT]
ha_changed = True
if ha_changed:
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(ip, port)
self.node.nodestack.onHostAddressChanged()
self.node.clientstack.onHostAddressChanged()
else:
rid = self.stackHaChanged(txn_data, nodeName, self.node)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
self.node_about_to_be_disconnected(nodeName)
def nodeKeysChanged(self, txn_data):
# TODO: if the node whose keys are being changed is primary for any
# protocol instance, then we should trigger an election for that
# protocol instance. For doing that, for every replica of that
# protocol instance, `_primaryName` as None, and then the node should
# call its `decidePrimaries`.
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
# TODO: Check if new keys are same as old keys and only update if
# new keys are different.
if nodeName == self.name:
# TODO: Why?
            logger.debug("{} not changing its own keep".
format(self.name))
return
else:
rid = self.stackKeysChanged(txn_data, nodeName, self.node)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
self.node_about_to_be_disconnected(nodeName)
def nodeServicesChanged(self, txn_data) -> bool:
nodeNym = txn_data[TARGET_NYM]
nodeName = self.getNodeName(nodeNym)
oldServices = set(self._ordered_node_services.get(nodeNym, []))
newServices = set(txn_data[DATA].get(SERVICES, []))
if oldServices == newServices:
logger.info("Node {} not changing {} since it is same as existing".format(nodeNym, SERVICES))
return False
node_count_changed = False
if VALIDATOR in newServices.difference(oldServices):
node_count_changed = True
# If validator service is enabled
node_info = self.write_manager.get_node_data(nodeNym)
self.node.nodeReg[nodeName] = HA(node_info[NODE_IP],
node_info[NODE_PORT])
self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX] = HA(node_info[CLIENT_IP],
node_info[CLIENT_PORT])
self.updateNodeTxns({DATA: node_info, }, txn_data)
if self.name != nodeName:
self.connectNewRemote({DATA: node_info,
TARGET_NYM: nodeNym}, nodeName, self.node)
else:
logger.debug("{} adding itself to node registry".
format(self.name))
if VALIDATOR in oldServices.difference(newServices):
node_count_changed = True
# If validator service is disabled
del self.node.nodeReg[nodeName]
del self.node.cliNodeReg[nodeName + CLIENT_STACK_SUFFIX]
if self.name != nodeName:
try:
rid = TxnStackManager.removeRemote(
self.node.nodestack, nodeName)
if rid:
self.node.nodestack.outBoxes.pop(rid, None)
except RemoteNotFound:
logger.info('{} did not find remote {} to remove'.format(self, nodeName))
self.node_about_to_be_disconnected(nodeName)
return node_count_changed
def node_blskey_changed(self, txn_data):
# if BLS key changes for my Node, then re-init BLS crypto signer with new keys
node_nym = txn_data[TARGET_NYM]
node_name = self.getNodeName(node_nym)
if node_name == self.name:
bls_key = txn_data[DATA][BLS_KEY]
self.node.update_bls_key(bls_key)
def getNodeName(self, nym):
# Assuming ALIAS does not change
return self._ordered_node_ids[nym]
@property
def merkleRootHash(self) -> str:
return self.ledger.root_hash
@property
def txnSeqNo(self) -> int:
return self.ledger.seqNo
# Question: Why are `_isIpAddressValid` and `_isPortValid` part of
# pool_manager?
@staticmethod
def _isIpAddressValid(ipAddress):
try:
ipaddress.ip_address(ipAddress)
except ValueError:
return False
else:
return ipAddress != '0.0.0.0'
@staticmethod
def _isPortValid(port):
return isinstance(port, int) and 0 < port <= 65535
@property
def id(self):
if not self._id:
for _, txn in self.ledger.getAllTxn():
txn_data = get_payload_data(txn)
if self.name == txn_data[DATA][ALIAS]:
self._id = txn_data[TARGET_NYM]
return self._id
def _load_nodes_order_from_ledger(self):
self._ordered_node_ids = OrderedDict()
self._ordered_node_services = {}
for _, txn in self.ledger.getAllTxn():
if get_type(txn) == NODE:
txn_data = get_payload_data(txn)
self._set_node_ids_in_cache(txn_data[TARGET_NYM],
txn_data[DATA][ALIAS])
self._set_node_services_in_cache(txn_data[TARGET_NYM],
txn_data[DATA].get(SERVICES, None))
def _set_node_ids_in_cache(self, node_nym, node_name):
curName = self._ordered_node_ids.get(node_nym)
if curName is None:
self._ordered_node_ids[node_nym] = node_name
logger.info("{} sets node {} ({}) order to {}".format(
self.name, node_name, node_nym,
                len(self._ordered_node_ids)))
elif curName != node_name:
msg = "{} is trying to order already ordered node {} ({}) with other alias {}" \
.format(self.name, curName, node_nym, node_name)
logger.error(msg)
raise LogicError(msg)
def _set_node_services_in_cache(self, node_nym, node_services):
if node_services is not None:
self._ordered_node_services[node_nym] = node_services
def node_ids_ordered_by_rank(self, node_reg, node_ids) -> List:
return [nym for nym, name in node_ids.items()
if name in node_reg]
def node_names_ordered_by_rank(self) -> List:
return self.calc_node_names_ordered_by_rank(self.nodeReg, self._ordered_node_ids)
@staticmethod
def calc_node_names_ordered_by_rank(node_reg, node_ids) -> List:
return [name for nym, name in node_ids.items()
if name in node_reg]
def get_rank_of(self, node_id, node_reg, node_ids) -> Optional[int]:
if self.id is None:
# This can happen if a non-genesis node starts
return None
return self._get_rank(node_id, self.node_ids_ordered_by_rank(node_reg, node_ids))
def get_rank_by_name(self, name, node_reg, node_ids) -> Optional[int]:
for nym, nm in node_ids.items():
if name == nm:
return self.get_rank_of(nym, node_reg, node_ids)
def get_name_by_rank(self, rank, node_reg, node_ids) -> Optional[str]:
try:
nym = self.node_ids_ordered_by_rank(node_reg, node_ids)[rank]
except IndexError:
return None
else:
return node_ids[nym]
def get_nym_by_name(self, node_name) -> Optional[str]:
for nym, name in self._ordered_node_ids.items():
if name == node_name:
return nym
return None
def get_node_ids(self):
return self._ordered_node_ids
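# A small illustration of the ranking helpers above; the nyms, aliases and HAs
# are made up. Rank follows the order in which nodes were added to the pool
# ledger, and demoted nodes (absent from node_reg) are skipped:
#
#     node_ids = OrderedDict([('nymA', 'Alpha'), ('nymB', 'Beta'), ('nymC', 'Gamma')])
#     node_reg = {'Alpha': HA('10.0.0.1', 9701), 'Gamma': HA('10.0.0.3', 9705)}
#     TxnPoolManager.calc_node_names_ordered_by_rank(node_reg, node_ids)
#     # -> ['Alpha', 'Gamma']   (ranks 0 and 1; the demoted 'Beta' gets no rank)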
| apache-2.0 | 7,421,510,375,693,587,000 | 37.729545 | 105 | 0.579367 | false | 3.836335 | false | false | false |
ssoloff/tower-of-hanoi | functional/test/test_tower_of_hanoi.py | 1 | 8489 | import unittest
import unittest.mock as mock
from tower_of_hanoi import Disk, Game, Peg
class DiskTestCase(unittest.TestCase):
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Disk(1), Disk(1))
def test____eq____when_self_size_not_equals_other__returns_false(self):
self.assertNotEqual(Disk(1), Disk(2))
def test____lt____when_self_equals_other__returns_false(self):
self.assertFalse(Disk(1) < Disk(1))
def test____lt____when_self_greater_than_other__returns_false(self):
self.assertFalse(Disk(2) < Disk(1))
def test____lt____when_self_less_than_other__returns_true(self):
self.assertTrue(Disk(1) < Disk(2))
class PegTestCase(unittest.TestCase):
def _create_peg(self, name=None, disks=[]):
        return Peg(name if name is not None else self._name, disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._name = 'name'
def test____eq____when_self_equals_other__returns_true(self):
self.assertEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_1]))
def test____eq____when_self_disks_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg(self._name, [self._disk_2]))
def test____eq____when_self_name_not_equals_other__returns_false(self):
self.assertNotEqual(Peg(self._name, [self._disk_1]), Peg('other-name', [self._disk_1]))
def test__disks__returns_copy(self):
peg = self._create_peg()
peg.disks().append(self._disk_1)
self.assertEqual([], peg.disks())
def test__disks__returns_in_order_from_bottom_to_top(self):
peg = self._create_peg(disks=[self._disk_3, self._disk_2, self._disk_1])
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__is_empty__when_empty__returns_true(self):
peg = self._create_peg()
self.assertTrue(peg.is_empty())
def test__is_empty__when_not_empty__returns_false(self):
peg = self._create_peg(disks=[self._disk_1])
self.assertFalse(peg.is_empty())
def test__pop__when_empty__raises_exception(self):
peg = self._create_peg()
with self.assertRaises(Exception):
peg.pop()
def test__pop__when_not_empty__returns_new_peg_with_top_disk_removed_and_removed_disk(self):
peg = self._create_peg(disks=[self._disk_2, self._disk_1])
new_peg, popped_disk = peg.pop()
self.assertEqual(self._create_peg(disks=[self._disk_2]), new_peg)
self.assertEqual(self._disk_1, popped_disk)
def test__push__when_empty__returns_new_peg_with_added_disk(self):
peg = self._create_peg()
new_peg = peg.push(self._disk_1)
self.assertEqual(self._create_peg(disks=[self._disk_1]), new_peg)
def test__push__when_disk_smaller_than_top_disk__returns_peg_with_added_disk_on_top(self):
peg = self._create_peg(disks=[self._disk_2])
new_peg = peg.push(self._disk_1)
self.assertEqual(self._create_peg(disks=[self._disk_2, self._disk_1]), new_peg)
def test__push__when_disk_same_as_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_1)
def test__push__when_disk_larger_than_top_disk__raises_exception(self):
peg = self._create_peg(disks=[self._disk_1])
with self.assertRaises(Exception):
peg.push(self._disk_2)
class GameTestCase(unittest.TestCase):
def _create_peg_a(self, disks):
return Peg('a', disks)
def _create_peg_b(self, disks=[]):
return Peg('b', disks)
def _create_peg_c(self, disks=[]):
return Peg('c', disks)
def setUp(self):
self._disk_1 = Disk(1)
self._disk_2 = Disk(2)
self._disk_3 = Disk(3)
self._disk_4 = Disk(4)
self._peg_b = self._create_peg_b()
self._peg_c = self._create_peg_c()
self._game = Game()
def test__create_peg__returns_peg_with_specified_name(self):
name = 'name'
peg = self._game.create_peg(name)
self.assertEqual(name, peg.name())
def test__create_peg__when_disk_count_is_0__returns_empty_peg(self):
peg = self._game.create_peg('name', 0)
self.assertEqual([], peg.disks())
def test__create_peg__when_disk_count_is_1__returns_peg_with_1_disk(self):
peg = self._game.create_peg('name', 1)
self.assertEqual([self._disk_1], peg.disks())
def test__create_peg__when_disk_count_is_3__returns_peg_with_3_disks_in_ascending_order_from_top(self):
peg = self._game.create_peg('name', 3)
self.assertEqual([self._disk_3, self._disk_2, self._disk_1], peg.disks())
def test__move__when_disk_count_is_1__invokes_callback_after_each_move(self):
move_spy = mock.Mock()
peg_a = self._create_peg_a([self._disk_1])
self._game.move(1, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_1]),
self._create_peg_b([])
])
]
self.assertEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_1__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(1, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_2__invokes_callback_after_each_move(self):
move_spy = mock.Mock()
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
self._game.move(2, peg_a, self._peg_c, self._peg_b, move_spy)
expected_move_spy_call_args_list = [
mock.call([
self._create_peg_a([self._disk_2]),
self._create_peg_b([self._disk_1]),
self._create_peg_c([])
]),
mock.call([
self._create_peg_a([]),
self._create_peg_c([self._disk_2]),
self._create_peg_b([self._disk_1])
]),
mock.call([
self._create_peg_b([]),
self._create_peg_c([self._disk_2, self._disk_1]),
self._create_peg_a([])
])
]
self.assertSequenceEqual(expected_move_spy_call_args_list, move_spy.call_args_list)
def test__move__when_disk_count_is_2__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(2, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_3__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_3, self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(3, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_3, self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_is_4__moves_disks_from_peg_a_to_peg_c(self):
peg_a = self._create_peg_a([self._disk_4, self._disk_3, self._disk_2, self._disk_1])
new_peg_a, new_peg_c, new_peg_b = self._game.move(4, peg_a, self._peg_c, self._peg_b)
self.assertEqual(self._create_peg_a([]), new_peg_a)
self.assertEqual(self._create_peg_b([]), new_peg_b)
self.assertEqual(self._create_peg_c([self._disk_4, self._disk_3, self._disk_2, self._disk_1]), new_peg_c)
def test__move__when_disk_count_exceeds_source_peg_disk_count__raises_exception(self):
peg_a = self._create_peg_a([self._disk_1])
with self.assertRaises(Exception):
self._game.move(2, peg_a, self._peg_c, self._peg_b)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -5,961,525,183,534,607,000 | 36.396476 | 113 | 0.58888 | false | 3.026381 | true | false | false |
ArchiveTeam/blogger-discovery | discover.py | 1 | 3496 | import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
'''Custom error class when fetching does not meet our expectation.'''
def main():
# Take the program arguments given to this script
# Normal programs use 'argparse' but this keeps things simple
start_num = int(sys.argv[1])
end_num = int(sys.argv[2])
output_filename = sys.argv[3] # this should be something like myfile.txt.gz
assert start_num <= end_num
print('Starting', start_num, end_num)
gzip_file = gzip.GzipFile(output_filename, 'wb')
for shortcode in check_range(start_num, end_num):
# Write the valid result one per line to the file
line = '{0}\n'.format(shortcode)
gzip_file.write(line.encode('ascii'))
gzip_file.close()
print('Done')
def check_range(start_num, end_num):
for num in range(start_num, end_num + 1):
shortcode = num
url = 'https://www.blogger.com/profile/{0}'.format(shortcode)
counter = 0
while True:
            # Try 5 times before giving up
if counter > 4:
# This will stop the script with an error
raise Exception('Giving up!')
try:
text = fetch(url)
except FetchError:
# The server may be overloaded so wait a bit
print('Sleeping...')
sys.stdout.flush()
time.sleep(10)
else:
if text:
yield 'id:{0}'.format(shortcode)
userid = extract_handle(text)
if userid:
yield 'user:{0}'.format(userid)
for blog in extract_blogs(text):
yield 'blog:{0}'.format(blog)
break # stop the while loop
counter += 1
def fetch(url):
'''Fetch the URL and check if it returns OK.
Returns True, returns the response text. Otherwise, returns None
'''
time.sleep(random.randint(10, 25))
print('Fetch', url)
sys.stdout.flush()
response = requests.get(url, headers=DEFAULT_HEADERS)
# response doesn't have a reason attribute all the time??
print('Got', response.status_code, getattr(response, 'reason'))
sys.stdout.flush()
if response.status_code == 200:
# The item exists
if not response.text:
# If HTML is empty maybe server broke
raise FetchError()
return response.text
elif response.status_code == 404:
# Does not exist
return
elif response.status_code == 503:
# Captcha!
print('You are receiving a temporary captcha from Google. Sleep 45 minutes.')
sys.stdout.flush()
time.sleep(2700)
raise FetchError()
else:
# Problem
raise FetchError()
def extract_handle(text):
'''Return the page creator from the text.'''
# Search for something like
# "http://www.blogger.com/feeds/14366755180455532991/blogs"
match = re.search(r'"https?://www\.blogger\.[a-z]+/feeds/([0-9]+)/', text)
if match:
return match.group(1)
def extract_blogs(text):
    '''Return a list of contributor blog URLs from the text.'''
# Search for "http://onwonder.blogspot.com/"
return re.findall(r'"(https?://[^"]+)" rel="contributor\-to nofollow"', text)
if __name__ == '__main__':
main()
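# Example invocation; the ID range and the output filename are arbitrary
# placeholders chosen for illustration:
#
#     python discover.py 14366755180455530000 14366755180455539999 blogger-ids.txt.gz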
| unlicense | 4,426,961,121,204,855,000 | 26.527559 | 85 | 0.580378 | false | 4.032295 | false | false | false |