repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
amit0701/rally | rally/task/runner.py | 1 | 10850 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import multiprocessing
import time
import jsonschema
from rally.common import logging
from rally.common import objects
from rally.common.plugin import plugin
from rally.common import utils as rutils
from rally import consts
from rally.task import context
from rally.task import scenario
from rally.task import types
from rally.task import utils
LOG = logging.getLogger(__name__)
def format_result_on_timeout(exc, timeout):
return {
"duration": timeout,
"idle_duration": 0,
"output": {"additive": [], "complete": []},
"atomic_actions": {},
"error": utils.format_exc(exc)
}
def _get_scenario_context(context_obj):
return context.ContextManager(context_obj).map_for_scenario()
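# _run_scenario_once takes a single packed tuple
# (iteration, cls, method_name, context_obj, kwargs) so it can be dispatched
# to worker processes/threads, and returns one result dict whose keys
# (duration, timestamp, idle_duration, error, output, atomic_actions) are
# validated later by ScenarioRunnerResult.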
def _run_scenario_once(args):
iteration, cls, method_name, context_obj, kwargs = args
LOG.info("Task %(task)s | ITER: %(iteration)s START" %
{"task": context_obj["task"]["uuid"], "iteration": iteration})
context_obj["iteration"] = iteration
scenario_inst = cls(context_obj)
error = []
output = {"additive": [], "complete": []}
try:
with rutils.Timer() as timer:
# NOTE(amaretskiy): Output as return value is deprecated
# but supported for backward compatibility
deprecated_output = getattr(scenario_inst, method_name)(**kwargs)
warning = ""
if deprecated_output:
warning = ("Returning output data by scenario is deprecated "
"in favor of calling add_output().")
if scenario_inst._output != {"complete": [], "additive": []}:
output = scenario_inst._output
if deprecated_output:
warning += (" Output data both returned and passed to "
"add_output() so returned one is ignored!")
elif deprecated_output:
output["additive"].append({
"title": "Scenario output",
"description": "",
"chart_plugin": "StackedArea",
"data": [list(item)
for item in deprecated_output["data"].items()]})
if warning:
LOG.warning(warning)
except Exception as e:
error = utils.format_exc(e)
if logging.is_debug():
LOG.exception(e)
finally:
status = "Error %s: %s" % tuple(error[0:2]) if error else "OK"
LOG.info("Task %(task)s | ITER: %(iteration)s END: %(status)s" %
{"task": context_obj["task"]["uuid"], "iteration": iteration,
"status": status})
return {"duration": timer.duration() - scenario_inst.idle_duration(),
"timestamp": timer.timestamp(),
"idle_duration": scenario_inst.idle_duration(),
"error": error,
"output": output,
"atomic_actions": scenario_inst.atomic_actions()}
def _worker_thread(queue, args):
queue.put(_run_scenario_once(args))
def _log_worker_info(**info):
"""Log worker parameters for debugging.
:param info: key-value pairs to be logged
"""
info_message = "\n\t".join(["%s: %s" % (k, v)
for k, v in info.items()])
LOG.debug("Starting a worker.\n\t%s" % info_message)
class ScenarioRunnerResult(dict):
"""Class for all scenario runners' result."""
RESULT_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"duration": {
"type": "number"
},
"timestamp": {
"type": "number"
},
"idle_duration": {
"type": "number"
},
"output": objects.task.OUTPUT_SCHEMA,
"atomic_actions": {
"type": "object",
"patternProperties": {
".*": {"type": ["number", "null"]}
}
},
"error": {
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": False
}
def __init__(self, result_list):
super(ScenarioRunnerResult, self).__init__(result_list)
jsonschema.validate(result_list, self.RESULT_SCHEMA)
def configure(name, namespace="default"):
return plugin.configure(name=name, namespace=namespace)
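# Concrete runners register themselves with this decorator under the name
# that task configs reference as "type" (validate() below defaults it to
# "serial"); the abstract base class itself is registered as "base_runner".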
@configure(name="base_runner")
class ScenarioRunner(plugin.Plugin):
"""Base class for all scenario runners.
Scenario runner is an entity that implements a certain strategy of
launching benchmark scenarios, e.g. running them continuously or
periodically for a given number of times or seconds.
These strategies should be implemented in subclasses of ScenarioRunner
    in the _run_scenario() method.
"""
CONFIG_SCHEMA = {}
def __init__(self, task, config, batch_size=0):
"""Runner constructor.
        It sets task and config to local variables. It also initializes
        result_queue, where results will be put by the _send_result method.
:param task: Instance of objects.Task
:param config: Dict with runner section from benchmark configuration
"""
self.task = task
self.config = config
self.result_queue = collections.deque()
self.aborted = multiprocessing.Event()
self.run_duration = 0
self.batch_size = batch_size
self.result_batch = []
@staticmethod
def validate(config):
"""Validates runner's part of task config."""
runner = ScenarioRunner.get(config.get("type", "serial"))
jsonschema.validate(config, runner.CONFIG_SCHEMA)
@abc.abstractmethod
def _run_scenario(self, cls, method_name, context, args):
"""Runs the specified benchmark scenario with given arguments.
:param cls: The Scenario class where the scenario is implemented
:param method_name: Name of the method that implements the scenario
:param context: Benchmark context that contains users, admin & other
information, that was created before benchmark started.
:param args: Arguments to call the scenario method with
        :returns: List of results for each single scenario iteration,
where each result is a dictionary
"""
def run(self, name, context, args):
cls_name, method_name = name.split(".", 1)
cls = scenario.Scenario.get(name)._meta_get("cls_ref")
# NOTE(boris-42): processing @types decorators
args = types.preprocess(name, context, args)
with rutils.Timer() as timer:
self._run_scenario(cls, method_name, context, args)
self.run_duration = timer.duration()
return self.run_duration
def abort(self):
"""Abort the execution of further benchmark scenario iterations."""
self.aborted.set()
@staticmethod
def _create_process_pool(processes_to_start, worker_process,
worker_args_gen):
"""Create a pool of processes with some defined target function.
:param processes_to_start: number of processes to create in the pool
:param worker_process: target function for all processes in the pool
:param worker_args_gen: generator of arguments for the target function
:returns: the process pool as a deque
"""
process_pool = collections.deque()
for i in range(processes_to_start):
kwrgs = {"processes_to_start": processes_to_start,
"processes_counter": i}
process = multiprocessing.Process(target=worker_process,
args=next(worker_args_gen),
kwargs={"info": kwrgs})
process.start()
process_pool.append(process)
return process_pool
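    # Note: worker_args_gen must yield one tuple of positional arguments per
    # process, and worker_process must accept an ``info`` keyword argument --
    # the pool passes {"processes_to_start": N, "processes_counter": i}
    # through it (see _log_worker_info above).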
def _join_processes(self, process_pool, result_queue):
"""Join the processes in the pool and send their results to the queue.
:param process_pool: pool of processes to join
        :param result_queue: multiprocessing.Queue that receives the results
"""
while process_pool:
while process_pool and not process_pool[0].is_alive():
process_pool.popleft().join()
if result_queue.empty():
# sleep a bit to avoid 100% usage of CPU by this method
time.sleep(0.001)
while not result_queue.empty():
self._send_result(result_queue.get())
self._flush_results()
result_queue.close()
def _flush_results(self):
if self.result_batch:
sorted_batch = sorted(self.result_batch)
self.result_queue.append(sorted_batch)
self.result_batch = []
def _send_result(self, result):
"""Store partial result to send it to consumer later.
:param result: Result dict to be sent. It should match the
ScenarioRunnerResult schema, otherwise
ValidationError is raised.
"""
r = ScenarioRunnerResult(result)
self.result_batch.append(r)
if len(self.result_batch) >= self.batch_size:
sorted_batch = sorted(self.result_batch,
key=lambda r: r["timestamp"])
self.result_queue.append(sorted_batch)
self.result_batch = []
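    # Results are buffered in self.result_batch and only appended to
    # self.result_queue in timestamp-sorted batches once batch_size is
    # reached; _flush_results() pushes whatever remains once the worker
    # processes have finished (see _join_processes above).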
def _log_debug_info(self, **info):
"""Log runner parameters for debugging.
The method logs the runner name, the task id as well as the values
passed as arguments.
:param info: key-value pairs to be logged
"""
info_message = "\n\t".join(["%s: %s" % (k, v)
for k, v in info.items()])
LOG.debug("Starting the %(name)s runner (task UUID: %(task)s)."
"\n\t%(info)s" %
{"name": self._meta_get("name"),
"task": self.task["uuid"],
"info": info_message})
| apache-2.0 | 6,968,100,981,351,991,000 | 34.457516 | 79 | 0.577512 | false |
jianglab/tomography | tomoThickness.py | 1 | 32556 | #!/usr/bin/env python
#
# Author: Rui Yan <[email protected]>, Sep 2015
# Copyright (c) 2012 Purdue University
#
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 2111-1307 USA
#
#
from EMAN2 import *
import os, sys, math, itertools
import numpy as np
import scipy
from scipy.optimize import minimize
from scipy.optimize import basinhopping
from scipy.optimize import leastsq
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import collections
from itertools import chain
from scipy import stats
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
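# Intensity model assumed throughout this script (see
# optimizationFuncFullModel0 below): the mean block intensity I measured at
# stage tilt theta follows an exponential (Beer-Lambert-type) attenuation
# through a slab of thickness d0 that is pre-tilted by (theta0, alpha0):
#     A * (I - B) = I0 * exp(-d0 * cos(theta0) / (MFP * cos(theta + theta0) * cos(gamma0)))
# so ln(A * (I - B)) is linear in 1/cos(theta + theta0) ("log-ratio mode"),
# with gamma0 the overall sample pre-tilt derived from theta0 and alpha0
# (see calculateGamma0).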
def main():
progname = os.path.basename(sys.argv[0])
usage = """
Determine the thickness, sample tilt and mean free path of tomographic tilt series
Example:
python tomoThickness.py --tiltseries 6hSINVc1s2_17.ali --tiltangles 6hSINVc1s2_17.tlt --boxsize 200 --MFP 200 --B 1600 --d0 200 --theta0 5 --alpha0 0 --niter 200 --interval 50 --x0 1200,1400,1000,2400,2900,2600,1400,800 --y0 1100,1400,2000,3600,2900,600,2800,2400
python tomoThickness.py --tiltseries virus009.ali --tiltangles virus009.tlt --boxsize 200 --B 240 --d0 100 --alpha0 0 --theta0 0 --niter 400 --interval 50 --x0 1600,1500,1600,300 --y0 1600,1700,1800,300
"""
parser = EMArgumentParser(usage=usage,version=EMANVERSION)
parser.add_argument("--tiltseries", type=str, default='', help="tilt series with tilt axis along Y")
parser.add_argument('--tiltangles',type=str,default='',help='File in .tlt format containing the tilt angle of each image in the tiltseries.')
parser.add_argument("--boxsize", type=int, default=200, help="perform grid boxing using given box size. default to 200")
parser.add_argument("--x0", type=str, default=0, help="for test on some regions, multiple regions are allowed, --x0 100,200,300")
parser.add_argument("--y0", type=str, default=0, help="for test on some regions, multiple regions are allowed, --y0 100,200,300")
parser.add_argument("--adaptiveBox", action="store_true", default=False, help="squeeze the x side of boxsize by cos(theta(tlt))")
parser.add_argument("--writeClippedRegions", action="store_true", default=False, help="write out the clipped region of interest, test only")
#try to determine I0 from the intercept of the graph
#parser.add_argument("--I0", type=float, default=2000, help="whole spectrum I0")
parser.add_argument("--d0", type=float, default=100, help="initial thickness")
parser.add_argument("--theta0", type=float, default=0, help="offset of angle theta (the initial offset angle around y-axis)")
parser.add_argument("--alpha0", type=float, default=0, help="offset of angle alpha (the initial offset angle around x-axis)")
#assume A == 1
parser.add_argument("--A", type=float, default=1, help="scaling factor of I0")
parser.add_argument("--B", type=float, default=0, help="# of electrons = gain * pixel_value + B")
parser.add_argument("--MFP", type=float, default=350, help="mean free path, for vitreous ice, 350nm@300kV, 300nm@200kV")
#parser.add_argument("--k", type=float, default=0, help="I0(theta) = I0/(cos(theta)**k), and 0=<k<=1")
parser.add_argument("--addOffset", type=str, default='-32000', help="Add options.addOffset to pixel values")
#parser.add_argument("--inversePixel", action="store_true", default=False, help="inverse pixel values")
parser.add_argument("--plotData", action="store_true", default=False, help="plot the original data, including curvilinear mode and linear mode")
parser.add_argument("--plotResults", action="store_true", default=False, help="plot the original data and fitted results, including curvilinear mode and linear mode")
parser.add_argument("--mode", type=int, default=0, help="")
parser.add_argument("--niter", type=int, default=200, help="niter in basinhopping")
parser.add_argument("--interval", type=int, default=50, help="interval in basinhopping")
parser.add_argument("--T", type=float, default=1e-4, help="T in basinhopping")
parser.add_argument("--modifyTiltFile", action="store_true", default=False, help="modify the .tlt file by returned theta0")
parser.add_argument('--modifiedTiltName',type=str,default='',help='the filename of modified tilt angles')
#parser.add_argument("--refineRegion", action="store_true", default=False, help="use returned theta0 to re-clip region and re-do optimization")
parser.add_argument('--logName',type=str,default='',help='the name of the log file which contains the output')
parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="verbose level, higner number means higher level of verboseness")
parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
global options
(options, args) = parser.parse_args()
logger = E2init(sys.argv, options.ppid)
serieshdr = EMData(options.tiltseries,0,True)
global nslices
nslices = serieshdr['nz']
nx = serieshdr['nx']
ny = serieshdr['ny']
print "\ntiltseries %s: %d*%d*%d"%(options.tiltseries, nx, ny, nslices)
#read in tilt angles file, *.tlt
anglesfile = open(options.tiltangles,'r') #Open tilt angles file
alines = anglesfile.readlines() #Read its lines
anglesfile.close() #Close the file
#global tiltangles
tiltangles = [ alines[i].replace('\n','') for i in range(len(alines)) ] #Eliminate trailing return character, '\n', for each line in the tiltangles file
ntiltangles = len(tiltangles)
tiltanglesArray = np.array(tiltangles)
if (options.verbose>=10): print tiltangles
blocks = []
boxsize = options.boxsize
if (options.x0 and options.y0):
x0 = [int(x) for x in options.x0.split(',')]
y0 = [int(y) for y in options.y0.split(',')]
else:
print "Please provide the X/Y coordinates of selected regions using --x0 --y0\n"
sys.exit(0)
origDictionary = collections.OrderedDict()
for k in range(nslices):
angle = float(tiltangles[k])
r0 = Region(0, 0, k, nx, ny, 1)
tiltedImg = EMData(options.tiltseries, 0, 0, r0)
blockMeanList = []
for i in range(len(x0)):
testname = options.tiltseries.split('.')[0]+'_x0%g_y0%g_clip.hdf'%(x0[i], y0[i])
xp = (x0[i] - nx/2.0) * math.cos(math.radians(angle)) + nx/2.0
yp = y0[i]
if (options.adaptiveBox):
boxsizeX = int(boxsize * math.cos(math.radians(angle)))
else:
boxsizeX = boxsize
#extract the whole image at each tilt
xp = xp-boxsizeX/2
yp = yp-boxsize/2
r = Region(xp, yp, boxsizeX, boxsize)
img = tiltedImg.get_clip(r)
if (options.writeClippedRegions): img.write_image(testname, k)
blockMeanValues = blockMean(img, boxsizeX, boxsize)
blockMeanList.append(blockMeanValues)
origDictionary[tiltangles[k]] = flattenList(blockMeanList)
#if (options.verbose>=10): print origDictionary
assert(len(origDictionary)==len(tiltangles))
startZ = 0
endZ = nslices
stepZ = 1
dictionary0 = collections.OrderedDict()
n=0
for key, value in origDictionary.items()[startZ:endZ]:
if (math.fmod(n, stepZ) == 0): dictionary0[key] = value
n+=1
#print "len(dictionary)=", len(dictionary0)
#check if the tilt angles are from negative to positive, if not, reverse the order of dictionary
if (float(tiltangles[0]) > 0):
print "Reversing the order of tilt angles since we usually start from negative tilts to positive tilts"
items = dictionary0.items()
items.reverse()
dictionary0 = collections.OrderedDict(items)
if (options.verbose>=10): print dictionary0
if (options.plotData): plotOriginalData(dictionary0, options)
global dictionary
#dictionary = averageRescaledResultDict(rescaledResultDict, options)
dictionary = dictionary0
#use intercept as the initial value of I0 and set gain (A) == 1
thetaCurve, IntensityCurve, thetaLinear, IntensityLinear = generateData2(dictionary, options)
oneResultDict = fitLinearRegression3(thetaLinear, IntensityLinear, tiltanglesArray, thetaCurve, IntensityCurve, options)
I0 = calculateIntercept(oneResultDict, options)
print "initial I0 =", I0
#options.I0 = I0
global maxVal, minVal
maxKey, maxVal = max(dictionary.iteritems(), key=lambda x:x[1])
maxVal = maxVal[0]
minKey, minVal = min(dictionary.iteritems(), key=lambda x:x[1])
minVal = minVal[0]
print "max: max average pixel value = %g @ tilt angles =%s"%(maxVal, maxKey)
print "min: min average pixel value = %g @ tilt angles =%s"%(minVal, minKey)
if (options.mode == 0): #use complete model, use multiple regions
print "Using complete model and %g boxes!"%len(x0)
#I0 = options.I0
d0 = options.d0
theta0 = options.theta0
alpha0 = options.alpha0
A = options.A
B = options.B
MFP = options.MFP
niter = options.niter
interval = options.interval
p0 = [I0, d0, theta0, alpha0, A, B, MFP]
#p0 = [I0, d0, theta0, alpha0, B, MFP]
x0 = p0
boundsList = [(maxVal, None),(10, 250), (-10, 10), (-10, 10), (0.01, None), (None, int(minVal)), (1, None)]
#boundsList = [(maxVal, None),(10, 250), (-10, 10), (-10, 10), (None, int(minVal)), (1, None)]
minimizer_kwargs = dict(method="L-BFGS-B", bounds=boundsList)
mybounds = MyBounds()
mybounds.xmax=[float('inf'), 250.0, 10.0, 10.0, float('inf'), int(minVal), float('inf')]
mybounds.xmin=[maxVal, 10.0, -10.0, -10.0, 0.01, (-1)*(float('inf')), 1.0]
#mybounds.xmax=[float('inf'), 250.0, 10.0, 10.0, int(minVal), float('inf')]
#mybounds.xmin=[maxVal, 10.0, -10.0, -10.0, (-1)*(float('inf')), 1.0]
mytakestep = MyTakeStep3()
res = scipy.optimize.basinhopping(optimizationFuncFullModel0, x0, T=options.T, stepsize=0.01, minimizer_kwargs=minimizer_kwargs, niter=niter, take_step=mytakestep, accept_test=mybounds, \
callback=None, interval=interval, disp=False, niter_success=None)
#print res
tmp = res.x.tolist()
#tmp[1] = tmp[1]+100
I0, d0, theta0, alpha0, A, B, MFP = tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6]
#I0, d0, theta0, alpha0, B, MFP = tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5]
gamma0 = calculateGamma0(theta0, alpha0)
print "[I0, d0, theta0, alpha0, A, B, MFP, gamma0] =", I0, d0, theta0, alpha0, A, B, MFP, gamma0
print "B/I0 = ", B/I0
print "***************************************************"
print "Tilt series: %s"%options.tiltseries
print "Fitting results:"
print "Thickness = %g nm"%d0
print "Sample tilt: theta0 = %g degree, alpha0 = %g degree, gamma0 = %g degree"%(theta0, alpha0, gamma0)
print "Mean free path = %g nm"%MFP
if (options.logName):
logName = options.logName
else:
logName = options.tiltseries.split(".")[0] + ".log"
fp = open(logName, 'w')
fp.write("Tilt series: %s\n"%options.tiltseries)
fp.write("Fitting results:\n")
fp.write("Thickness = %g nm\n"%d0)
fp.write("Sample tilt: theta0 = %g degree, alpha0 = %g degree, gamma0 = %g degree\n"%(theta0, alpha0, gamma0))
fp.write("Mean free path = %g nm\n"%MFP)
fp.close()
if (options.plotResults):
compareFitData(dictionary, tmp, options)
if (options.modifyTiltFile):
if (options.modifiedTiltName):
tiltFile = options.modifiedTiltName
else:
tiltFile = options.tiltseries.split(".")[0] + "_modified.tlt"
fp = open(tiltFile, 'w')
for i in tiltangles:
tlt = float(i) + theta0
#print float(tlt)
line = "%g\n"%(tlt)
fp.write(line)
fp.close()
def calculateIntercept(oneResultDict, options):
interceptLeftArray = np.array([])
interceptRightArray = np.array([])
for boxPosition, value in oneResultDict.iteritems():
interceptLeftArray = np.append(interceptLeftArray, value['interceptLeft'])
interceptRightArray = np.append(interceptRightArray, value['interceptRight'])
interceptArray = np.append(interceptLeftArray, interceptRightArray)
interceptMedian = np.median(interceptArray)
#print interceptArray
initialI0 = exp(interceptMedian)
return initialI0
def fitLinearRegression3(thetaLinear, IntensityLinear, tiltanglesArray, thetaCurve, IntensityCurve, options):
x0 = [int(x) for x in options.x0.split(',')]
y0 = [int(y) for y in options.y0.split(',')]
resultDict = collections.OrderedDict()
#returnDict = collections.OrderedDict()
allResLeft = []
allResRight = []
for i in range(len(x0)):
iIntensityLinear = IntensityLinear[:, i]
iIntensityCurve = IntensityCurve[:, i]
key = '%g %g'%(x0[i], y0[i])
#print "x0, y0 =", key
ret = fitOneLinearRegression(thetaLinear, iIntensityLinear, tiltanglesArray, options)
fres, stdRes, xLeft, yLeft, fitLeft, xRight, yRight, fitRight, indexLargeLeft, indexLargeRight, indexSmallLeft, indexSmallRight, resLeft, resRight, slopeLeft, interceptLeft, slopeRight, interceptRight = ret
resultDict[key] = {}
resultDict[key]['SSE'] = fres
resultDict[key]['intensityCurve'] = iIntensityCurve
resultDict[key]['tiltAngles'] = thetaCurve
resultDict[key]['stdRes'] = stdRes
resultDict[key]['xLeft'] = xLeft
resultDict[key]['yLeft'] = yLeft
resultDict[key]['fitLeft'] = fitLeft
resultDict[key]['xRight'] = xRight
resultDict[key]['yRight'] = yRight
resultDict[key]['fitRight'] = fitRight
resultDict[key]['indexLargeLeft'] = indexLargeLeft
resultDict[key]['indexLargeRight'] = indexLargeRight
resultDict[key]['indexSmallLeft'] = indexSmallLeft
resultDict[key]['indexSmallRight'] = indexSmallRight
resultDict[key]['resLeft'] = resLeft
resultDict[key]['resRight'] = resRight
resultDict[key]['slopeLeft'] = slopeLeft
resultDict[key]['interceptLeft'] = interceptLeft
resultDict[key]['slopeRight'] = slopeRight
resultDict[key]['interceptRight'] = interceptRight
return resultDict
def fitOneLinearRegression(thetaLinear, IntensityLinear, tiltanglesArray, options):
if (len(tiltanglesArray)%2 == 1):
halfN = int(len(tiltanglesArray)/2) + 1
xLeft, yLeft = thetaLinear[0:halfN], IntensityLinear[0:halfN]
xRight, yRight = thetaLinear[halfN-1:], IntensityLinear[halfN-1:]
else:
halfN = int(len(tiltanglesArray)/2)
xLeft, yLeft = thetaLinear[0:halfN], IntensityLinear[0:halfN]
xRight, yRight = thetaLinear[halfN:], IntensityLinear[halfN:]
slopeLeft, interceptLeft, r2Left = linearRegression(xLeft, yLeft)
slopeRight, interceptRight, r2Right = linearRegression(xRight, yRight)
assert(len(xLeft)==len(xRight))
fitLeft = slopeLeft*xLeft + interceptLeft
fitRight = slopeRight*xRight + interceptRight
#the sum of squared residuals
resLeft = yLeft - fitLeft
resLeft = resLeft / fitLeft
#print "resLeft", resLeft
resRight = yRight - fitRight
resRight = resRight / fitRight
#print "resRight", resRight
fresLeft = sum(resLeft**2)
fresRight = sum(resRight**2)
fres = [fresLeft*1000000, fresRight*1000000]
#find the points with the largest 3 residuals in left and right branches, use numpy.argpartition
#N = options.largestNRes
N=3
negN = (-1)*N
indexLargeLeft = np.argpartition(resLeft**2, negN)[negN:]
indexLargeRight = np.argpartition(resRight**2, negN)[negN:]
M=3
#M = options.smallestNRes
posM = M
indexSmallLeft = np.argpartition(resLeft**2, posM)[:posM]
indexSmallRight = np.argpartition(resRight**2, posM)[:posM]
#MSE, under the assumption that the population error term has a constant variance, the estimate of that variance is given by MSE, mean square error
#The denominator is the sample size reduced by the number of model parameters estimated from the same data, (n-p) for p regressors or (n-p-1) if an intercept is used.
#In this case, p=1 so the denominator is n-2.
stdResLeft = np.std(resLeft, ddof=2)
stdResRight = np.std(resRight, ddof=2)
stdRes = [stdResLeft*1000, stdResRight*1000]
ret = fres, stdRes, xLeft, yLeft, fitLeft, xRight, yRight, fitRight, indexLargeLeft, indexLargeRight, indexSmallLeft, indexSmallRight, resLeft, resRight, slopeLeft, interceptLeft, slopeRight, interceptRight
return ret
def linearRegression(x, y):
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get slope, intercept and coefficient of determination (r_squared)
return slope, intercept, r_value**2
def generateData2(dictionary, options):
x0 = [int(x) for x in options.x0.split(',')]
thetaLst = []
intensityLst = []
thetaLinearLst = []
for theta, intensity in dictionary.iteritems():
thetaLst.append(float(theta))
intensityLst.append(intensity)
cosAngle = math.cos((float(theta)/360.)*math.pi*2)
tmp = (1./(cosAngle))
thetaLinearLst.append(tmp)
thetaArray = np.asarray(thetaLst)
thetaLinearArray = np.asarray(thetaLinearLst)
intensityArray = np.asarray(intensityLst)
intensityLinearArray = np.log(intensityArray)
return thetaArray, intensityArray, thetaLinearArray, intensityLinearArray
def plotOriginalData(dictionary, options):
#plot the curve mode and log-ratio mode of original data
thetaLst = []
xlinearLst=[]
intensityLst = []
for theta, intensity in dictionary.iteritems():
thetaLst.append(float(theta))
intensityLst.append(intensity)
cosAngle = math.cos((float(theta)/360.)*math.pi*2)
x = (1./(cosAngle))
xlinearLst.append(x)
xdata = np.asarray(thetaLst)
ydata = np.asarray(intensityLst)
ydataInv = ydata[::-1]
#print xdata, ydata
x0 = [int(x) for x in options.x0.split(',')]
y0 = [int(x) for x in options.y0.split(',')]
colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']
plt.figure(figsize=(12.5, 10))
#plt.subplot(221)
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
if (i<len(colors)):
plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
else:
i = i-len(colors)
plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
plt.xlabel(r'$\theta$ ($^\circ$)', fontsize = 24, labelpad = 10)
plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
#plt.xlim(-70, 70)
plt.grid(True, linestyle = '--', alpha = 0.5)
#plot the linear format (log-ratio mode) of original data
xlinear = np.asarray(xlinearLst)
ylinear = np.log(ydata)
plt.figure(figsize=(12.5, 10))
#plt.subplot(222)
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
if (i<len(colors)):
plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
else:
i = i-len(colors)
plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
plt.xlabel(r'1/cos($\theta$)', fontsize = 24, labelpad = 10)
plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
plt.grid(True, linestyle = '--', alpha = 0.5)
plt.show()
def compareFitData(dictionary, tmp, options):
thetaLst = []
xlinearLst=[]
intensityLst = []
for theta, intensity in dictionary.iteritems():
thetaLst.append(float(theta))
intensityLst.append(intensity)
cosAngle = math.cos((float(theta)/360.)*math.pi*2)
x = (1./(cosAngle))
xlinearLst.append(x)
xdata = np.asarray(thetaLst)
ydata = np.asarray(intensityLst)
ydataInv = ydata[::-1]
#print xdata, ydata
x0 = [int(x) for x in options.x0.split(',')]
y0 = [int(x) for x in options.y0.split(',')]
colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']
plt.figure(figsize=(25, 20))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.4, hspace=0.4)
#plot the curvilinear format of original data
plt.subplot(221)
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
if (i<len(colors)):
plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
else:
i = i-len(colors)
plt.plot(xdata, ydata[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
plt.xlabel(r'$\theta$ ($^\circ$)', fontsize = 24, labelpad = 10)
plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
plt.title('Original: %s'%options.tiltseries)
#plt.xlim(-70, 70)
plt.grid(True, linestyle = '--', alpha = 0.5)
#plot the linear format (log-ratio mode) of original data
xlinear = np.asarray(xlinearLst)
ylinear = np.log(ydata)
#plt.figure(figsize=(12.5, 10))
plt.subplot(222)
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
if (i<len(colors)):
plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
else:
i = i-len(colors)
plt.plot(xlinear, ylinear[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
plt.xlabel(r'1/cos($\theta$)', fontsize = 24, labelpad = 10)
plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
plt.title('Original: %s'%options.tiltseries)
plt.grid(True, linestyle = '--', alpha = 0.5)
I0, d0, theta0, alpha0, A, B, MFP = tmp
x0 = [int(x) for x in options.x0.split(',')]
y0 = [int(x) for x in options.y0.split(',')]
xfit = []
yfit = []
xdata = []
xModified=[]
ydata = []
ydataLinear = []
I0Lst = []
for theta, intensity in dictionary.iteritems():
for i in range(len(intensity)):
theta_i = float(theta) + theta0
xModified.append(theta_i)
#angle.append(theta_i)
cosAngle = math.cos((float(theta)/360.)*math.pi*2)
cosTheta = math.cos((theta_i/360.)*math.pi*2)
cosAlpha = math.cos((alpha0/360.)*math.pi*2)
intensityIn = math.log(I0)
y = intensityIn - (1./(MFP * cosTheta * cosAlpha)) * d0
yfit.append(y)
#print intensity
########which one is used as ydata in corrected plots
y2 = math.log(intensity[i])
#y2 = math.log(A * (intensity[i] - B))
ydataLinear.append(y2)
ydata.append(intensity[i])
#x = (-1) * (1./(MFP * cosTheta * cosAlpha))
x = (1./(cosTheta))
xfit.append(x)
#x2 = (-1) * (1./(MFP * cosAngle))
x2 = (1./(cosAngle))
xdata.append(x2)
colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']
markers = ['s', 'o', '^', 'v', 'x', '*', '+', 'd', 'D', '<', '>', 'p', '8', 'H']
#plot the linear format (log-ratio mode) of fitted data after determination of parameters
xfit = np.asarray(xfit)
xfit2 = np.reshape(xfit, (nslices, len(x0)))
yfit = np.asarray(yfit)
yfit2 = np.reshape(yfit, (nslices, len(x0)))
xdata = np.asarray(xdata)
xdata2 = np.reshape(xdata, (nslices, len(x0)))
ydataLinear = np.asarray(ydataLinear)
ydataLinear2 = np.reshape(ydataLinear, (nslices, len(x0)))
residuals = ydataLinear - yfit
fres = sum(residuals**2)
text_str = 'I0=%g\nd0=%g\ntheta0=%g\nalpha0=%g\ngain=%g\nB=%g\nMFP=%g\nres=%g'%(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], fres)
plt.subplot(224)
#plt.figure(figsize=(12.5, 10))
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
plt.plot(xfit2[:, i], ydataLinear2[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
#plt.plot(xfit, ydata, 'g^')
plt.title('Least-squares fitting: %s'%options.tiltseries)
plt.xlabel(r'1/cos($\theta$+$\theta_0$)', fontsize = 24, labelpad = 10)
plt.ylabel('ln(Intensity)', fontsize = 24, labelpad = 10)
plt.grid(True, linestyle = '--', alpha = 0.5)
#plt.show()
#plot the curvilinear format of fitted data after determination of parameters
#xdata, xModified, ydata, yfit = fitDataCurve(dictionary, tmp)
xdata = np.asarray(xdata)
xdata2 = np.reshape(xdata, (nslices, len(x0)))
xModified = np.asarray(xModified)
xModified2 = np.reshape(xModified, (nslices, len(x0)))
ydata = np.asarray(ydata)
ydata2 = np.reshape(ydata, (nslices, len(x0)))
ydata2Inv = ydata2[::-1]
yfit = np.asarray(yfit)
yfit2 = np.reshape(yfit, (nslices, len(x0)))
residuals = ydata - yfit
fres = sum(residuals**2)
text_str = 'I0=%g\nd0=%g\ntheta0=%g\nalpha0=%g\ngain=%g\nB=%g\nMFP=%g\nres=%g'%(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], fres)
#plt.plot(xModified2, yfit2, 'r--', linewidth=2.0)
#plt.figure(figsize=(12.5, 10))
plt.subplot(223)
for i in range(len(x0)):
boxPosition = '%g,%g'%(x0[i], y0[i])
plt.plot(xModified2[:, i], ydata2[:, i], markers[i], label = boxPosition, markersize=5, color = colors[i])
plt.axvline(0, linestyle='--', color='k', linewidth=2.0)
plt.xticks(fontsize = 20)
plt.yticks(fontsize = 20)
plt.legend(fontsize = 18)
ax = plt.gca()
ax.tick_params(pad = 10)
plt.title('Least-squares fitting: %s'%options.tiltseries)
plt.xlabel(r'$\theta$+$\theta_0$ ($^\circ$)', fontsize = 24, labelpad = 10)
plt.ylabel('Intensity', fontsize = 24, labelpad = 10)
#plt.xlim(-70, 70)
plt.grid(True, linestyle = '--', alpha = 0.5)
pdfName = options.tiltseries.split('.')[0]+'_results.pdf'
print pdfName
with PdfPages(pdfName) as pdf:
pdf.savefig()
plt.close()
#plt.show()
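# gamma0 is the effective pre-tilt of the sample normal combining both offset
# angles: cos(gamma0) = 1 / sqrt(tan^2(theta0) + tan^2(alpha0) + 1).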
def calculateGamma0(theta0, alpha0):
cosTheta0 = math.cos((theta0/360.)*math.pi*2)
cosAlpha0 = math.cos((alpha0/360.)*math.pi*2)
tanTheta0 = math.tan((theta0/360.)*math.pi*2)
tanAlpha0 = math.tan((alpha0/360.)*math.pi*2)
#tmp = 1./(cosTheta0 * cosTheta0 * cosAlpha0 * cosAlpha0) - tanTheta0 * tanTheta0 * tanAlpha0 * tanAlpha0
tmp = tanTheta0 * tanTheta0 + tanAlpha0 * tanAlpha0 + 1
cosGamma0 = math.pow(tmp, -0.5)
gamma0 = math.acos(cosGamma0)*360./(math.pi*2)
return gamma0
def optimizationFuncFullModel0(x): # use complete model
I0, d0, theta0, alpha0, A, B, MFP = x
#I0, d0, theta0, alpha0, B, MFP = x
#A = 1
cosTheta0 = math.cos((theta0/360.)*math.pi*2)
cosAlpha0 = math.cos((alpha0/360.)*math.pi*2)
tanTheta0 = math.tan((theta0/360.)*math.pi*2)
tanAlpha0 = math.tan((alpha0/360.)*math.pi*2)
#tmp = 1./(cosTheta0 * cosTheta0 * cosAlpha0 * cosAlpha0) - tanTheta0 * tanTheta0 * tanAlpha0 * tanAlpha0
tmp = tanTheta0 * tanTheta0 + tanAlpha0 * tanAlpha0 + 1
cosGamma0 = math.pow(tmp, -0.5)
func = 0
n = 0
for theta, intensity in dictionary.iteritems():
for i in range(len(intensity)):
A = math.fabs(A)
I0 = math.fabs(I0)
intensityExit = math.log(A * (intensity[i] - B))
intensityIn = math.log(I0)
theta_i = float(theta) + theta0
cosTheta = math.cos((theta_i/360.)*math.pi*2)
#cosAlpha = math.cos((alpha0/360.)*math.pi*2)
#err = intensityIn - (1./(MFP * cosTheta * cosAlpha)) * d0 - intensityExit
err = intensityIn - (1./(MFP * cosTheta * cosGamma0)) * d0 * cosTheta0 - intensityExit
func += err * err
n+=1
func = func/n
return func
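# Helper callables for scipy.optimize.basinhopping: MyBounds is used as
# accept_test (it rejects steps that leave the box constraints) and
# MyTakeStep3 as take_step (random perturbations with per-parameter step
# sizes).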
class MyBounds(object):
def __init__(self, xmax = [], xmin = []):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
class MyTakeStep3(object):
def __init__(self, stepsize=0.01):
self.stepsize = stepsize
def __call__(self, x):
s = self.stepsize
#p0 = [I0, d0, theta0, alpha0, A, B, MFP]
x = np.float64(x)
x[0] += np.random.uniform(-1000.*s, 1000.*s)
x[1] += np.random.uniform(-10.*s, 10.*s)
x[2] += np.random.uniform(-s, s)
x[3] += np.random.uniform(-s, s)
x[4] += np.random.uniform(-10.*s, 10.*s)
x[5] += np.random.uniform(-100.*s, 100.*s)
x[6] += np.random.uniform(-10.*s, 10.*s)
return x
def flattenList(nestedLst):
flattenLst = list(chain.from_iterable(nestedLst))
return flattenLst
def blockMean(img, boxsizeX, boxsize):
nx, ny = img.get_xsize(), img.get_ysize()
nxBlock = int(nx/boxsizeX)
nyBlock = int(ny/boxsize)
#print nxBlock, nyBlock
blockMeanValues = []
for i in range(nxBlock):
x0 = i*boxsizeX
for j in range(nyBlock):
y0 = j*boxsize
r = Region(x0, y0, boxsizeX, boxsize)
blkImg = img.get_clip(r)
blockMeanValue = oneBlockMean(blkImg)
blockMeanValues.append(blockMeanValue)
return blockMeanValues
def oneBlockMean(img):
nx, ny = img.get_xsize(), img.get_ysize()
ary=EMNumPy.em2numpy(img)
ary = reject_outliers(ary, m = 3)
blkMean = np.mean(ary)
blkSigma = np.std(ary)
if (blkMean < 0):
blkMean = blkMean * (-1) #average pixel values must be positive
if (blkMean > 30000):
offset = float(options.addOffset)
blkMean = blkMean + offset
return blkMean
def reject_outliers(data, m=2):
return data[abs(data - np.mean(data)) < m * np.std(data)]
if __name__ == "__main__":
main() | gpl-2.0 | -8,677,815,489,985,896,000 | 40.003778 | 266 | 0.6252 | false |
masamitsu-murase/pausable_unittest | pausable_unittest/utils/winutils.py | 1 | 1893 | # coding: utf-8
import subprocess
import tempfile
import os
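# register_schtasks creates (or replaces) a Windows scheduled task that runs
# ``path`` at user logon, exports the task definition as XML, flips the two
# battery-related settings to "false" so the task also runs on battery power,
# and re-imports the patched XML via "schtasks.exe /Create ... /XML".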
def register_schtasks(task_name, path, user, password=None, admin=True):
command = ["schtasks.exe", "/Create", "/RU", user]
if password:
command += ["/RP", password]
command += [
"/SC", "ONLOGON", "/TN", task_name, "/TR", '"' + path + '"', "/F"
]
if admin:
command += ["/RL", "HIGHEST"]
else:
command += ["/RL", "LIMITED"]
subprocess.check_output(command, stderr=subprocess.STDOUT)
command = ["schtasks.exe", "/Query", "/TN", task_name, "/XML", "ONE"]
xml = subprocess.check_output(command,
stderr=subprocess.STDOUT,
universal_newlines=True)
xml = xml.replace(
"<DisallowStartIfOnBatteries>true</DisallowStartIfOnBatteries>",
"<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>")
xml = xml.replace(
"<StopIfGoingOnBatteries>true</StopIfGoingOnBatteries>",
"<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>")
with tempfile.NamedTemporaryFile(delete=False, mode="w") as xml_file:
xml_file.write(xml)
xml_file.close()
xml_filename = xml_file.name
try:
command = [
"schtasks.exe", "/Create", "/TN", task_name, "/F", "/XML",
xml_filename
]
if password:
command += ["/RU", user, "/RP", password]
subprocess.check_output(command, stderr=subprocess.STDOUT)
finally:
os.remove(xml_filename)
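# Minimal usage sketch (task name, path and credentials below are only
# illustrative examples, not values used elsewhere in this project):
#   register_schtasks("pausable_unittest", r"C:\tests\resume_runner.bat",
#                     "testuser", password="secret", admin=True)
#   ...
#   unregister_schtasks("pausable_unittest")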
def unregister_schtasks(task_name):
command = ["schtasks.exe", "/Delete", "/TN", task_name, "/F"]
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode not in (0, 1):
raise
| mit | -6,031,662,333,350,813,000 | 32.418182 | 73 | 0.570523 | false |
listyque/TACTIC-Handler | thlib/side/client/examples/checkin_render_layer.py | 1 | 4903 | import sys, os, shutil, getopt
from tactic_client_lib import TacticServerStub
SEARCH_TYPE = "prod/render"
def move_file(file_paths, new_dir):
'''move file to the handoff dir'''
new_file_paths = []
for file_path in file_paths:
file_name = os.path.basename(file_path)
new_file_path = '%s/%s' %(new_dir, file_name)
shutil.move(file_path, new_file_path)
'''
while not os.path.exists(new_file_path):
sys.stdout.write('.')
'''
new_file_paths.append(new_file_path)
return new_file_paths
def expand_paths( file_path, file_range ):
'''expands the file paths, replacing # as specified in the file_range'''
file_paths = []
# frame_by is not really used here yet
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
frame_start, frame_end = file_range.split("-")
frame_start = int(frame_start)
frame_end = int(frame_end)
frame_by = int(frame_by)
# find out the number of #'s in the path
padding = len( file_path[file_path.index('#'):file_path.rindex('#')] )+1
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
file_paths.append(expanded)
return file_paths
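# Example with hypothetical file names: expand_paths("plates.####.png", "1-3")
# returns ["plates.0001.png", "plates.0002.png", "plates.0003.png"]; an
# optional "/step" suffix in the range (e.g. "1-20/2") sets frame_by.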
def main(args, login=None):
# USAGE: checkin_render_layer.py
shot_code = args[0]
layer_name = args[1]
version = args[2]
context = args[3]
file_range = args[4]
pattern = args[5]
server = TacticServerStub(login)
# do the actual work
server.start("Checked in file group [%s] to shot [%s] layer [%s]" % (pattern, shot_code, layer_name))
try:
# move the file
dir = server.get_handoff_dir()
paths = expand_paths(pattern, file_range)
move_file(paths, dir)
file_name = os.path.basename(pattern)
new_pattern = '%s/%s' % (dir, file_name)
print("Files moved to handoff dir.\n")
# checkin the moved files
filters = []
filters.append(('shot_code', shot_code))
filters.append(('name', layer_name))
results = server.query('prod/layer', filters)
# take the first one
if results:
id = results[0].get('id')
search_type = server.build_search_type('prod/layer')
# find the layer snapshot
filters = []
filters.append(('version', version))
filters.append(('search_type', search_type))
filters.append(('search_id', id))
#TODO : may need a context to search for the proper layer
results = server.query('sthpw/snapshot', filters)
snap_code = ''
if results:
snap_code = results[0].get('code')
# find the render
render = None
filters = []
filters.append(('search_type', search_type))
filters.append(('search_id', id))
filters.append(('snapshot_code', snap_code))
results = server.query(SEARCH_TYPE, filters)
if results:
render = results[0]
if not render:
render_data = {
'search_type': search_type,
'search_id': id,
'snapshot_code': snap_code
}
render = server.insert("prod/render", render_data)
'''
results = server.query(SEARCH_TYPE, filters)
render_id = 0
if results:
render_id = results[0].get('id')
# find the render id
search_key = server.build_search_key(SEARCH_TYPE, render_id, column='id')
'''
file_type = 'main'
# run group checkin
server.group_checkin(render.get("__search_key__"), context=context, file_path=new_pattern, file_type=file_type, file_range=file_range)
except:
server.abort()
raise
else:
server.finish()
if __name__ == '__main__':
executable = sys.argv[0]
#args = sys.argv[1:]
login = None
try:
opts, args = getopt.getopt(sys.argv[1:], "l:h", ["login=","help"])
except getopt.error as msg:
print(msg)
sys.exit(2)
# process options
for o, a in opts:
if o in ("-l", "--login"):
login = a
if o in ("-h", "--help"):
print("python checkin_render_layer.py <shot_code> <layer_name> <version> <context> <file_range> <file_pattern>")
print("python checkin_render_layer.py S0001 layer1 1 lgt 1-20 D:/file_dir/plates.####.png")
sys.exit(0)
print(args, len(args))
if len(args) != 6:
print("python checkin_render_layer.py <shot_code> <layer_name> <version> <context> <file_range> <file_pattern>")
print("python checkin_render_layer.py S0001 layer1 1 lgt 1-20 D:/file_dir/plates.####.png")
sys.exit(2)
main(args, login=login)
| epl-1.0 | 7,825,519,813,304,723,000 | 31.686667 | 142 | 0.563533 | false |
beezz/trustpaylib | trustpaylib.py | 1 | 17356 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
trustpaylib
===========
TrustPay payment solution constants and utils.
"""
import sys
import hmac
import hashlib
import collections
try:
unicode
from urllib import urlencode
except NameError:
def unicode(s):
return s
from urllib.parse import urlencode
#: Default test service url (TrustCard doesn't have testing service)
TEST_API_URL = "https://ib.test.trustpay.eu/mapi/pay.aspx"
#: TrustPay service url.
API_URL = "https://ib.trustpay.eu/mapi/pay.aspx"
#: TrustCard service url.
TRUSTCARD_API_URL = "https://ib.trustpay.eu/mapi/cardpayments.aspx"
__currencies = (
"CZK", "EUR", "GBP", "HUF", "PLN",
"USD", "RON", "BGN", "HRK", "LTL", "TRY",
)
#: Supported currencies.
CURRENCIES = collections.namedtuple(
"TrustPayCurrencies",
__currencies,
)(*__currencies)
__languages = (
"bg", "bs", "cs", "de", "en", "es",
"et", "hr", "hu", "it", "lt", "lv",
"pl", "ro", "ru", "sk", "sl", "sr",
"uk",
)
#: Supported languages.
LANGUAGES = collections.namedtuple(
"TrustPayLanguages",
__languages,
)(*__languages)
__countries = (
"CZ", "HU", "PL", "SK", "EE", "BG",
"RO", "HR", "LV", "LT", "SI", "TR",
"FI",
)
__countries_verbose = (
"Czech Republic", "Hungary", "Poland",
"Slovak Republic", "Estonia", "Bulgaria",
"Romania", "Croatia", "Latvia", "Lithuania",
"Slovenia", "Turkey", "Finland",
)
#: Supported countries
COUNTRIES = collections.namedtuple(
"TrustPayCountries",
__countries,
)(*__countries)
#: Supported countries verbose version.
COUNTRIES_VERBOSE = collections.namedtuple(
"TrustPayCountriesVerbose",
__countries,
)(*__countries_verbose)
__ResultCodes = collections.namedtuple(
"TrustPayResultCodes",
[
"SUCCESS", "PENDING", "ANNOUNCED", "AUTHORIZED",
"PROCESSING", "AUTHORIZED_ONLY", "INVALID_REQUEST",
"UNKNOWN_ACCOUNT", "MERCHANT_ACCOUNT_DISABLED",
"INVALID_SIGN", "USER_CANCEL", "INVALID_AUTHENTICATION",
"DISPOSABLE_BALANCE", "SERVICE_NOT_ALLOWED", "PAYSAFECARD_TIMEOUT",
"GENERAL_ERROR", "UNSUPPORTED_CURRENCY_CONVERSION",
]
)
#: Result codes of redirects and notifications.
RESULT_CODES = __ResultCodes(
"0", "1", "2", "3", "4", "5", "1001", "1002", "1003", "1004",
"1005", "1006", "1007", "1008", "1009", "1100", "1101",
)
__rc_desc = collections.namedtuple(
"TrustPayResultCodesDesc",
["short", "long"],
)
#: Result codes of redirects and notifications.
#: In verbose form with short and long description of result code.
RESULT_CODES_DESC = {
RESULT_CODES.SUCCESS: __rc_desc(
"Success",
"Payment was successfully processed.",
),
RESULT_CODES.PENDING: __rc_desc(
"Pending",
"Payment is pending (offline payment)",
),
RESULT_CODES.ANNOUNCED: __rc_desc(
"Announced",
(
"TrustPay has been notified that the client"
"placed a payment order or has made payment,"
" but further confirmation from 3rd party is needed."
),
),
RESULT_CODES.AUTHORIZED: __rc_desc(
"Authorized",
(
"Payment was successfully authorized. Another"
" notification (with result code 0 - success)"
" will be sent when TrustPay receives and processes"
" payment from 3rd party."
),
),
RESULT_CODES.PROCESSING: __rc_desc(
"Processing",
(
"TrustPay has received the payment, but it"
" must be internally processed before it is"
" settled on the merchant‘s account."
),
),
RESULT_CODES.AUTHORIZED_ONLY: __rc_desc(
"Authorized only",
(
"Card payment was successfully authorized,"
" but not captured. Subsequent MAPI call(s)"
" is (are) required to capture payment."
),
),
RESULT_CODES.INVALID_REQUEST: __rc_desc(
"Invalid request",
"Data sent is not properly formatted.",
),
RESULT_CODES.UNKNOWN_ACCOUNT: __rc_desc(
"Unknown account",
"Account with specified ID was not found.",
),
RESULT_CODES.MERCHANT_ACCOUNT_DISABLED: __rc_desc(
"Merchant's account disabled",
"Merchant's account has been disabled.",
),
RESULT_CODES.INVALID_SIGN: __rc_desc(
"Invalid sign",
"The message is not signed correctly.",
),
RESULT_CODES.USER_CANCEL: __rc_desc(
"User cancel",
"Customer has cancelled the payment.",
),
RESULT_CODES.INVALID_AUTHENTICATION: __rc_desc(
"Invalid authentication",
"Request was not properly authenticated",
),
RESULT_CODES.DISPOSABLE_BALANCE: __rc_desc(
"Disposable balance",
"Requested transaction amount is greater than disposable balance.",
),
RESULT_CODES.SERVICE_NOT_ALLOWED: __rc_desc(
"Service not allowed",
(
"Service cannot be used or permission to"
" use given service has not been granted."
),
),
RESULT_CODES.PAYSAFECARD_TIMEOUT: __rc_desc(
"PaySafeCard timeout",
"Cards allocation will be cancelled.",
),
RESULT_CODES.GENERAL_ERROR: __rc_desc(
"General Error",
"Internal error has occurred.",
),
RESULT_CODES.UNSUPPORTED_CURRENCY_CONVERSION: __rc_desc(
"Unsupported currency conversion",
"Currency conversion for requested currencies is not supported.",
),
}
#: TrustPay environment class
#: Just attributes holder for TrustPay's variables.
TrustPayEnvironment = collections.namedtuple(
"TrustPayEnvironment",
[
"api_url",
"redirect_url",
"success_url",
"error_url",
"cancel_url",
"notification_url",
"aid",
"secret_key",
"currency",
"language",
"country",
],
)
TrustPayRequest = collections.namedtuple(
"TrustPayRequest",
[
"AID", "AMT", "CUR", "REF", "URL", "RURL",
"CURL", "EURL", "NURL", "SIG", "LNG",
"CNT", "DSC", "EMA",
],
)
TrustPayNotification = collections.namedtuple(
"TrustPayNotification",
[
"AID", "TYP", "AMT", "CUR", "REF",
"RES", "TID", "OID", "TSS", "SIG",
],
)
TrustPayRedirect = collections.namedtuple(
"TrustPayRedirect",
["REF", "RES", "PID"],
)
def _build_nt_cls(
cls,
kw,
fnc=lambda v: v if v is None else unicode(v),
):
_kw = kw.copy()
inst = cls(*[fnc(_kw.pop(attr, None)) for attr in cls._fields])
if _kw:
raise ValueError("Got unexpected field names: %r" % _kw.keys())
return inst
def build_redirect(**kw):
return _build_nt_cls(TrustPayRedirect, kw)
def build_notification(**kw):
return _build_nt_cls(TrustPayNotification, kw)
def build_pay_request(**kw):
return _build_nt_cls(TrustPayRequest, kw)
def build_test_environment(**kw):
kw["api_url"] = kw.get("api_url", TEST_API_URL)
return _build_nt_cls(TrustPayEnvironment, kw, fnc=lambda v: v)
def build_environment(**kw):
kw["api_url"] = kw.get("api_url", API_URL)
return _build_nt_cls(TrustPayEnvironment, kw, fnc=lambda v: v)
def sign_message(key, msg):
if sys.version_info[0] == 3:
msg, key = str.encode(msg), str.encode(key)
return hmac.new(key, msg, hashlib.sha256).hexdigest().upper()
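# Signatures are the upper-cased hex HMAC-SHA256 digest of a concatenated
# message computed with the merchant's secret key: for payment requests the
# message is AID+AMT+CUR+REF (TrustPay.SIGNATURE_ATTRS), for notifications it
# is AID followed by TYP, AMT, CUR, REF, RES, TID, OID and TSS.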
def extract_attrs(obj, attrs):
return [getattr(obj, attr) for attr in attrs]
def merge_env_with_request(
env,
request,
fnc=lambda v1, v2: v1 if v2 is None else v2,
):
kw = {}
kw['AID'] = fnc(env.aid, request.AID)
kw['URL'] = fnc(env.redirect_url, request.URL)
kw['RURL'] = fnc(env.success_url, request.RURL)
kw['CURL'] = fnc(env.cancel_url, request.CURL)
kw['EURL'] = fnc(env.error_url, request.EURL)
kw['NURL'] = fnc(env.notification_url, request.NURL)
kw['CUR'] = fnc(env.currency, request.CUR)
kw['LNG'] = fnc(env.language, request.LNG)
kw['CNT'] = fnc(env.country, request.CNT)
return request._replace(**kw)
def _build_link(url, query_dict, fmt="{url}?{params}"):
return fmt.format(url=url, params=urlencode(query_dict))
def _filter_dict_nones(d):
res = {}
for key, value in d.items():
if value is not None:
res[key] = value
return res
def _initial_data(pay_request):
return _filter_dict_nones(pay_request._asdict())
def build_link_for_request(url, request):
return _build_link(url, _initial_data(request))
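# Minimal end-to-end sketch (aid, secret_key, URL and order values below are
# purely illustrative placeholders):
#   env = build_test_environment(aid="1234567890", secret_key="abcd1234",
#                                currency=CURRENCIES.EUR,
#                                notification_url="https://example.com/notify")
#   sdk = TrustPay(env)
#   link = sdk.build_link(build_pay_request(AMT="10.00", REF="order-42"))
# build_link() merges the environment defaults into the request, signs it and
# validates it before producing the redirect URL.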
class TrustPay(object):
#: Requests attributes from which signature message is
#: concatenated (in this specific order).
SIGNATURE_ATTRS = ("AID", "AMT", "CUR", "REF")
#: Notification signature attributes.
NOTIFICATION_SIGNATURE_ATTRS = (
"AID", "TYP", "AMT", "CUR", "REF",
"RES", "TID", "OID", "TSS"
)
#: Not signed request required attributes.
REQUEST_REQUIRED_ATTRS = ("AID", "CUR")
#: Signed request required attributes.
SIGNED_REQUEST_REQUIRED_ATTRS = REQUEST_REQUIRED_ATTRS + (
"AMT", "REF", "SIG")
#: Supported currencies (:attr:`trustpaylib.CURRENCIES`)
CURRENCIES = CURRENCIES
#: Supported languages
LANGUAGES = LANGUAGES
#: Supported countries
COUNTRIES = COUNTRIES
RESULT_CODES = RESULT_CODES
RESULT_CODES_DESC = RESULT_CODES_DESC
def __init__(self, environment):
self.environment = environment
def sign_request(self, pay_request):
"""
Sign payment request.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
Payment request already prepared for signing.
Returns:
New :class:`trustpaylib.TrustPayRequest` instance
with `SIG` attribute set to generated signature.
"""
return pay_request._replace(
SIG=self.pay_request_signature(pay_request))
def pay_request_signature(self, pay_request):
"""
        Use the environment's secret key to generate the hash
        used to sign the payment request.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
Payment request already prepared for signing.
Returns:
Hash.
"""
return sign_message(
self.environment.secret_key,
self.create_signature_msg(pay_request),
)
def merge_env_with_request(self, pay_request):
"""
Merge specific attributes of environment with payment request.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
Payment request to merge.
Returns:
New :class:`trustpaylib.TrustPayRequest` instance
with attributes merged with those in environment
if not already set on `pay_request`.
"""
return merge_env_with_request(
self.environment,
pay_request,
)
def finalize_request(
self,
pay_request,
sign=True,
validate=True,
merge_env=True
):
"""
Raw payment request is merged with environment, signed and
validated.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
sign (bool): If `False`, don't sign pay request.
validate (bool): If `False`, don't validate pay request.
merge_env (bool): If `False`, don't merge pay request with env.
Returns:
New :class:`trustpaylib.TrustPayRequest` prepared for
building link or creating form.
"""
pr = pay_request
if merge_env:
pr = self.merge_env_with_request(pay_request)
if sign:
pr = self.sign_request(pr)
if validate:
pr = self.validate_request(pr)
return pr
def build_link(
self,
pay_request,
sign=True,
validate=True,
merge_env=True
):
"""
Finalizes raw payment request and generates redirect link.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
sign (bool): If `False`, don't sign pay request.
validate (bool): If `False`, don't validate pay request.
merge_env (bool): If `False`, don't merge pay request with env.
Returns:
string: Redirect link.
"""
return _build_link(
self.environment.api_url,
self.initial_data(
self.finalize_request(
pay_request,
sign=sign,
validate=validate,
merge_env=merge_env,
)
),
)
def check_notification_signature(self, notification):
"""
Check if notification is signed with environment's secret key.
Args:
notification (:class:`trustpaylib.TrustPayNotification`)
Returns:
bool
"""
msg = unicode("").join(
[self.environment.aid, ] +
extract_attrs(notification, self.NOTIFICATION_SIGNATURE_ATTRS[1:])
)
return sign_message(
self.environment.secret_key, msg) == notification.SIG
@classmethod
def create_signature_msg(cls, pay_request):
"""
        Concatenate a set of payment request attributes to create the
        message to be hashed.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
Returns:
string: Signature message.
"""
return unicode("").join(
[
attr for attr in cls.extract_signature_attrs(pay_request)
if attr is not None
]
)
@classmethod
def get_result_desc(cls, rc):
"""Returns description of result code.
Args:
rc (int|string):
Result code from redirect or notification.
Returns:
Named tuple with `short` and `long` attributes
for short, long description.
(:attr:`trustpaylib.RESULT_CODES_DESC`)
>>> TrustPay.get_result_desc(1001).short
'Invalid request'
>>> TrustPay.get_result_desc(1001).long
'Data sent is not properly formatted.'
"""
return cls.RESULT_CODES_DESC[str(rc)]
@classmethod
def get_result_desc_from_notification(cls, notif):
return cls.get_result_desc(notif.RES)
@classmethod
def get_result_desc_from_redirect(cls, redirect):
return cls.get_result_desc(redirect.RES)
@classmethod
def validate_currency(cls, pay_request):
if (
pay_request.CUR is not None
and pay_request.CUR not in cls.CURRENCIES
):
raise ValueError(
"Currency [%r] not in supported currencies [%r]" % (
pay_request.CUR, cls.CURRENCIES,
)
)
@classmethod
def validate_language(cls, pay_request):
if (
pay_request.LNG is not None
and pay_request.LNG not in cls.LANGUAGES
):
raise ValueError(
"Language [%r] not int supported languages [%r]" % (
pay_request.LNG, cls.LANGUAGES,
)
)
@classmethod
def validate_country(cls, pay_request):
if (
pay_request.CNT is not None
and pay_request.CNT not in cls.COUNTRIES
):
raise ValueError(
"Country [%r] not int supported countries [%r]" % (
pay_request.CNT, cls.COUNTRIES,
)
)
@classmethod
def validate_request(cls, pay_request):
"""Validate payment request.
Check if all attributes for signed/non-signed payment request
are present. Check if amount has at max two decimal places.
On validation errors, raises :exc:`ValueError`.
Args:
pay_request (:class:`trustpaylib.TrustPayRequest`):
Returns:
Given `pay_request`.
Raises:
ValueError
"""
missing = []
required_attrs = (
cls.REQUEST_REQUIRED_ATTRS
if pay_request.SIG is None else
cls.SIGNED_REQUEST_REQUIRED_ATTRS
)
for attr in required_attrs:
if attr not in cls.initial_data(pay_request):
missing.append(attr)
if pay_request.AMT is not None and '.' in pay_request.AMT:
if len(pay_request.AMT.split('.')[1]) > 2:
raise ValueError(
"Amount can have at max"
" 2 decimal places. [%s]" % pay_request.AMT)
if missing:
raise ValueError("Required attributes missing: %r" % missing)
cls.validate_currency(pay_request)
cls.validate_language(pay_request)
cls.validate_country(pay_request)
return pay_request
@classmethod
def extract_signature_attrs(cls, pay_request):
return extract_attrs(pay_request, cls.SIGNATURE_ATTRS)
@staticmethod
def initial_data(pay_request):
return _initial_data(pay_request)
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause | 4,386,366,306,178,861,600 | 26.546032 | 78 | 0.576755 | false |
andrewleech/pydio-sync | src/pydio/sdk/remote.py | 1 | 44472 | #
# Copyright 2007-2014 Charles du Jeu - Abstrium SAS <team (at) pyd.io>
# This file is part of Pydio.
#
# Pydio is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pydio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pydio. If not, see <http://www.gnu.org/licenses/>.
#
# The latest code can be found at <http://pyd.io/>.
#
import urllib
import json
import hmac
import random
import unicodedata
import platform
from pydio.utils.functions import hashfile
from hashlib import sha256
from hashlib import sha1
from urlparse import urlparse
from requests.exceptions import ConnectionError, RequestException
import keyring
from keyring.errors import PasswordSetError
import xml.etree.ElementTree as ET
from exceptions import PydioSdkException, PydioSdkBasicAuthException, PydioSdkTokenAuthException, \
PydioSdkQuotaException, PydioSdkPermissionException, PydioSdkTokenAuthNotSupportedException
from .utils import *
from pydio import TRANSFER_RATE_SIGNAL, TRANSFER_CALLBACK_SIGNAL
from pydio.utils import i18n
_ = i18n.language.ugettext
# -*- coding: utf-8 -*-
PYDIO_SDK_MAX_UPLOAD_PIECES = 40 * 1024 * 1024
class PydioSdk():
def __init__(self, url='', ws_id='', remote_folder='', user_id='', auth=(), device_id='python_client',
skip_ssl_verify=False, proxies=None):
self.ws_id = ws_id
self.device_id = device_id
self.verify_ssl = not skip_ssl_verify
if self.verify_ssl and "REQUESTS_CA_BUNDLE" in os.environ:
self.verify_ssl = os.environ["REQUESTS_CA_BUNDLE"]
self.base_url = url.rstrip('/') + '/api/'
self.url = url.rstrip('/') + '/api/' + ws_id
self.remote_folder = remote_folder
self.user_id = user_id
self.interrupt_tasks = False
self.upload_max_size = PYDIO_SDK_MAX_UPLOAD_PIECES
self.rsync_server_support = False
self.stat_slice_number = 200
self.stick_to_basic = False
if user_id:
self.auth = (user_id, keyring.get_password(url, user_id))
else:
self.auth = auth
self.tokens = None
self.rsync_supported = False
self.proxies = proxies
def set_server_configs(self, configs):
"""
Server specific capacities and limitations, provided by the server itself
:param configs: dict()
:return:
"""
if 'UPLOAD_MAX_SIZE' in configs and configs['UPLOAD_MAX_SIZE']:
self.upload_max_size = min(int(float(configs['UPLOAD_MAX_SIZE'])), PYDIO_SDK_MAX_UPLOAD_PIECES)
if 'RSYNC_SUPPORTED' in configs and configs['RSYNC_SUPPORTED'] == "true":
self.rsync_server_support = True
#self.upload_max_size = 8*1024*1024;
if 'RSYNC_SUPPORTED' in configs:
self.rsync_supported = configs['RSYNC_SUPPORTED'] == 'true'
pass
def set_interrupt(self):
self.interrupt_tasks = True
def remove_interrupt(self):
self.interrupt_tasks = False
def urlencode_normalized(self, unicode_path):
"""
Make sure the urlencoding is consistent between various platforms
        E.g., we force accented chars to be encoded as a single character, not the ASCII char plus a combining accent.
:param unicode_path:
:return:
"""
if platform.system() == 'Darwin':
try:
test = unicodedata.normalize('NFC', unicode_path)
unicode_path = test
except ValueError as e:
pass
return urllib.pathname2url(unicode_path.encode('utf-8'))
def normalize(self, unicode_path):
if platform.system() == 'Darwin':
try:
test = unicodedata.normalize('NFC', unicode_path)
return test
except ValueError as e:
return unicode_path
else:
return unicode_path
def normalize_reverse(self, unicode_path):
if platform.system() == 'Darwin':
try:
test = unicodedata.normalize('NFD', unicode_path)
return test
except ValueError as e:
return unicode_path
else:
return unicode_path
def set_tokens(self, tokens):
self.tokens = tokens
try:
keyring.set_password(self.url, self.user_id + '-token', tokens['t'] + ':' + tokens['p'])
except PasswordSetError:
logging.error(_("Cannot store tokens in keychain, there might be an OS permission issue!"))
def get_tokens(self):
if not self.tokens:
k_tok = keyring.get_password(self.url, self.user_id + '-token')
if k_tok:
parts = k_tok.split(':')
self.tokens = {'t': parts[0], 'p': parts[1]}
return self.tokens
def basic_authenticate(self):
"""
        Use basic HTTP authentication to get a token key/pair instead of passing the
        user's credentials with each request
:return:dict()
"""
url = self.base_url + 'pydio/keystore_generate_auth_token/' + self.device_id
resp = requests.get(url=url, auth=self.auth, verify=self.verify_ssl, proxies=self.proxies)
if resp.status_code == 401:
raise PydioSdkBasicAuthException(_('Authentication Error'))
# If content is empty (but not error status code), the token based auth may not be active
# We should switch to basic
if resp.content == '':
raise PydioSdkTokenAuthNotSupportedException("token_auth")
try:
tokens = json.loads(resp.content)
except ValueError as v:
raise PydioSdkException("basic_auth", "", "Cannot parse JSON result: " + resp.content + "")
#return False
self.set_tokens(tokens)
return tokens
def perform_basic(self, url, request_type='get', data=None, files=None, headers=None, stream=False, with_progress=False):
"""
:param headers:
:param url: str url to query
:param request_type: str http method, default is "get"
:param data: dict query parameters
:param files: dict files, described as {'fieldname':'path/to/file'}
:param stream: bool get response as a stream
:param with_progress: dict an object that can be updated with various progress data
:return: Http response
"""
if request_type == 'get':
try:
resp = requests.get(url=url, stream=stream, timeout=20, verify=self.verify_ssl, headers=headers,
auth=self.auth, proxies=self.proxies)
except ConnectionError as e:
raise
elif request_type == 'post':
if not data:
data = {}
if files:
resp = self.upload_file_with_progress(url, dict(**data), files, stream, with_progress,
max_size=self.upload_max_size, auth=self.auth)
else:
resp = requests.post(
url=url,
data=data,
stream=stream,
timeout=20,
verify=self.verify_ssl,
headers=headers,
auth=self.auth,
proxies=self.proxies)
else:
raise PydioSdkTokenAuthException(_("Unsupported HTTP method"))
if resp.status_code == 401:
raise PydioSdkTokenAuthException(_("Authentication Exception"))
return resp
def perform_with_tokens(self, token, private, url, request_type='get', data=None, files=None, headers=None, stream=False,
with_progress=False):
"""
:param headers:
:param token: str the token.
:param private: str private key associated to token
:param url: str url to query
:param request_type: str http method, default is "get"
:param data: dict query parameters
:param files: dict files, described as {'fieldname':'path/to/file'}
:param stream: bool get response as a stream
:param with_progress: dict an object that can be updated with various progress data
:return: Http response
"""
nonce = sha1(str(random.random())).hexdigest()
uri = urlparse(url).path.rstrip('/')
msg = uri + ':' + nonce + ':' + private
the_hash = hmac.new(str(token), str(msg), sha256)
auth_hash = nonce + ':' + the_hash.hexdigest()
if request_type == 'get':
auth_string = 'auth_token=' + token + '&auth_hash=' + auth_hash
if '?' in url:
url += '&' + auth_string
else:
url += '?' + auth_string
try:
resp = requests.get(url=url, stream=stream, timeout=20, verify=self.verify_ssl,
headers=headers, proxies=self.proxies)
except ConnectionError as e:
raise
elif request_type == 'post':
if not data:
data = {}
data['auth_token'] = token
data['auth_hash'] = auth_hash
if files:
resp = self.upload_file_with_progress(url, dict(**data), files, stream, with_progress,
max_size=self.upload_max_size)
else:
resp = requests.post(
url=url,
data=data,
stream=stream,
timeout=20,
verify=self.verify_ssl,
headers=headers,
proxies=self.proxies)
else:
raise PydioSdkTokenAuthException(_("Unsupported HTTP method"))
if resp.status_code == 401:
raise PydioSdkTokenAuthException(_("Authentication Exception"))
return resp
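    # Worked sketch of the token signing scheme implemented above (values are
    # illustrative only):
    #
    #   uri       = '/api/my-workspace/ls'
    #   nonce     = sha1(str(random.random())).hexdigest()
    #   msg       = uri + ':' + nonce + ':' + private
    #   auth_hash = nonce + ':' + hmac.new(str(token), str(msg), sha256).hexdigest()
    #
    # GET requests append auth_token/auth_hash to the query string, while POST
    # requests send them as form fields.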
def perform_request(self, url, type='get', data=None, files=None, headers=None, stream=False, with_progress=False):
"""
Perform an http request.
        There's a one-time loop, as it first tries to use the auth tokens. If the token auth fails, it may just
mean that the token key/pair is expired. So we try once to get fresh new tokens with basic_http auth and
re-run query with new tokens.
:param headers:
:param url: str url to query
:param type: str http method, default is "get"
:param data: dict query parameters
:param files: dict files, described as {'fieldname':'path/to/file'}
:param stream: bool get response as a stream
:param with_progress: dict an object that can be updated with various progress data
:return:
"""
        # We know that token auth is not supported anyway
if self.stick_to_basic:
return self.perform_basic(url, request_type=type, data=data, files=files, headers=headers, stream=stream,
with_progress=with_progress)
tokens = self.get_tokens()
if not tokens:
try:
tokens = self.basic_authenticate()
except PydioSdkTokenAuthNotSupportedException as pne:
logging.info('Switching to permanent basic auth, as tokens were not correctly received. This is not '
'good for performances, but might be necessary for session credential based setups.')
self.stick_to_basic = True
return self.perform_basic(url, request_type=type, data=data, files=files, headers=headers, stream=stream,
with_progress=with_progress)
return self.perform_with_tokens(tokens['t'], tokens['p'], url, type, data, files,
headers=headers, stream=stream)
else:
try:
resp = self.perform_with_tokens(tokens['t'], tokens['p'], url, type, data, files, headers=headers,
stream=stream, with_progress=with_progress)
return resp
except requests.exceptions.ConnectionError:
raise
except PydioSdkTokenAuthException as pTok:
# Tokens may be revoked? Retry
try:
tokens = self.basic_authenticate()
except PydioSdkTokenAuthNotSupportedException:
self.stick_to_basic = True
logging.info('Switching to permanent basic auth, as tokens were not correctly received. This is not '
'good for performances, but might be necessary for session credential based setups.')
return self.perform_basic(url, request_type=type, data=data, files=files, headers=headers, stream=stream,
with_progress=with_progress)
try:
return self.perform_with_tokens(tokens['t'], tokens['p'], url, type, data, files,
headers=headers, stream=stream, with_progress=with_progress)
except PydioSdkTokenAuthException as secTok:
logging.error('Second Auth Error, what is wrong?')
raise secTok
def changes(self, last_seq):
"""
Get the list of changes detected on server since a given sequence number
:param last_seq:int
:return:list a list of changes
"""
url = self.url + '/changes/' + str(last_seq)
if self.remote_folder:
url += '?filter=' + self.remote_folder
try:
resp = self.perform_request(url=url)
except requests.exceptions.ConnectionError:
raise
try:
return json.loads(resp.content)
except ValueError as v:
raise Exception(_("Invalid JSON value received while getting remote changes. Is the server correctly configured?"))
def changes_stream(self, last_seq, callback):
"""
Get the list of changes detected on server since a given sequence number
:param last_seq:int
:change_store: AbstractChangeStore
:return:list a list of changes
"""
if last_seq == 0:
perform_flattening = "true"
else:
perform_flattening = "false"
url = self.url + '/changes/' + str(last_seq) + '/?stream=true'
if self.remote_folder:
url += '&filter=' + self.remote_folder
url += '&flatten=' + perform_flattening
resp = self.perform_request(url=url, stream=True)
info = dict()
info['max_seq'] = last_seq
for line in resp.iter_lines(chunk_size=512):
if line:
if str(line).startswith('LAST_SEQ'):
#call the merge function with NULL row
callback('remote', None, info)
return int(line.split(':')[1])
else:
try:
one_change = json.loads(line)
node = one_change.pop('node')
one_change = dict(node.items() + one_change.items())
callback('remote', one_change, info)
except ValueError as v:
logging.error('Invalid JSON Response, line was ' + line)
raise Exception(_('Invalid JSON value received while getting remote changes'))
except Exception as e:
raise e
def stat(self, path, with_hash=False, partial_hash=None):
"""
Equivalent of the local fstat() on the remote server.
:param path: path of node from the workspace root
:param with_hash: stat result can be enriched with the node hash
:return:dict a list of key like
{
dev: 16777218,
ino: 4062280,
mode: 16895,
nlink: 15,
uid: 70,
gid: 20,
rdev: 0,
size: 510,
atime: 1401915891,
mtime: 1399883020,
ctime: 1399883020,
blksize: 4096,
blocks: 0
}
"""
if self.interrupt_tasks:
raise PydioSdkException("stat", path=path, detail=_('Task interrupted by user'))
path = self.remote_folder + path
action = '/stat_hash' if with_hash else '/stat'
try:
url = self.url + action + self.urlencode_normalized(path)
if partial_hash:
h = {'range': 'bytes=%i-%i' % (partial_hash[0], partial_hash[1])}
resp = self.perform_request(url, headers=h)
else:
resp = self.perform_request(url)
try:
data = json.loads(resp.content)
except ValueError as ve:
return False
logging.debug("data: %s" % data)
if not data:
return False
if len(data) > 0 and 'size' in data:
return data
else:
return False
except requests.exceptions.ConnectionError:
raise
except Exception, ex:
logging.warning("Stat failed", exc_info=ex)
return False
def bulk_stat(self, pathes, result=None, with_hash=False):
"""
        Perform a stat operation (see self.stat()) on a set of nodes. It is very important to use this method instead
        of sending many small stat requests to the server. To keep the POST content reasonable, pathes are sent in
        slices of 200.
:param pathes: list() of node pathes
:param result: dict() an accumulator for the results
:param with_hash: bool whether to ask for files hash or not (md5)
:return:
"""
if self.interrupt_tasks:
raise PydioSdkException("stat", path=pathes[0], detail=_('Task interrupted by user'))
from requests.exceptions import Timeout
# NORMALIZE PATHES FROM START
pathes = map(lambda p: self.normalize(p), pathes)
action = '/stat_hash' if with_hash else '/stat'
data = dict()
maxlen = min(len(pathes), self.stat_slice_number)
clean_pathes = map(lambda t: self.remote_folder + t.replace('\\', '/'),
filter(lambda x: x != '', pathes[:maxlen]))
data['nodes[]'] = map(lambda p: self.normalize(p), clean_pathes)
url = self.url + action + self.urlencode_normalized(clean_pathes[0])
try:
resp = self.perform_request(url, type='post', data=data)
except Timeout:
if self.stat_slice_number < 20:
raise
self.stat_slice_number = int(math.floor(self.stat_slice_number / 2))
logging.info('Reduce bulk stat slice number to %d', self.stat_slice_number)
return self.bulk_stat(pathes, result=result, with_hash=with_hash)
try:
data = json.loads(resp.content)
except ValueError:
logging.debug("url: %s" % url)
logging.debug("resp.content: %s" % resp.content)
raise
if len(pathes) == 1:
englob = dict()
englob[self.remote_folder + pathes[0]] = data
data = englob
if result:
replaced = result
else:
replaced = dict()
for (p, stat) in data.items():
if self.remote_folder:
p = p[len(self.remote_folder):]
#replaced[os.path.normpath(p)] = stat
p1 = os.path.normpath(p)
p2 = os.path.normpath(self.normalize_reverse(p))
p3 = p
p4 = self.normalize_reverse(p)
if p2 in pathes:
replaced[p2] = stat
pathes.remove(p2)
elif p1 in pathes:
replaced[p1] = stat
pathes.remove(p1)
elif p3 in pathes:
replaced[p3] = stat
pathes.remove(p3)
elif p4 in pathes:
replaced[p4] = stat
pathes.remove(p4)
else:
#pass
logging.info('Fatal charset error, cannot find files (%s, %s, %s, %s) in %s' % (repr(p1), repr(p2), repr(p3), repr(p4), repr(pathes),))
raise PydioSdkException('bulk_stat', p1, "Encoding problem, failed emptying bulk_stat, "
"exiting to avoid infinite loop")
if len(pathes):
self.bulk_stat(pathes, result=replaced, with_hash=with_hash)
return replaced
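    # Illustrative usage (hypothetical paths): stat many nodes in a single
    # round trip instead of issuing one /stat request per file.
    #
    #   stats = sdk.bulk_stat(['/dir/a.txt', '/dir/b.txt'], with_hash=True)
    #   # -> {'/dir/a.txt': {...}, '/dir/b.txt': {...}}
    #
    # Paths are sent in slices of self.stat_slice_number; on timeouts the
    # slice size is halved and the remaining paths are retried recursively.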
def mkdir(self, path):
"""
Create a directory of the server
:param path: path of the new directory to create
:return: result of the server query, see API
"""
url = self.url + '/mkdir' + self.urlencode_normalized((self.remote_folder + path))
resp = self.perform_request(url=url)
self.is_pydio_error_response(resp)
return resp.content
def bulk_mkdir(self, pathes):
"""
Create many directories at once
:param pathes: a set of directories to create
:return: content of the response
"""
data = dict()
data['ignore_exists'] = 'true'
data['nodes[]'] = map(lambda t: self.normalize(self.remote_folder + t), filter(lambda x: x != '', pathes))
url = self.url + '/mkdir' + self.urlencode_normalized(self.remote_folder + pathes[0])
resp = self.perform_request(url=url, type='post', data=data)
self.is_pydio_error_response(resp)
return resp.content
def mkfile(self, path):
"""
Create an empty file on the server
:param path: node path
:return: result of the server query
"""
url = self.url + '/mkfile' + self.urlencode_normalized((self.remote_folder + path)) + '?force=true'
resp = self.perform_request(url=url)
self.is_pydio_error_response(resp)
return resp.content
def rename(self, source, target):
"""
Rename a path to another. Will decide automatically to trigger a rename or a move in the API.
:param source: origin path
:param target: target path
:return: response of the server
"""
if os.path.dirname(source) == os.path.dirname(target):
url = self.url + '/rename'
data = dict(file=self.normalize(self.remote_folder + source).encode('utf-8'),
dest=self.normalize(self.remote_folder + target).encode('utf-8'))
else:
url = self.url + '/move'
data = dict(
file=(self.normalize(self.remote_folder + source)).encode('utf-8'),
dest=os.path.dirname((self.normalize(self.remote_folder + target).encode('utf-8'))))
resp = self.perform_request(url=url, type='post', data=data)
self.is_pydio_error_response(resp)
return resp.content
def lsync(self, source=None, target=None, copy=False):
"""
        Call the server lsync action to synchronize or copy content between two server paths.
:param source: origin path
:param target: target path
:return: response of the server
"""
url = self.url + '/lsync'
data = dict()
if source:
data['from'] = self.normalize(self.remote_folder + source).encode('utf-8')
if target:
data['to'] = self.normalize(self.remote_folder + target).encode('utf-8')
if copy:
data['copy'] = 'true'
resp = self.perform_request(url=url, type='post', data=data)
self.is_pydio_error_response(resp)
return resp.content
def delete(self, path):
"""
Delete a resource on the server
:param path: node path
:return: response of the server
"""
url = self.url + '/delete' + self.urlencode_normalized((self.remote_folder + path))
data = dict(file=self.normalize(self.remote_folder + path).encode('utf-8'))
resp = self.perform_request(url=url, type='post', data=data)
self.is_pydio_error_response(resp)
return resp.content
def load_server_configs(self):
"""
Load the plugins from the registry and parse some of the exposed parameters of the plugins.
        Currently supports the uploader parameters and the filehasher.
:return: dict() parsed configs
"""
url = self.base_url + 'pydio/state/plugins?format=json'
resp = self.perform_request(url=url)
server_data = dict()
try:
data = json.loads(resp.content)
plugins = data['plugins']
for p in plugins['ajxpcore']:
if p['@id'] == 'core.uploader':
if 'plugin_configs' in p and 'property' in p['plugin_configs']:
properties = p['plugin_configs']['property']
for prop in properties:
server_data[prop['@name']] = prop['$']
for p in plugins['meta']:
if p['@id'] == 'meta.filehasher':
if 'plugin_configs' in p and 'property' in p['plugin_configs']:
properties = p['plugin_configs']['property']
if '@name' in properties:
server_data[properties['@name']] = properties['$']
else:
for prop in properties:
server_data[prop['@name']] = prop['$']
        except (KeyError, ValueError):
pass
return server_data
def upload(self, local, local_stat, path, callback_dict=None, max_upload_size=-1):
"""
Upload a file to the server.
:param local: file path
:param local_stat: stat of the file
:param path: target path on the server
:param callback_dict: an dict that can be fed with progress data
        :param max_upload_size: a known or arbitrary upload max size. If the file is bigger, it will be
chunked into many POST requests
:return: Server response
"""
if not local_stat:
raise PydioSdkException('upload', path, _('Local file to upload not found!'))
if local_stat['size'] == 0:
self.mkfile(path)
new = self.stat(path)
if not new or not (new['size'] == local_stat['size']):
raise PydioSdkException('upload', path, _('File not correct after upload (expected size was 0 bytes)'))
return True
existing_part = False
if (self.upload_max_size - 4096) < local_stat['size']:
self.has_disk_space_for_upload(path, local_stat['size'])
existing_part = self.stat(path+'.dlpart', True)
dirpath = os.path.dirname(path)
if dirpath and dirpath != '/':
folder = self.stat(dirpath)
if not folder:
self.mkdir(os.path.dirname(path))
url = self.url + '/upload/put' + self.urlencode_normalized((self.remote_folder + os.path.dirname(path)))
files = {
'userfile_0': local
}
if existing_part:
files['existing_dlpart'] = existing_part
data = {
'force_post': 'true',
'xhr_uploader': 'true',
'urlencoded_filename': self.urlencode_normalized(os.path.basename(path))
}
try:
self.perform_request(url=url, type='post', data=data, files=files, with_progress=callback_dict)
except PydioSdkDefaultException as e:
if e.message == '507':
usage, total = self.quota_usage()
raise PydioSdkQuotaException(path, local_stat['size'], usage, total)
if e.message == '412':
raise PydioSdkPermissionException('Cannot upload '+os.path.basename(path)+' in directory '+os.path.dirname(path))
else:
raise e
except RequestException as ce:
raise PydioSdkException("upload", path, 'RequestException: ' + ce.message)
new = self.stat(path)
if not new or not (new['size'] == local_stat['size']):
raise PydioSdkException('upload', path, _('File is incorrect after upload'))
return True
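    # Illustrative usage (hypothetical paths): local_stat is the dict-style
    # stat used by the sync code (only a 'size' key is needed here), and
    # callback_dict is filled with progress data while the file is posted.
    #
    #   progress = {}
    #   sdk.upload('/tmp/report.pdf', {'size': 123456}, '/docs/report.pdf',
    #              callback_dict=progress)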
def download(self, path, local, callback_dict=None):
"""
Download the content of a server file to a local file.
:param path: node path on the server
:param local: local path on filesystem
:param callback_dict: a dict() than can be updated by with progress data
:return: Server response
"""
orig = self.stat(path)
if not orig:
raise PydioSdkException('download', path, _('Original file was not found on server'))
url = self.url + '/download' + self.urlencode_normalized((self.remote_folder + path))
local_tmp = local + '.pydio_dl'
headers = None
write_mode = 'wb'
dl = 0
if not os.path.exists(os.path.dirname(local)):
os.makedirs(os.path.dirname(local))
elif os.path.exists(local_tmp):
# A .pydio_dl already exists, maybe it's a chunk of the original?
# Try to get an md5 of the corresponding chunk
current_size = os.path.getsize(local_tmp)
chunk_local_hash = hashfile(open(local_tmp, 'rb'), hashlib.md5())
chunk_remote_stat = self.stat(path, True, partial_hash=[0, current_size])
if chunk_remote_stat and chunk_local_hash == chunk_remote_stat['hash']:
headers = {'range':'bytes=%i-%i' % (current_size, chunk_remote_stat['size'])}
write_mode = 'a+'
dl = current_size
if callback_dict:
callback_dict['bytes_sent'] = float(current_size)
callback_dict['total_bytes_sent'] = float(current_size)
callback_dict['total_size'] = float(chunk_remote_stat['size'])
callback_dict['transfer_rate'] = 0
dispatcher.send(signal=TRANSFER_CALLBACK_SIGNAL, send=self, change=callback_dict)
else:
os.unlink(local_tmp)
try:
with open(local_tmp, write_mode) as fd:
start = time.clock()
r = self.perform_request(url=url, stream=True, headers=headers)
total_length = r.headers.get('content-length')
if total_length is None: # no content length header
fd.write(r.content)
else:
previous_done = 0
for chunk in r.iter_content(1024 * 8):
if self.interrupt_tasks:
raise PydioSdkException("interrupt", path=path, detail=_('Task interrupted by user'))
dl += len(chunk)
fd.write(chunk)
done = int(50 * dl / int(total_length))
if done != previous_done:
transfer_rate = dl // (time.clock() - start)
logging.debug("\r[%s%s] %s bps" % ('=' * done, ' ' * (50 - done), transfer_rate))
dispatcher.send(signal=TRANSFER_RATE_SIGNAL, send=self, transfer_rate=transfer_rate)
if callback_dict:
callback_dict['bytes_sent'] = float(len(chunk))
callback_dict['total_bytes_sent'] = float(dl)
callback_dict['total_size'] = float(total_length)
callback_dict['transfer_rate'] = transfer_rate
dispatcher.send(signal=TRANSFER_CALLBACK_SIGNAL, send=self, change=callback_dict)
previous_done = done
if not os.path.exists(local_tmp):
raise PydioSdkException('download', local, _('File not found after download'))
else:
stat_result = os.stat(local_tmp)
if not orig['size'] == stat_result.st_size:
os.unlink(local_tmp)
raise PydioSdkException('download', path, _('File is not correct after download'))
else:
is_system_windows = platform.system().lower().startswith('win')
if is_system_windows and os.path.exists(local):
os.unlink(local)
os.rename(local_tmp, local)
return True
except PydioSdkException as pe:
if pe.operation == 'interrupt':
raise pe
else:
if os.path.exists(local_tmp):
os.unlink(local_tmp)
raise pe
except Exception as e:
if os.path.exists(local_tmp):
os.unlink(local_tmp)
raise PydioSdkException('download', path, _('Error while downloading file: %s') % e.message)
def list(self, dir=None, nodes=list(), options='al', recursive=False, max_depth=1, remote_order='', order_column='', order_direction='', max_nodes=0, call_back=None):
url = self.url + '/ls' + self.urlencode_normalized(self.remote_folder)
data = dict()
if dir:
data['dir'] = dir
if nodes:
data['nodes'] = nodes
data['options'] = options
if recursive:
data['recursive'] = 'true'
if max_depth > 1:
data['max_depth'] = max_depth
if max_nodes:
data['max_nodes'] = max_nodes
if remote_order:
data['remote_order'] = remote_order
if order_column:
data['order_column'] = order_column
if order_direction:
data['order_direction'] = order_direction
resp = self.perform_request(url=url, type='post', data=data)
self.is_pydio_error_response(resp)
queue = [ET.ElementTree(ET.fromstring(resp.content)).getroot()]
snapshot = dict()
while len(queue):
tree = queue.pop(0)
if tree.get('ajxp_mime') == 'ajxp_folder':
for subtree in tree.findall('tree'):
queue.append(subtree)
path = tree.get('filename')
bytesize = tree.get('bytesize')
dict_tree = dict(tree.items())
if path:
if call_back:
call_back(dict_tree)
else:
snapshot[path] = bytesize
return snapshot if not call_back else None
def snapshot_from_changes(self, call_back=None):
url = self.url + '/changes/0/?stream=true&flatten=true'
if self.remote_folder:
url += '&filter=' + self.urlencode_normalized(self.remote_folder)
resp = self.perform_request(url=url, stream=True)
files = dict()
for line in resp.iter_lines(chunk_size=512):
if not str(line).startswith('LAST_SEQ'):
element = json.loads(line)
if call_back:
call_back(element)
else:
path = element.pop('target')
bytesize = element['node']['bytesize']
if path != 'NULL':
files[path] = bytesize
return files if not call_back else None
def apply_check_hook(self, hook_name='', hook_arg='', file='/'):
url = self.url + '/apply_check_hook/'+hook_name+'/'+str(hook_arg)+'/'
resp = self.perform_request(url=url, type='post', data={'file': self.normalize(file)})
return resp
def quota_usage(self):
url = self.url + '/monitor_quota/'
resp = self.perform_request(url=url, type='post')
quota = json.loads(resp.text)
return quota['USAGE'], quota['TOTAL']
def has_disk_space_for_upload(self, path, file_size):
resp = self.apply_check_hook(hook_name='before_create', hook_arg=file_size, file=path)
if str(resp.text).count("type=\"ERROR\""):
usage, total = self.quota_usage()
raise PydioSdkQuotaException(path, file_size, usage, total)
def is_pydio_error_response(self, resp):
error = False
message = 'Unknown error'
try:
element = ET.ElementTree(ET.fromstring(resp.content)).getroot()
error = str(element.get('type')).lower() == 'error'
message = element[0].text
except Exception as e:
pass
if error:
raise PydioSdkDefaultException(message)
def rsync_delta(self, path, signature, delta_path):
url = self.url + ('/filehasher_delta' + self.urlencode_normalized(self.remote_folder + path.replace("\\", "/")))
resp = self.perform_request(url=url, type='post', files={'userfile_0': signature}, stream=True,
with_progress=False)
fd = open(delta_path, 'wb')
for chunk in resp.iter_content(8192):
fd.write(chunk)
fd.close()
def rsync_signature(self, path, signature):
url = self.url + ('/filehasher_signature'+ self.urlencode_normalized(self.remote_folder + path.replace("\\", "/")))
resp = self.perform_request(url=url, type='post', stream=True, with_progress=False)
fd = open(signature, 'wb')
for chunk in resp.iter_content(8192):
fd.write(chunk)
fd.close()
def rsync_patch(self, path, delta_path):
url = self.url + ('/filehasher_patch'+ self.urlencode_normalized(self.remote_folder + path.replace("\\", "/")))
resp = self.perform_request(url=url, type='post', files={'userfile_0': delta_path}, with_progress=False)
self.is_pydio_error_response(resp)
def is_rsync_supported(self):
return self.rsync_supported
def upload_file_with_progress(self, url, fields, files, stream, with_progress, max_size=0, auth=None):
"""
Upload a file with progress, file chunking if necessary, and stream content directly from file.
:param url: url to post
:param fields: dict() query parameters
:param files: dict() {'fieldname' : '/path/to/file'}
:param stream: whether to get response as stream or not
:param with_progress: dict() updatable dict with progress data
:param max_size: upload max size
:return: response of the last requests if there were many of them
"""
if with_progress:
def cb(size=0, progress=0, delta=0, rate=0):
with_progress['total_size'] = size
with_progress['bytes_sent'] = delta
with_progress['total_bytes_sent'] = progress
dispatcher.send(signal=TRANSFER_CALLBACK_SIGNAL, sender=self, change=with_progress)
else:
def cb(size=0, progress=0, delta=0, rate=0):
logging.debug('Current transfer rate ' + str(rate))
def parse_upload_rep(http_response):
if http_response.headers.get('content-type') != 'application/octet-stream':
if unicode(http_response.text).count('message type="ERROR"'):
if unicode(http_response.text).lower().count("(507)"):
raise PydioSdkDefaultException('507')
if unicode(http_response.text).lower().count("(412)"):
raise PydioSdkDefaultException('412')
import re
# Remove XML tags
text = re.sub('<[^<]+>', '', unicode(http_response.text))
raise PydioSdkDefaultException(text)
if unicode(http_response.text).lower().count("(507)"):
raise PydioSdkDefaultException('507')
if unicode(http_response.text).lower().count("(412)"):
raise PydioSdkDefaultException('412')
if unicode(http_response.text).lower().count("(410)") or unicode(http_response.text).lower().count("(411)"):
raise PydioSdkDefaultException(unicode(http_response.text))
filesize = os.stat(files['userfile_0']).st_size
if max_size:
# Reduce max size to leave some room for data header
max_size -= 4096
existing_pieces_number = 0
if max_size and filesize > max_size:
fields['partial_upload'] = 'true'
fields['partial_target_bytesize'] = str(filesize)
# Check if there is already a .dlpart on the server.
# If it's the case, maybe it's already the beginning of this?
if 'existing_dlpart' in files:
existing_dlpart = files['existing_dlpart']
existing_dlpart_size = existing_dlpart['size']
if filesize > existing_dlpart_size and \
file_start_hash_match(files['userfile_0'], existing_dlpart_size, existing_dlpart['hash']):
                    logging.info('Found the beginning of this file in the existing part, skipping the first pieces')
existing_pieces_number = existing_dlpart_size / max_size
cb(filesize, existing_dlpart_size, existing_dlpart_size, 0)
if not existing_pieces_number:
# try:
# import http.client as http_client
# except ImportError:
# # Python 2
# import httplib as http_client
# http_client.HTTPConnection.debuglevel = 1
#
# logging.getLogger().setLevel(logging.DEBUG)
# requests_log = logging.getLogger("requests.packages.urllib3")
# requests_log.setLevel(logging.DEBUG)
# requests_log.propagate = True
(header_body, close_body, content_type) = encode_multiparts(fields)
body = BytesIOWithFile(header_body, close_body, files['userfile_0'], callback=cb, chunk_size=max_size,
file_part=0, signal_sender=self)
resp = requests.post(
url,
data=body,
headers={'Content-Type': content_type},
stream=True,
timeout=20,
verify=self.verify_ssl,
auth=auth,
proxies=self.proxies
)
existing_pieces_number = 1
parse_upload_rep(resp)
if resp.status_code == 401:
return resp
if max_size and filesize > max_size:
fields['appendto_urlencoded_part'] = fields['urlencoded_filename']
del fields['urlencoded_filename']
(header_body, close_body, content_type) = encode_multiparts(fields)
for i in range(existing_pieces_number, int(math.ceil(filesize / max_size)) + 1):
if self.interrupt_tasks:
raise PydioSdkException("upload", path=os.path.basename(files['userfile_0']), detail=_('Task interrupted by user'))
before = time.time()
body = BytesIOWithFile(header_body, close_body, files['userfile_0'],
callback=cb, chunk_size=max_size, file_part=i, signal_sender=self)
resp = requests.post(
url,
data=body,
headers={'Content-Type': content_type},
stream=True,
verify=self.verify_ssl,
auth=auth,
proxies=self.proxies
)
parse_upload_rep(resp)
if resp.status_code == 401:
return resp
duration = time.time() - before
            logging.info('Uploaded ' + str(max_size) + ' bytes of data in about ' + str(duration) + ' s')
return resp
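# Worked note on the chunking above (illustrative numbers): with the default
# upload_max_size of 40 MiB the effective chunk size is 40 MiB minus 4096
# bytes reserved for the multipart headers. A 100 MiB file is therefore sent
# as an initial POST plus the remaining pieces of the
# range(existing_pieces_number, ceil(filesize / max_size) + 1) loop, each one
# streamed through BytesIOWithFile so the file is never fully read in memory.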
| gpl-3.0 | 2,665,800,779,976,125,000 | 42.093023 | 170 | 0.553742 | false |
MOOCworkbench/MOOCworkbench | quality_manager/models.py | 1 | 2510 | from django.db import models
from django.template.defaultfilters import slugify
from model_utils.models import TimeStampedModel
from experiments_manager.models import ChosenExperimentSteps
class ExperimentMeasure(models.Model):
name = models.CharField(max_length=255, editable=False)
description = models.TextField()
high_message = models.CharField(max_length=255, default='High')
medium_message = models.CharField(max_length=255, default='Medium')
low_message = models.CharField(max_length=255, default='Low')
def __str__(self):
return 'Measurement of {0}'.format(self.name)
def get_low_message(self):
return '{0}: {1}'.format(self.name, self.low_message)
def get_medium_message(self):
return '{0}: {1}'.format(self.name, self.medium_message)
def get_high_message(self):
return '{0}: {1}'.format(self.name, self.high_message)
def slug(self):
return slugify(self.name).replace('-', '_')
class RawMeasureResult(models.Model):
key = models.CharField(max_length=255)
value = models.CharField(max_length=1000)
def __str__(self):
return 'Key: {0} with value: {1}'.format(self.key, str(self.value))
class ExperimentMeasureResult(TimeStampedModel):
HIGH = 'H'
MEDIUM = 'M'
LOW = 'L'
SCALE = (
(HIGH, 'High'),
(MEDIUM, 'Medium'),
(LOW, 'Low'),
)
step = models.ForeignKey(to=ChosenExperimentSteps)
measurement = models.ForeignKey(to=ExperimentMeasure)
result = models.CharField(max_length=1, choices=SCALE)
raw_values = models.ManyToManyField(to=RawMeasureResult)
def get_message(self):
message_dict = {ExperimentMeasureResult.LOW: self.measurement.get_low_message(),
ExperimentMeasureResult.MEDIUM: self.measurement.get_medium_message(),
ExperimentMeasureResult.HIGH: self.measurement.get_high_message()}
if self.result:
return message_dict[self.result]
return 'Result missing'
def get_class(self):
style_classes = {ExperimentMeasureResult.LOW: 'danger',
ExperimentMeasureResult.MEDIUM: 'warning',
ExperimentMeasureResult.HIGH: 'success'}
if not self.result:
return "default"
return style_classes[self.result]
def slug(self):
return self.measurement.slug()
def __str__(self):
return "Workbench scan of {0}".format(self.measurement.name)
| mit | -3,190,671,055,506,624,500 | 32.466667 | 94 | 0.650199 | false |
cloudify-cosmo/cloudify-manager | tests/integration_tests/tests/usage_collector_base.py | 1 | 2836 | ########
# Copyright (c) 2013-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import join
from integration_tests import BaseTestCase
from integration_tests.framework import docker
from integration_tests.tests.constants import MANAGER_PYTHON
from integration_tests.tests.utils import (assert_messages_in_log,
get_resource as resource)
from integration_tests.tests.utils import run_postgresql_command
COLLECTOR_SCRIPTS = ['collect_cloudify_uptime', 'collect_cloudify_usage']
SCRIPTS_DESTINATION_PATH = '/opt/cloudify/usage_collector'
LOG_PATH = '/var/log/cloudify/usage_collector'
LOG_FILE = 'usage_collector.log'
class TestUsageCollectorBase(BaseTestCase):
def run_scripts_with_deployment(self, yaml_path, messages):
deployment, _ = self.deploy_application(resource(yaml_path),
timeout_seconds=120)
self.run_collector_scripts_and_assert(messages)
self.undeploy_application(deployment.id)
def run_collector_scripts_and_assert(self, messages):
for script in COLLECTOR_SCRIPTS:
docker.execute(self.env.container_id, '{0} {1}.py'.format(
MANAGER_PYTHON,
join(SCRIPTS_DESTINATION_PATH, script))
)
assert_messages_in_log(self.env.container_id,
self.workdir,
messages,
join(LOG_PATH, LOG_FILE))
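    # Illustrative sketch: a concrete test in a subclass would typically be
    # shaped like this (blueprint path and expected messages are hypothetical).
    #
    #   def test_usage_collector(self):
    #       self.clean_usage_collector_log()
    #       self.clean_timestamps()
    #       self.run_scripts_with_deployment(
    #           'dsl/some_blueprint.yaml',
    #           ['hourly_usage', 'daily_usage'])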
def clean_timestamps(self):
# This is necessary for forcing the collector scripts to actually run
# in subsequent tests, despite not enough time passing since last run
run_postgresql_command(
self.env.container_id,
"UPDATE usage_collector SET hourly_timestamp=NULL, "
"daily_timestamp=NULL")
def clean_usage_collector_log(self):
# We need to clean the usage_collector log before each test, because
# each test uses it for asserting different values.
old_usage_log = join(LOG_PATH, self._testMethodName)
test_usage_log = join(LOG_PATH, LOG_FILE)
self.execute_on_manager(['mv', test_usage_log, old_usage_log])
self.execute_on_manager(['touch', test_usage_log])
| apache-2.0 | 4,733,038,073,251,551,000 | 43.3125 | 79 | 0.663258 | false |
jr0d/mercury | src/mercury/backend/workers/service.py | 1 | 4525 | import logging
from mercury.common.configuration import MercuryConfiguration
from mercury.common.task_managers.base.manager import Manager
from mercury.common.task_managers.redis.task import RedisTask
from mercury.common.clients.router_req_client import RouterReqClient
from mercury.backend.rpc_client import RPCClient
from mercury.backend.configuration import (
add_common_options, BACKEND_CONFIG_FILE
)
log = logging.getLogger(__name__)
def options():
configuration = MercuryConfiguration(
'mercury-rpc-worker',
BACKEND_CONFIG_FILE,
description='Manager process for RPC workers'
)
add_common_options(configuration)
configuration.add_option('backend.redis.host',
default='localhost',
help_string='Redis server address')
configuration.add_option('backend.redis.port',
default=6379,
special_type=int,
help_string='Redis server port')
configuration.add_option('backend.redis.queue',
default='rpc_task_queue',
help_string='The queue to use for RPC tasks')
configuration.add_option('backend.workers.threads',
special_type=int,
default=4)
configuration.add_option('backend.workers.max_requests_per_thread',
special_type=int,
default=100)
configuration.add_option('backend.rpc_router',
required=True,
help_string='The RPC service router')
return configuration.scan_options()
class RPCTask(RedisTask):
def __init__(self, rpc_router_url, redis_host, redis_port, redis_queue):
"""
:param rpc_router_url:
:param redis_host:
:param redis_port:
:param redis_queue:
"""
self.rpc_router_url = rpc_router_url
self.rpc_router = RPCClient(self.rpc_router_url,
linger=0,
response_timeout=5,
rcv_retry=3)
super(RPCTask, self).__init__(redis_host, redis_port, redis_queue)
def do(self):
url = 'tcp://{host}:{port}'.format(**self.task)
client = RouterReqClient(url, linger=0,
response_timeout=5,
rcv_retry=3)
client.service_name = 'AgentTaskService'
_payload = {
'category': 'rpc',
'method': self.task['method'],
'args': self.task['args'],
'kwargs': self.task['kwargs'],
'task_id': self.task['task_id'],
'job_id': self.task['job_id']
}
log.info(f'Dispatching task: {self.task}')
response = client.transceiver(_payload)
if response.get('error'): # Transport Error
err_msg = f'{self.task["mercury_id"]} has gone away while ' \
f'handling {self.task["task_id"]}. Transport Message: ' \
f'{response["message"]}'
log.error(err_msg)
self.rpc_router.complete_task({
'job_id': self.task['job_id'],
'task_id': self.task['task_id'],
'status': 'ERROR',
'message': err_msg,
})
elif response['message']['status'] != 0:
self.rpc_router.complete_task({
'job_id': self.task['job_id'],
'task_id': self.task['task_id'],
'status': 'ERROR',
'message': f'Dispatch Error: {response["message"]}'})
# close the socket
client.close()
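# Illustrative sketch of the task payload RPCTask.do() pops from the redis
# queue (values are hypothetical; the keys match what the method reads above):
#
#   {
#       'host': '10.0.0.5', 'port': 9003, 'mercury_id': 'mercury-abc123',
#       'task_id': 'c0ffee', 'job_id': 'deadbeef',
#       'method': 'echo', 'args': ['hello'], 'kwargs': {}
#   }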
def configure_logging(config):
logging.basicConfig(level=logging.getLevelName(config.logging.level),
format=config.logging.format)
def main():
config = options()
configure_logging(config)
# Set this up for access from our threads
manager = Manager(RPCTask, config.backend.workers.threads,
config.backend.workers.max_requests_per_thread,
handler_args=(config.backend.rpc_router,
config.backend.redis.host,
config.backend.redis.port,
config.backend.redis.queue))
manager.manage()
if __name__ == '__main__':
main()
| apache-2.0 | -5,276,650,173,452,258,000 | 33.541985 | 79 | 0.529503 | false |
swehner/foos | plugins/leds.py | 1 | 3191 | #!/usr/bin/env python
import time
import sys
import threading
import queue
import collections
from foos.bus import Bus
class Pattern:
def __init__(self, time, leds=[]):
self.time = time
self.leds = leds
def flatten(l):
for el in l:
if isinstance(el, collections.Iterable):
for sub in flatten(el):
yield sub
else:
yield el
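def _flatten_example():
    # Small illustrative helper, not used by the plugin itself: flatten() lets
    # patterns mix plain Pattern objects with nested lists, which is how
    # pat_goal below is written.
    nested = [Pattern(0.1, ["OK"]), [Pattern(0.2), Pattern(0.3, ["BI"])]]
    return list(flatten(nested))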
class Plugin:
def __init__(self, bus):
self.queue = queue.Queue()
self.bus = bus
fmap = {'score_goal': lambda d: self.setMode(pat_goal),
'upload_ok': lambda d: self.setMode(pat_ok),
'tv_standby': lambda d: self.setMode(pat_standby, loop=True),
'tv_on': lambda d: self.setMode([]),
'button_will_upload': lambda d: self.setMode(pat_upload_feedback),
'upload_error': lambda d: self.setMode(pat_error)}
self.bus.subscribe_map(fmap)
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
while True:
loop, m = self.queue.get()
first = True
while first or loop:
first = False
for p in flatten(m):
if self.__canRun():
self.setLeds(p.leds)
self.__safeSleep(p.time)
else:
loop = False
break
# reset leds
self.setLeds()
def __safeSleep(self, t):
start = time.time()
while (time.time() < start + t) and self.__canRun():
time.sleep(0.05)
def __canRun(self):
return self.queue.empty()
def setLeds(self, leds=[]):
self.bus.notify("leds_enabled", leds)
def setMode(self, mode, loop=False):
self.stop = True
self.queue.put((loop, mode))
pat_reset = 3 * [Pattern(0.2, ["BI", "BD", "YI", "YD"]),
Pattern(0.1),
Pattern(0.2, ["BI", "BD", "YI", "YD"]),
Pattern(1)]
pat_standby = [Pattern(1, ["OK"]),
Pattern(1)]
pat_goal = [[Pattern(0.1, ["BD", "YD"]),
Pattern(0.1, ["OK"]),
Pattern(0.1, ["BI", "YI"])],
3 * [Pattern(0.1),
Pattern(0.1, ["BI", "BD", "OK", "YI", "YD"])]]
pat_ok = [Pattern(0.3, ["OK"])]
pat_upload_feedback = 2 * [Pattern(0.1, ["OK"]), Pattern(0.1)]
pat_error = 2 * [Pattern(0.3, ["YD", "BD"]),
Pattern(0.3)]
pat_demo = [Pattern(1, ["BD"]),
Pattern(1, ["BI"]),
Pattern(1, ["YD"]),
Pattern(1, ["YI"]),
Pattern(1, ["OK"])]
if __name__ == "__main__":
def write_data(led_event):
leds = led_event.data
print("\r", end="")
for led in ["BD", "BI", "OK", "YI", "YD"]:
print("0" if led in leds else " ", end=" ")
sys.stdout.flush()
bus = Bus()
bus.subscribe(write_data, thread=True)
controller = Plugin(bus)
controller.setMode(pat_standby, loop=True)
time.sleep(5)
controller.setMode(pat_goal)
time.sleep(5)
| gpl-3.0 | -5,291,139,710,385,785,000 | 26.508621 | 82 | 0.483861 | false |
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_hdfs/pyunit_NOPASS_HDFS_kmeans_mllib_1_large.py | 1 | 2412 | #----------------------------------------------------------------------
# Purpose: This test compares k-means centers between H2O and MLlib.
#----------------------------------------------------------------------
import sys
sys.path.insert(1, "../../")
import h2o
import numpy as np
def kmeans_mllib(ip, port):
h2o.init(ip, port)
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
running_inside_h2o = h2o.is_running_internal_to_h2o()
if running_inside_h2o:
hdfs_name_node = h2o.get_h2o_internal_hdfs_name_node()
hdfs_cross_file = "/datasets/runit/BigCross.data"
print "Import BigCross.data from HDFS"
url = "hdfs://{0}{1}".format(hdfs_name_node, hdfs_cross_file)
cross_h2o = h2o.import_frame(url)
n = cross_h2o.nrow()
err_mllib = np.genfromtxt(h2o.locate("smalldata/mllib_bench/bigcross_wcsse.csv"), delimiter=",", skip_header=1)
ncent = [int(err_mllib[r][0]) for r in range(len(err_mllib))]
for k in ncent:
print "Run k-means++ with k = {0} and max_iterations = 10".format(k)
cross_km = h2o.kmeans(training_frame = cross_h2o, x = cross_h2o, k = k, init = "PlusPlus",
max_iterations = 10, standardize = False)
clust_mllib = np.genfromtxt(h2o.locate("smalldata/mllib_bench/bigcross_centers_" + str(k) + ".csv"),
delimiter=",").tolist()
clust_h2o = cross_km.centers()
# Sort in ascending order by first dimension for comparison purposes
clust_mllib.sort(key=lambda x: x[0])
clust_h2o.sort(key=lambda x: x[0])
print "\nMLlib Cluster Centers:\n"
print clust_mllib
print "\nH2O Cluster Centers:\n"
print clust_h2o
wcsse_mllib = err_mllib[err_mllib[0:4,0].tolist().index(k)][1]
wcsse_h2o = cross_km.tot_withinss() / n
print "\nMLlib Average Within-Cluster SSE: \n".format(wcsse_mllib)
print "H2O Average Within-Cluster SSE: \n".format(wcsse_h2o)
assert wcsse_h2o == wcsse_mllib, "Expected mllib and h2o to get the same wcsse. Mllib got {0}, and H2O " \
"got {1}".format(wcsse_mllib, wcsse_h2o)
if __name__ == "__main__":
h2o.run_test(sys.argv, kmeans_mllib) | apache-2.0 | -4,750,684,882,160,498,000 | 42.872727 | 119 | 0.548093 | false |
GeographicaGS/GeoServer-Python-REST-API | src/geoserverapirest/ext/sld/sld.py | 1 | 15360 | #!/usr/bin/env python
# coding=UTF-8
import geoserverapirest.ext.sld.core as core, geoserverapirest.ext.sld.color as color
import geoserverapirest.ext.sld.ranges as ranges
"""
These classes are helpers for constructing SLD documents and should be the only entry point to this module.
They are designed to take dictionaries of properties, so styles can be defined declaratively from plain
dictionaries.
"""
strokeLineJoin = core.strokeLineJoin
class Automation(object):
"""
Automation objects base class.
    All these classes do their work in __init__ and store the final output in the
    out variable. The out variable can be retrieved by calling the object, as in
    WhateverAutomationObject().
"""
out = None
def __call__(self):
"""
        Calling the object returns self.out.
"""
return self.out
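# Example of the calling convention (illustrative): every Automation subclass
# builds its result in __init__ and hands it back when the instance is called.
#
#   fill = SemiologyFill({"color": "#e3e2e1"})   # builds a GsSldFillSymbolizer
#   symbolizer = fill()                          # returns the stored .out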
# -----------------
# Semiology Classes
# -----------------
class SemiologyStroke(Automation):
"""
Automation for stroke semiology.
Takes a stroke specification as a dictionary and stores a sld.GsSldStrokeSymbolizer:
stroke = {
"class": sld.SemiologyStroke,
"color": "#3e3e3e",
"width": 2,
"linejoin": strokeLineJoin["bevel"]
}
"""
def __init__(self, params):
self.out = core.GsSldStrokeSymbolizer(params["color"], params["width"], params["linejoin"])
class SemiologyFill(Automation):
"""
Automation for fill semiology.
Takes a fill specification as a dictionary and stores a sld.GsSldFillSymbolizer:
fill = {
"class": sld.SemiologyFill,
"color": "#e3e2e1"
}
"""
def __init__(self, params):
self.out = core.GsSldFillSymbolizer(params["color"])
class SemiologyPolygon(Automation):
"""
Automation for polygon semiology.
Takes a polygon symbol specification as a dictionary and stores a sld.GsSldPolygonSymbolizer:
polygonStrokeFill = {
"class": sld.SemiologyPolygon,
"stroke": stroke,
"fill": fill
}
"""
def __init__(self, params):
self.out = core.GsSldPolygonSymbolizer()
if "stroke" in params.keys():
self.out.addSymbol(SemiologyStroke(params["stroke"])())
if "fill" in params.keys():
self.out.addSymbol(SemiologyFill(params["fill"])())
class SemiologyPolygonSimpleRamp(Automation):
"""
Automation for a polygon simple ramp.
Takes a polygon simple ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer:
polygonSimpleRamp = {
"class": sld.SemiologyPolygonSimpleRamp,
"stroke": stroke,
"low": "#dedece",
"high": "#4a4140"
}
"""
def __init__(self, params, steps):
self.out = []
c = color.Color()
colors = c.colorRamp(params["low"], params["high"], steps)
for i in range(0, steps):
o = core.GsSldPolygonSymbolizer()
if "stroke" in params.keys():
o.addSymbol(SemiologyStroke(params["stroke"])())
o.addSymbol(SemiologyFill({"color": colors[i]})())
self.out.append(o)
class SemiologyPolygonDoubleRamp(Automation):
"""
Automation for a polygon double ramp.
Takes a polygon double ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer:
polygonDoubleRamp = {
"class": sld.SemiologyPolygonDoubleRamp,
"stroke": stroke,
"low": "#ff0000",
"middle": "#ffffff",
"high": "#0000ff"
}
"""
def __init__(self, params, sidesteps):
self.out = []
c = color.Color()
colors = c.colorDualRamp(params["low"], params["middle"], params["high"], sidesteps)
for i in range(0, (sidesteps*2)+1):
o = core.GsSldPolygonSymbolizer()
if "stroke" in params.keys():
o.addSymbol(SemiologyStroke(params["stroke"])())
o.addSymbol(SemiologyFill({"color": colors[i]})())
self.out.append(o)
class SemiologyPolygonCustomRamp(Automation):
"""
Automation for a polygon custom ramp.
Takes a polygon custom ramp specification as a dictionary and stores a list of sld.GsSldPolygonSymbolizer:
polygonCustomRamp = {
"class": sld.SemiologyPolygonCustomRamp,
"stroke": stroke,
"colors": ["#ff0000", "#00ff00", "#0000ff"]
}
"""
def __init__(self, params):
self.out = []
for i in params["colors"]:
o = core.GsSldPolygonSymbolizer()
if "stroke" in params.keys():
o.addSymbol(SemiologyStroke(params["stroke"])())
o.addSymbol(SemiologyFill({"color": i})())
self.out.append(o)
# -----------------
# Condition Classes
# -----------------
class ConditionGtoe(Automation):
"""
Automation for GTOE condition.
Takes a condition specification as a dictionary and stores a GsSldConditionGtoe:
ConditionGtoe = {
"class": sld.ConditionGtoe,
"attribute": "area",
"value": 20000000
}
"""
def __init__(self, params):
self.out = core.GsSldConditionGtoe(params["attribute"], params["value"])
class ConditionLtoe(Automation):
"""
Automation for LTOE condition.
Takes a condition specification as a dictionary and stores a GsSldConditionLtoe:
ConditionLtoe = {
"class": sld.ConditionLtoe,
"attribute": "area",
"value": 20000000
}
"""
def __init__(self, params):
self.out = core.GsSldConditionLtoe(params["attribute"], params["value"])
class ConditionEqual(Automation):
"""
Automation for EQUAL condition.
Takes a condition specification as a dictionary and stores a GsSldConditionEqual:
ConditionEqual = {
"class": sld.ConditionEqual,
"attribute": "PROVINCIA",
"value": "Córdoba"
}
"""
def __init__(self, params):
self.out = core.GsSldConditionEqual(params["attribute"], params["value"])
class ConditionAnd(Automation):
"""
Automation for AND condition.
Takes a condition specification as a dictionary and stores a GsSldConditionAnd:
conditionAnd = {
"class": sld.ConditionAnd,
"c0": conditionLtoe,
"c1": conditionEqual
}
"""
def __init__(self, params):
self.out = core.GsSldConditionAnd(params["c0"]["class"](params["c0"])(), params["c1"]["class"](params["c1"])())
class ConditionOr(Automation):
"""
Automation for OR condition.
Takes a condition specification as a dictionary and stores a GsSldConditionOr:
conditionOr = {
"class": sld.ConditionOr,
"c0": conditionLtoe,
"c1": conditionEqual
}
"""
def __init__(self, params):
self.out = core.GsSldConditionOr(params["c0"]["class"](params["c0"])(), params["c1"]["class"](params["c1"])()
)
# -----------------
# Ranges automation
# -----------------
class RangesQuartileMiddle(Automation):
"""
TODO: Redo this
    Automation for a quartile middle range calculation.
:param data: Data to create intervals from.
:type data: List
:param sideIntervals: Number of side intervals
:type sideIntervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, sideIntervals, middleValue, precision):
a = ranges.Range()
self.out = a.quartileMiddleInterval(data, sideIntervals, middleValue, precision)
class RangesQuartile(Automation):
"""
Automation for a quartile range calculation.
:param data: Data to create intervals from.
:type data: List
:param intervals: Number of intervals
:type intervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, intervals, precision):
a = ranges.Range()
self.out = a.quartileInterval(data, intervals, precision)
class RangesEqualMiddle(Automation):
"""
TODO: Redo this
    Automation for an equal-interval middle range calculation.
:param data: Data to create intervals from.
:type data: List
:param sideIntervals: Number of side intervals
:type sideIntervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, sideIntervals, middleValue, precision):
a = ranges.Range()
self.out = a.equalMiddleInterval(data, sideIntervals, middleValue, precision)
class RangesEqual(Automation):
"""
Automation for a equal range calculation.
:param data: Data to create intervals from.
:type data: List
:param intervals: Number of intervals
:type intervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, intervals, precision):
a = ranges.Range()
self.out = a.equalInterval(data, intervals, precision)
class RangesJenksMiddle(Automation):
"""
Automation for a Jenks middle range calculation.
:param data: Data to create intervals from.
:type data: List
:param sideIntervals: Number of side intervals
:type sideIntervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, sideIntervals, middleValue, precision):
a = ranges.Range()
self.out = a.jenksMiddleInterval(data, sideIntervals, middleValue, precision)
class RangesJenks(Automation):
"""
Automation for a jenks range calculation.
:param data: Data to create intervals from.
:type data: List
:param intervals: Number of intervals
:type intervals: integer
:param precision: Precision
:type precision: integer
"""
def __init__(self, data, intervals, precision):
a = ranges.Range()
self.out = a.jenksInterval(data, intervals, precision)
# ---------------------
# Full style automation
# ---------------------
class StyleBuilder(object):
"""
This is the base style builder.
:param namedLayerName: Name of the named layer
:type namedLayerName: String
:param styleName: Name of the style
:type styleName: String
:param ruleNames: A list of Strings with the names of the rules.
:type ruleNames: List
:param conditions: A list of geoserverapirest.ext.sld.core.GsSldConditionXXX with the conditions.
:type conditions: List
:param symbols: A list of geoserverapirest.ext.sld.core.GsSldPolygonSymbolizer with the symbolizers.
:type symbols: List
"""
@staticmethod
def build(namedLayerName, styleName, ruleNames, conditions, symbols):
ft = core.GsSldFeatureTypeStyle()
filters = []
if conditions is not None:
for i in conditions:
if i is not None:
filter = core.GsSldFilter()
filter.addCondition(i)
filters.append(filter)
else:
filters.append(i)
for i in range(0, len(ruleNames)):
r = core.GsSldRule(ruleNames[i].replace(" ", "_").lower(), ruleNames[i])
r.addSymbolizer(symbols[i])
            if filters != []:
if filters[i] is not None:
r.addFilter(filters[i])
ft.addRule(r)
us = core.GsSldUserStyle(styleName)
us.addFeatureTypeStyle(ft)
nl = core.GsSldNamedLayer(namedLayerName)
nl.addUserStyle(us)
root = core.GsSldRoot()
root.addNamedLayer(nl)
return root
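# Illustrative sketch (hypothetical names): StyleBuilder.build() is normally
# driven through the Automation wrappers below rather than called directly.
#
#   style = StyleCustom({
#       "namedlayername": "provinces", "stylename": "provinces_style",
#       "rulenames": ["All provinces"],
#       "symbols": [polygonStrokeFill],    # see the SemiologyPolygon docstring
#       "conditions": [conditionEqual]     # optional
#   })
#   root = style()                         # the GsSldRoot assembled above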
class StyleCustom(Automation):
"""
Automation for a full custom SLD style.
Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples.
"""
def __init__(self, params):
symbols = [a["class"](a)() for a in params["symbols"]]
conditions = [a["class"](a)() for a in params["conditions"]] if "conditions" in params.keys() else None
self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], params["rulenames"], conditions, symbols)
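# Hedged example of a StyleCustom specification (an assumption for illustration;
# the keys mirror exactly what __init__ above reads, but the builder classes and
# values are placeholders -- see test_18_automation.py for real ones):
#
#   params = {
#       "namedlayername": "municipalities",
#       "stylename": "municipalities_style",
#       "rulenames": ["Rule A", "Rule B"],
#       "symbols": [{"class": SomeSymbolBuilder}, {"class": SomeSymbolBuilder}],
#       "conditions": [{"class": SomeConditionBuilder}, {"class": SomeConditionBuilder}],
#   }
#   sld_root = StyleCustom(params)()   # the SLD root object built by StyleBuilder.build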
class StyleSimpleIntervals(Automation):
"""
Automation for a simple intervals SLD style.
Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples.
"""
def __init__(self, params):
data = params["datasource"]["class"](params["datasource"])()
rang = params["rangetype"](data, params["steps"], params["precision"])()
conditions = []
for r in rang:
c = {"class": ConditionAnd,
"c0": {
"class": ConditionGtoe,
"attribute": params["datasource"]["attributename"],
"value": r[0]},
"c1": {
"class": ConditionLtoe,
"attribute": params["datasource"]["attributename"],
"value": r[1]}}
conditions.append(c["class"](c)())
symbols = params["ramp"]["class"](params["ramp"], params["steps"])()
rn = ranges.RuleNames()
ruleNames = rn.ruleNames(rang, params["rulenames"]["mono"], params["rulenames"]["dual"], params["rulenames"]["lambda"])
self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], ruleNames, conditions, symbols)
class StyleCenteredIntervals(Automation):
"""
Automation for a double ramp.
Takes a style specification as a dictionary and builds a full SLD. See test_18_automation.py for examples.
"""
def __init__(self, params):
data = params["datasource"]["class"](params["datasource"])()
# Data below and above median
below = [a for a in data if a<params["mediandata"]]
above = [a for a in data if a>params["mediandata"]]
#TODO: Erase median Ranges. A waste of time
belowIntervals = params["rangetype"](below, params["steps"], params["precision"])()
aboveIntervals = params["rangetype"](above, params["steps"], params["precision"])()
belowIntervals.append([params["mediandata"], params["mediandata"]])
belowIntervals.extend(aboveIntervals)
conditions = []
# TODO: This is duplicated in the class above, take apart
for r in belowIntervals:
c = {"class": ConditionAnd,
"c0": {
"class": ConditionGtoe,
"attribute": params["datasource"]["attributename"],
"value": r[0]},
"c1": {
"class": ConditionLtoe,
"attribute": params["datasource"]["attributename"],
"value": r[1]}}
conditions.append(c["class"](c)())
symbols = params["ramp"]["class"](params["ramp"], params["steps"])()
rn = ranges.RuleNames()
ruleNames = rn.ruleNames(belowIntervals, params["rulenames"]["mono"], params["rulenames"]["dual"], \
params["rulenames"]["lambda"])
self.out = StyleBuilder.build(params["namedlayername"], params["stylename"], ruleNames, conditions, symbols)
| mit | 7,179,470,238,680,441,000 | 26.824275 | 127 | 0.601081 | false |
openstack/os-win | os_win/tests/unit/utils/test_jobutils.py | 1 | 14327 | # Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
from os_win.utils import jobutils
@ddt.ddt
class JobUtilsTestCase(test_base.OsWinBaseTestCase):
"""Unit tests for the Hyper-V JobUtils class."""
_FAKE_RET_VAL = 0
_FAKE_JOB_STATUS_BAD = -1
_FAKE_JOB_DESCRIPTION = "fake_job_description"
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_ERROR = "fake_error"
_FAKE_ELAPSED_TIME = 0
def setUp(self):
super(JobUtilsTestCase, self).setUp()
self.jobutils = jobutils.JobUtils()
self.jobutils._conn_attr = mock.MagicMock()
@mock.patch.object(jobutils.JobUtils, '_wait_for_job')
def test_check_ret_val_started(self, mock_wait_for_job):
self.jobutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
mock.sentinel.job_path)
mock_wait_for_job.assert_called_once_with(mock.sentinel.job_path)
@mock.patch.object(jobutils.JobUtils, '_wait_for_job')
def test_check_ret_val_ok(self, mock_wait_for_job):
self.jobutils.check_ret_val(self._FAKE_RET_VAL,
mock.sentinel.job_path)
self.assertFalse(mock_wait_for_job.called)
def test_check_ret_val_exception(self):
self.assertRaises(exceptions.WMIJobFailed,
self.jobutils.check_ret_val,
mock.sentinel.ret_val_bad,
mock.sentinel.job_path)
def test_wait_for_job_ok(self):
mock_job = self._prepare_wait_for_job(
constants.JOB_STATE_COMPLETED_WITH_WARNINGS)
job = self.jobutils._wait_for_job(self._FAKE_JOB_PATH)
self.assertEqual(mock_job, job)
def test_wait_for_job_error_state(self):
self._prepare_wait_for_job(
constants.JOB_STATE_TERMINATED)
self.assertRaises(exceptions.WMIJobFailed,
self.jobutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_error_code(self):
self._prepare_wait_for_job(
constants.JOB_STATE_COMPLETED_WITH_WARNINGS,
error_code=1)
self.assertRaises(exceptions.WMIJobFailed,
self.jobutils._wait_for_job,
self._FAKE_JOB_PATH)
@ddt.data({"extended": False,
"expected_fields": ["InstanceID"]},
{"extended": True,
"expected_fields": ["InstanceID", "DetailedStatus"]})
@ddt.unpack
@mock.patch.object(jobutils.JobUtils, '_get_job_error_details')
def test_get_job_details(self, mock_get_job_err, expected_fields,
extended):
mock_job = mock.Mock()
details = self.jobutils._get_job_details(mock_job, extended=extended)
if extended:
mock_get_job_err.assert_called_once_with(mock_job)
self.assertEqual(details['RawErrors'],
mock_get_job_err.return_value)
for field in expected_fields:
self.assertEqual(getattr(mock_job, field),
details[field])
def test_get_job_error_details(self):
mock_job = mock.Mock()
error_details = self.jobutils._get_job_error_details(mock_job)
mock_job.GetErrorEx.assert_called_once_with()
self.assertEqual(mock_job.GetErrorEx.return_value, error_details)
def test_get_job_error_details_exception(self):
mock_job = mock.Mock()
mock_job.GetErrorEx.side_effect = Exception
self.assertIsNone(self.jobutils._get_job_error_details(mock_job))
def test_get_pending_jobs(self):
mock_killed_job = mock.Mock(JobState=constants.JOB_STATE_KILLED)
mock_running_job = mock.Mock(JobState=constants.WMI_JOB_STATE_RUNNING)
mock_error_st_job = mock.Mock(JobState=constants.JOB_STATE_EXCEPTION)
mappings = [mock.Mock(AffectingElement=None),
mock.Mock(AffectingElement=mock_killed_job),
mock.Mock(AffectingElement=mock_running_job),
mock.Mock(AffectingElement=mock_error_st_job)]
self.jobutils._conn.Msvm_AffectedJobElement.return_value = mappings
mock_affected_element = mock.Mock()
expected_pending_jobs = [mock_running_job]
pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
mock_affected_element)
self.assertEqual(expected_pending_jobs, pending_jobs)
self.jobutils._conn.Msvm_AffectedJobElement.assert_called_once_with(
AffectedElement=mock_affected_element.path_.return_value)
@mock.patch.object(jobutils._utils, '_is_not_found_exc')
def test_get_pending_jobs_ignored(self, mock_is_not_found_exc):
mock_not_found_mapping = mock.MagicMock()
type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
side_effect=exceptions.x_wmi)
self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
mock_not_found_mapping]
pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
mock.MagicMock())
self.assertEqual([], pending_jobs)
@mock.patch.object(jobutils._utils, '_is_not_found_exc')
def test_get_pending_jobs_reraised(self, mock_is_not_found_exc):
mock_is_not_found_exc.return_value = False
mock_not_found_mapping = mock.MagicMock()
type(mock_not_found_mapping).AffectingElement = mock.PropertyMock(
side_effect=exceptions.x_wmi)
self.jobutils._conn.Msvm_AffectedJobElement.return_value = [
mock_not_found_mapping]
self.assertRaises(exceptions.x_wmi,
self.jobutils._get_pending_jobs_affecting_element,
mock.MagicMock())
@ddt.data(True, False)
@mock.patch.object(jobutils.JobUtils,
'_get_pending_jobs_affecting_element')
def test_stop_jobs_helper(self, jobs_ended, mock_get_pending_jobs):
mock_job1 = mock.Mock(Cancellable=True)
mock_job2 = mock.Mock(Cancellable=True)
mock_job3 = mock.Mock(Cancellable=False)
pending_jobs = [mock_job1, mock_job2, mock_job3]
mock_get_pending_jobs.side_effect = (
pending_jobs,
pending_jobs if not jobs_ended else [])
mock_job1.RequestStateChange.side_effect = (
test_base.FakeWMIExc(hresult=jobutils._utils._WBEM_E_NOT_FOUND))
mock_job2.RequestStateChange.side_effect = (
test_base.FakeWMIExc(hresult=mock.sentinel.hresult))
if jobs_ended:
self.jobutils._stop_jobs(mock.sentinel.vm)
else:
self.assertRaises(exceptions.JobTerminateFailed,
self.jobutils._stop_jobs,
mock.sentinel.vm)
mock_get_pending_jobs.assert_has_calls(
[mock.call(mock.sentinel.vm)] * 2)
mock_job1.RequestStateChange.assert_called_once_with(
self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
mock_job2.RequestStateChange.assert_called_once_with(
self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        self.assertFalse(mock_job3.RequestStateChange.called)
@mock.patch.object(jobutils.JobUtils, '_stop_jobs')
def test_stop_jobs(self, mock_stop_jobs_helper):
fake_timeout = 1
self.jobutils.stop_jobs(mock.sentinel.element, fake_timeout)
mock_stop_jobs_helper.assert_called_once_with(mock.sentinel.element)
def test_is_job_completed_true(self):
job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_COMPLETED)
self.assertTrue(self.jobutils._is_job_completed(job))
def test_is_job_completed_false(self):
job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_RUNNING)
self.assertFalse(self.jobutils._is_job_completed(job))
def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD,
error_code=0):
mock_job = mock.MagicMock()
mock_job.JobState = state
mock_job.ErrorCode = error_code
mock_job.Description = self._FAKE_JOB_DESCRIPTION
mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
wmi_patcher = mock.patch.object(jobutils.JobUtils, '_get_wmi_obj')
mock_wmi = wmi_patcher.start()
self.addCleanup(wmi_patcher.stop)
mock_wmi.return_value = mock_job
return mock_job
def test_modify_virt_resource(self):
side_effect = [
(self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
self._check_modify_virt_resource_max_retries(side_effect=side_effect)
def test_modify_virt_resource_max_retries_exception(self):
side_effect = exceptions.HyperVException('expected failure.')
self._check_modify_virt_resource_max_retries(
side_effect=side_effect, num_calls=6, expected_fail=True)
def test_modify_virt_resource_max_retries(self):
side_effect = [exceptions.HyperVException('expected failure.')] * 5 + [
(self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
self._check_modify_virt_resource_max_retries(side_effect=side_effect,
num_calls=5)
@mock.patch('time.sleep')
def _check_modify_virt_resource_max_retries(
self, mock_sleep, side_effect, num_calls=1, expected_fail=False):
mock_svc = mock.MagicMock()
self.jobutils._vs_man_svc_attr = mock_svc
mock_svc.ModifyResourceSettings.side_effect = side_effect
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data
if expected_fail:
self.assertRaises(exceptions.HyperVException,
self.jobutils.modify_virt_resource,
mock_res_setting_data)
else:
self.jobutils.modify_virt_resource(mock_res_setting_data)
mock_calls = [
mock.call(ResourceSettings=[mock.sentinel.res_data])] * num_calls
mock_svc.ModifyResourceSettings.has_calls(mock_calls)
mock_sleep.has_calls(mock.call(1) * num_calls)
def test_add_virt_resource(self):
self._test_virt_method('AddResourceSettings', 3, 'add_virt_resource',
True, mock.sentinel.vm_path,
[mock.sentinel.res_data])
def test_remove_virt_resource(self):
self._test_virt_method('RemoveResourceSettings', 2,
'remove_virt_resource', False,
ResourceSettings=[mock.sentinel.res_path])
def test_add_virt_feature(self):
self._test_virt_method('AddFeatureSettings', 3, 'add_virt_feature',
True, mock.sentinel.vm_path,
[mock.sentinel.res_data])
def test_modify_virt_feature(self):
self._test_virt_method('ModifyFeatureSettings', 3,
'modify_virt_feature', False,
FeatureSettings=[mock.sentinel.res_data])
def test_remove_virt_feature(self):
self._test_virt_method('RemoveFeatureSettings', 2,
'remove_virt_feature', False,
FeatureSettings=[mock.sentinel.res_path])
def _test_virt_method(self, vsms_method_name, return_count,
utils_method_name, with_mock_vm, *args, **kwargs):
mock_svc = mock.MagicMock()
self.jobutils._vs_man_svc_attr = mock_svc
vsms_method = getattr(mock_svc, vsms_method_name)
mock_rsd = self._mock_vsms_method(vsms_method, return_count)
if with_mock_vm:
mock_vm = mock.MagicMock()
mock_vm.path_.return_value = mock.sentinel.vm_path
getattr(self.jobutils, utils_method_name)(mock_rsd, mock_vm)
else:
getattr(self.jobutils, utils_method_name)(mock_rsd)
if args:
vsms_method.assert_called_once_with(*args)
else:
vsms_method.assert_called_once_with(**kwargs)
def _mock_vsms_method(self, vsms_method, return_count):
args = None
if return_count == 3:
args = (
mock.sentinel.job_path, mock.MagicMock(), self._FAKE_RET_VAL)
else:
args = (mock.sentinel.job_path, self._FAKE_RET_VAL)
vsms_method.return_value = args
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data
mock_res_setting_data.path_.return_value = mock.sentinel.res_path
self.jobutils.check_ret_val = mock.MagicMock()
return mock_res_setting_data
@mock.patch.object(jobutils.JobUtils, 'check_ret_val')
def test_remove_multiple_virt_resources_not_found(self, mock_check_ret):
excepinfo = [None] * 5 + [jobutils._utils._WBEM_E_NOT_FOUND]
mock_check_ret.side_effect = exceptions.x_wmi(
'expected error', com_error=mock.Mock(excepinfo=excepinfo))
vsms_method = self.jobutils._vs_man_svc.RemoveResourceSettings
vsms_method.return_value = (mock.sentinel.job, mock.sentinel.ret_val)
mock_virt_res = mock.Mock()
self.assertRaises(exceptions.NotFound,
self.jobutils.remove_virt_resource, mock_virt_res)
vsms_method.assert_called_once_with(
ResourceSettings=[mock_virt_res.path_.return_value])
mock_check_ret.assert_called_once_with(mock.sentinel.ret_val,
mock.sentinel.job)
| apache-2.0 | -1,470,856,766,625,067,300 | 42.024024 | 79 | 0.619879 | false |
pfjel7/housing-insights | python/api/project_view_blueprint.py | 1 | 11855 | from flask import Blueprint
from flask import jsonify, request
import math
import logging
from flask_cors import cross_origin
def construct_project_view_blueprint(name, engine):
blueprint = Blueprint(name, __name__, url_prefix='/api')
@blueprint.route('/wmata/<nlihc_id>', methods=['GET'])
@cross_origin()
def nearby_transit(nlihc_id):
'''
Returns the nearby bus and metro routes and stops.
Currently this assumes that all entries in the wmata_dist
table are those that have a walking distance of 0.5 miles
or less. We may later want to implement functionality to
filter this to those with less distance.
'''
conn = engine.connect()
try:
q = """
SELECT dist_in_miles, type, stop_id_or_station_code
FROM wmata_dist
WHERE nlihc_id = '{}'
""".format(nlihc_id)
proxy = conn.execute(q)
results = proxy.fetchall()
#transform the results.
            stops = {'bus': [], 'rail': []}
            rail_stops = []
            bus_stops = []
            bus_routes = {}
            rail_routes = {}
for x in results:
#reformat the data into appropriate json
dist = str(x[0])
typ = x[1]
stop_id = x[2]
routes = unique_transit_routes([stop_id])
stop_dict = dict({'dist_in_miles':dist,
'type':typ,
'stop_id_or_station_code':stop_id,
'routes':routes
})
#Calculate summary statistics for ease of use
if typ == 'bus':
stops['bus'].append(stop_dict)
bus_stops.append(stop_id)
#Add all unique routes to a master list, with the shortest walking distance to that route
for route in routes:
if route not in bus_routes:
bus_routes[route] = {'route':route,'shortest_dist':10000}
if float(dist) < float(bus_routes[route]['shortest_dist']):
bus_routes[route]['shortest_dist'] = dist
if typ == 'rail':
stops['rail'].append(stop_dict)
rail_stops.append(stop_id)
#Add all unique routes to a master list, with the shortest walking distance to that route
#TODO refactor this into reusable function
for route in routes:
if route not in rail_routes:
rail_routes[route] = {'route':route,'shortest_dist':10000}
if float(dist) < float(rail_routes[route]['shortest_dist']):
rail_routes[route]['shortest_dist'] = dist
#TODO - might be easier to approach this by using separate variables and then repackaging into the desired output format at the end?
            #Rearrange the bus routes into groups of shortest distance for easier display on front end
bus_routes_grouped = []
for key in bus_routes:
dist = bus_routes[key]['shortest_dist']
idx = idx_from_ld(bus_routes_grouped,'shortest_dist',dist)
if idx == None:
bus_routes_grouped.append({"shortest_dist":dist, "routes":[]})
idx = idx_from_ld(bus_routes_grouped,'shortest_dist',dist)
bus_routes_grouped[idx]['routes'].append(key)
#Rearrange rail
rail_routes_grouped = []
for key in rail_routes:
dist = rail_routes[key]['shortest_dist']
idx = idx_from_ld(rail_routes_grouped,'shortest_dist',dist)
if idx == None:
rail_routes_grouped.append({"shortest_dist":dist, "routes":[]})
idx = idx_from_ld(rail_routes_grouped,'shortest_dist',dist)
rail_routes_grouped[idx]['routes'].append(key)
#TODO would be good to sort rail_routes_grouped and bus_routes_grouped before delivery (currently sorting on the front end)
conn.close()
return jsonify({'stops':stops,
'bus_routes':bus_routes,
'rail_routes':rail_routes,
'bus_routes_grouped':bus_routes_grouped,
'rail_routes_grouped':rail_routes_grouped
})
except Exception as e:
raise e
return "Query failed: {}".format(e)
def idx_from_ld(lst,key,value):
'''
        Takes a list of dictionaries and returns the index of the first dictionary
        entry matching the key and value supplied, or None if there is no match.
Used for data forms like this: [{'foo':'bar'},{'foo':'asdf'}]
'''
for idx, dic in enumerate(lst):
if dic[key] == value:
return idx
return None
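    # Illustrative sketch (not in the original source): idx_from_ld returns the
    # position of the first matching dictionary, or None when nothing matches.
    #
    #   idx_from_ld([{'foo': 'bar'}, {'foo': 'asdf'}], 'foo', 'asdf')   # -> 1
    #   idx_from_ld([{'foo': 'bar'}], 'foo', 'missing')                 # -> None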
def unique_transit_routes(stop_ids):
if len(stop_ids) == 0:
return []
else:
#Second query to get the unique transit lines
q_list = str(stop_ids).replace('[','(').replace(']',')')
q = """
SELECT lines FROM wmata_info
WHERE stop_id_or_station_code in {}
""".format(q_list)
conn = engine.connect()
proxy = conn.execute(q)
routes = [x[0] for x in proxy.fetchall()]
conn.close()
#Parse the : separated objects
routes = ':'.join(routes)
routes = routes.split(':')
unique = list(set(routes))
return unique
@blueprint.route('/building_permits/<dist>', methods=['GET'])
@cross_origin()
def nearby_building_permits(dist):
conn = engine.connect()
#Get our params
dist = float(dist)
latitude = request.args.get('latitude',None)
longitude = request.args.get('longitude',None)
        if latitude is None or longitude is None:
return "Please supply latitude and longitude"
else:
latitude=float(latitude)
longitude=float(longitude)
latitude_tolerance, longitude_tolerance = bounding_box(dist, latitude, longitude)
#Return just a subset of columns to lighten the data load. TODO do we want user option for short/all?
q = '''
SELECT
(latitude - {latitude} ) AS lat_diff
,(longitude - {longitude} ) AS lon_diff
,latitude
,longitude
,ward
,neighborhood_cluster
,anc
--,census_tract --not yet available
,zip
,permit_type_name
,permit_subtype_name
,full_address
,objectid
FROM building_permits
WHERE latitude < ({latitude} + {latitude_tolerance})::DECIMAL
AND latitude > ({latitude} - {latitude_tolerance})::DECIMAL
AND longitude < ({longitude} + {longitude_tolerance})::DECIMAL
AND longitude > ({longitude} - {longitude_tolerance})::DECIMAL
AND issue_date BETWEEN (now()::TIMESTAMP - INTERVAL '1 year') AND now()::TIMESTAMP
'''.format(
latitude=latitude,
longitude=longitude,
latitude_tolerance=latitude_tolerance,
longitude_tolerance=longitude_tolerance
)
proxy = conn.execute(q)
results = proxy.fetchall()
good_results = [dict(r) for r in results if haversine(latitude, longitude, float(r.latitude), float(r.longitude)) <= dist]
tot_permits = len(good_results)
output = {
'objects': good_results
, 'tot_permits':tot_permits
, 'distance': dist
}
output_json = jsonify(output)
conn.close()
return output_json
@blueprint.route('/projects/<dist>', methods=['GET'])
@cross_origin()
def nearby_projects(dist):
conn = engine.connect()
dist = float(dist)
#Get our params
latitude = request.args.get('latitude',None)
longitude = request.args.get('longitude',None)
        if latitude is None or longitude is None:
return "Please supply latitude and longitude"
else:
latitude=float(latitude)
longitude=float(longitude)
latitude_tolerance, longitude_tolerance = bounding_box(dist, latitude, longitude)
q = '''
SELECT
(latitude - {latitude} ) AS lat_diff
,(longitude - {longitude} ) AS lon_diff
,*
FROM project
WHERE latitude < ({latitude} + {latitude_tolerance})::DECIMAL
AND latitude > ({latitude} - {latitude_tolerance})::DECIMAL
AND longitude < ({longitude} + {longitude_tolerance})::DECIMAL
AND longitude > ({longitude} - {longitude_tolerance})::DECIMAL
AND status = 'Active'
'''.format(
latitude=latitude,
longitude=longitude,
latitude_tolerance=latitude_tolerance,
longitude_tolerance=longitude_tolerance
)
proxy = conn.execute(q)
results = proxy.fetchall()
good_results = [dict(r) for r in results if haversine(latitude, longitude, float(r.latitude), float(r.longitude)) <= dist]
unit_counts = [r['proj_units_assist_max'] for r in good_results]
unit_counts = filter(None, unit_counts) #can't sum None
unit_counts = [int(u) for u in unit_counts] #temporarily needed b/c accidentally stored as text
tot_units = sum(unit_counts)
tot_buildings = len(good_results)
output = {
'objects': good_results,
'tot_units': tot_units,
'tot_buildings': tot_buildings,
'distance': dist
}
conn.close()
output_json = jsonify(output)
return output_json
#def haversine(lat1, long1, lat2, long2):
from math import radians, cos, sin, asin, sqrt
def haversine(lat1, lon1, lat2,lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
original_coords = (lat1, lon1, lat2, lon2) # for debugging
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 3956 # Radius of earth in miles
d = c * r
#print("Haversine for {} = {}".format(original_coords,d))
return c * r
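    # Hedged usage sketch (not part of the original file): both points are
    # hypothetical coordinates in Washington, DC, and the result is in miles
    # because the Earth radius above is given in miles.
    #
    #   d = haversine(38.9072, -77.0369, 38.8977, -77.0365)
    #   # d is roughly two thirds of a mile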
def bounding_box(dist, latitude, longitude):
""" Cribbed from https://gis.stackexchange.com/questions/142326/calculating-longitude-length-in-miles """
radius = 3959 # miles, from google
dlat_rad = 69 * dist / radius # google again
latitude_tolerance = dist / 69
        longitude_tolerance = dist / (math.cos(math.radians(latitude)) * 69.172)  # cos() expects radians
return (latitude_tolerance, longitude_tolerance)
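    # Hedged sketch of how the two helpers combine (this mirrors the pattern in
    # the nearby_* endpoints above; the values are hypothetical):
    #
    #   lat_tol, lon_tol = bounding_box(0.5, 38.9072, -77.0369)
    #   # SQL first narrows candidates to latitude/longitude +/- the tolerances,
    #   # then haversine() trims that square down to the true half-mile circle:
    #   nearby = [r for r in rows
    #             if haversine(38.9072, -77.0369, float(r.latitude),
    #                          float(r.longitude)) <= 0.5]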
@blueprint.route('/project/<nlihc_id>/subsidies/', methods=['GET'])
@cross_origin()
def project_subsidies(nlihc_id):
q = """
SELECT * FROM subsidy
WHERE nlihc_id = '{}'
""".format(nlihc_id)
conn = engine.connect()
proxy = conn.execute(q)
results = [dict(x) for x in proxy.fetchall()]
conn.close()
output = {'objects': results}
return jsonify(output)
return blueprint
| mit | -6,892,794,526,416,599,000 | 35.253823 | 144 | 0.539688 | false |
xesscorp/KiPart | kipart/kipart.py | 1 | 39760 | # -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2015-2019 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
import argparse as ap
import importlib
import io
import math
import os
import re
import sys
import zipfile
from builtins import str
from collections import OrderedDict
from copy import copy
from pprint import pprint
from affine import Affine
from past.utils import old_div
from .common import *
from .pckg_info import __version__
from .py_2_3 import *
__all__ = ["kipart"] # Only export this routine for use by the outside world.
THIS_MODULE = sys.modules[__name__] # Ref to this module for making named calls.
# Settings for creating the KiCad schematic part symbol.
# Dimensions are given in mils (0.001").
# Origin point.
XO = 0
YO = 0
# Pin settings.
PIN_LENGTH = 200
PIN_SPACING = 100
PIN_NUM_SIZE = 50 # Font size for pin numbers.
PIN_NAME_SIZE = 50 # Font size for pin names.
PIN_NAME_OFFSET = 40 # Separation between pin and pin name.
PIN_ORIENTATION = "left"
PIN_STYLE = "line"
SHOW_PIN_NUMBER = True # Show pin numbers when True.
SHOW_PIN_NAME = True # Show pin names when True.
SINGLE_PIN_SUFFIX = ""
MULTI_PIN_SUFFIX = "*"
PIN_SPACER_PREFIX = "*"
# Settings for box drawn around pins in a unit.
DEFAULT_BOX_LINE_WIDTH = 0
# Mapping from understandable schematic symbol box fill-type name
# to the fill-type indicator used in the KiCad part library.
BOX_FILLS = {"no_fill": "N", "fg_fill": "F", "bg_fill": "f"}
DEFAULT_BOX_FILL = "bg_fill"
# Part reference.
REF_SIZE = 60 # Font size.
REF_Y_OFFSET = 250
# Part number.
PART_NUM_SIZE = 60 # Font size.
PART_NUM_Y_OFFSET = 150
# Part footprint
PART_FOOTPRINT_SIZE = 60 # Font size.
PART_FOOTPRINT_Y_OFFSET = 50
# Part manufacturer number.
PART_MPN_SIZE = 60 # Font size.
PART_MPN_Y_OFFSET = -50
# Part datasheet.
PART_DATASHEET_SIZE = 60 # Font size.
PART_DATASHEET_Y_OFFSET = -150
# Part description.
PART_DESC_SIZE = 60 # Font size.
PART_DESC_Y_OFFSET = -250
# Mapping from understandable pin orientation name to the orientation
# indicator used in the KiCad part library. This mapping looks backward,
# but if pins are placed on the left side of the symbol, you actually
# want to use the pin symbol where the line points to the right.
# The same goes for the other sides.
PIN_ORIENTATIONS = {
"": "R",
"left": "R",
"right": "L",
"bottom": "U",
"down": "U",
"top": "D",
"up": "D",
}
scrubber = re.compile("[^\w~#]+")
PIN_ORIENTATIONS = {
scrubber.sub("", k).lower(): v for k, v in list(PIN_ORIENTATIONS.items())
}
ROTATION = {"left": 0, "right": 180, "bottom": 90, "top": -90}
# Mapping from understandable pin type name to the type
# indicator used in the KiCad part library.
PIN_TYPES = {
"input": "I",
"inp": "I",
"in": "I",
"clk": "I",
"output": "O",
"outp": "O",
"out": "O",
"bidirectional": "B",
"bidir": "B",
"bi": "B",
"inout": "B",
"io": "B",
"iop": "B",
"tristate": "T",
"tri": "T",
"passive": "P",
"pass": "P",
"unspecified": "U",
"un": "U",
"": "U",
"analog": "U",
"power_in": "W",
"pwr_in": "W",
"pwrin": "W",
"power": "W",
"pwr": "W",
"ground": "W",
"gnd": "W",
"power_out": "w",
"pwr_out": "w",
"pwrout": "w",
"pwr_o": "w",
"open_collector": "C",
"opencollector": "C",
"open_coll": "C",
"opencoll": "C",
"oc": "C",
"open_emitter": "E",
"openemitter": "E",
"open_emit": "E",
"openemit": "E",
"oe": "E",
"no_connect": "N",
"noconnect": "N",
"no_conn": "N",
"noconn": "N",
"nc": "N",
}
PIN_TYPES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_TYPES.items())}
# Mapping from understandable pin drawing style to the style
# indicator used in the KiCad part library.
PIN_STYLES = {
"line": "",
"": "",
"inverted": "I",
"inv": "I",
"~": "I",
"#": "I",
"clock": "C",
"clk": "C",
"rising_clk": "C",
"inverted_clock": "IC",
"inv_clk": "IC",
"clk_b": "IC",
"clk_n": "IC",
"~clk": "IC",
"#clk": "IC",
"input_low": "L",
"inp_low": "L",
"in_lw": "L",
"in_b": "L",
"in_n": "L",
"~in": "L",
"#in": "L",
"clock_low": "CL",
"clk_low": "CL",
"clk_lw": "CL",
"output_low": "V",
"outp_low": "V",
"out_lw": "V",
"out_b": "V",
"out_n": "V",
"~out": "V",
"#out": "V",
"falling_edge_clock": "F",
"falling_clk": "F",
"fall_clk": "F",
"non_logic": "X",
"nl": "X",
"analog": "X",
}
PIN_STYLES = {scrubber.sub("", k).lower(): v for k, v in list(PIN_STYLES.items())}
# Format strings for various items in a KiCad part library.
LIB_HEADER = "EESchema-LIBRARY Version 2.3\n"
START_DEF = "DEF {name} {ref} 0 {pin_name_offset} {show_pin_number} {show_pin_name} {num_units} L N\n"
END_DEF = "ENDDEF\n"
REF_FIELD = 'F0 "{ref_prefix}" {x} {y} {font_size} H V {text_justification} CNN\n'
PARTNUM_FIELD = 'F1 "{num}" {x} {y} {font_size} H V {text_justification} CNN\n'
FOOTPRINT_FIELD = 'F2 "{footprint}" {x} {y} {font_size} H I {text_justification} CNN\n'
DATASHEET_FIELD = 'F3 "{datasheet}" {x} {y} {font_size} H I {text_justification} CNN\n'
MPN_FIELD = 'F4 "{manf_num}" {x} {y} {font_size} H I {text_justification} CNN "manf#"\n'
DESC_FIELD = 'F5 "{desc}" {x} {y} {font_size} H I {text_justification} CNN "desc"\n'
START_DRAW = "DRAW\n"
END_DRAW = "ENDDRAW\n"
BOX = "S {x0} {y0} {x1} {y1} {unit_num} 1 {line_width} {fill}\n"
PIN = "X {name} {num} {x} {y} {length} {orientation} {num_sz} {name_sz} {unit_num} 1 {pin_type} {pin_style}\n"
def annotate_pins(unit_pins):
"""Annotate pin names to indicate special information."""
for name, pins in unit_pins:
# If there are multiple pins with the same name in a unit, then append a
# distinctive suffix to the pin name to indicate multiple pins are placed
# at a single location on the unit. (This is done so multiple pins that
# should be on the same net (e.g. GND) can be connected using a single
# net connection in the schematic.)
name_suffix = SINGLE_PIN_SUFFIX
if len(pins) > 1:
# name_suffix = MULTI_PIN_SUFFIX
name_suffix = "[{}]".format(len(pins))
for pin in pins:
pin.name += name_suffix
def get_pin_num_and_spacer(pin):
pin_num = str(pin.num)
pin_spacer = 0
# spacer pins have pin numbers starting with a special prefix char.
if pin_num.startswith(PIN_SPACER_PREFIX):
pin_spacer = 1
pin_num = pin_num[1:] # Remove the spacer prefix.
return pin_num, pin_spacer
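# Hedged example (not in the original source): with the default PIN_SPACER_PREFIX
# of "*", a spreadsheet pin number like "*5" yields ("5", 1) -- pin 5 preceded by
# one blank slot -- while a bare "*" yields ("", 1), which adds a gap without
# drawing any pin at all.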
def count_pin_slots(unit_pins):
"""Count the number of vertical pin slots needed for a column of pins."""
# Compute the # of slots for the column of pins, taking spacers into account.
num_slots = 0
pin_num_len = 0
for name, pins in unit_pins:
pin_spacer = 0
pin_num_len = 0
for pin in pins:
pin_num, pin_spacer = get_pin_num_and_spacer(pin)
pin_num_len = max(pin_num_len, len(pin_num))
num_slots += pin_spacer # Add a slot if there was a spacer.
# Add a slot if the pin number was more than just a spacer prefix.
if pin_num_len > 0:
num_slots += 1
return num_slots
def pins_bbox(unit_pins):
"""Return the bounding box of a column of pins and their names."""
if len(unit_pins) == 0:
return [[XO, YO], [XO, YO]] # No pins, so no bounding box.
width = 0
for name, pins in unit_pins:
# Update the maximum observed width of a pin name. This is used later to
# size the width of the box surrounding the pin names for this unit.
width = max(width, len(pins[0].name) * PIN_NAME_SIZE)
# Add the separation space before and after the pin name.
width += PIN_LENGTH + 2 * PIN_NAME_OFFSET
# Make bounding box an integer number of pin spaces so pin connections are always on the grid.
width = math.ceil(old_div(float(width), PIN_SPACING)) * PIN_SPACING
# Compute the height of the column of pins.
height = count_pin_slots(unit_pins) * PIN_SPACING
return [[XO, YO + PIN_SPACING], [XO + width, YO - height]]
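# Worked example (added for illustration; not in the original file): with the
# defaults above (PIN_LENGTH=200, PIN_NAME_OFFSET=40, PIN_NAME_SIZE=50,
# PIN_SPACING=100), a column of three pins whose longest name is "RESET"
# (5 characters) gives
#   width  = ceil((5*50 + 200 + 2*40) / 100) * 100 = 600 mils
#   height = 3 * 100                               = 300 mils
# so the returned bounding box runs from (XO, YO+100) down to (XO+600, YO-300).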
def balance_bboxes(bboxes):
"""Make the symbol more balanced by adjusting the bounding boxes of the pins on each side."""
X = 0
Y = 1
def find_bbox_bbox(*bboxes):
"""Find the bounding box for a set of bounding boxes."""
bb = [[0, 0], [0, 0]]
for bbox in bboxes:
bb[0][X] = min(bb[0][X], bbox[0][X])
bb[1][X] = max(bb[1][X], bbox[1][X])
bb[0][Y] = max(bb[0][Y], bbox[0][Y])
bb[1][Y] = min(bb[1][Y], bbox[1][Y])
return bb
# Determine the number of sides of the symbol with pins.
num_sides = len(bboxes)
if num_sides == 4:
# If the symbol has pins on all four sides, then check to see if there
# are approximately the same number of pins on all four sides. If so,
# then equalize the bounding box for each side. Otherwise, equalize
# the left & right bounding boxes and the top & bottom bounding boxes.
lr_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
lr_hgt = abs(lr_bbox[0][Y] - lr_bbox[1][Y])
tb_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
tb_hgt = abs(tb_bbox[0][Y] - tb_bbox[1][Y])
if 0.75 <= float(lr_hgt) / float(tb_hgt) <= 1 / 0.75:
bal_bbox = find_bbox_bbox(*list(bboxes.values()))
for side in bboxes:
bboxes[side] = copy(bal_bbox)
else:
bboxes["left"] = copy(lr_bbox)
bboxes["right"] = copy(lr_bbox)
bboxes["top"] = copy(tb_bbox)
bboxes["bottom"] = copy(tb_bbox)
elif num_sides == 3:
        # If the symbol only has pins on three sides, then equalize the
# bounding boxes for the pins on opposite sides and leave the
# bounding box on the other side unchanged.
if "left" not in bboxes or "right" not in bboxes:
# Top & bottom side pins, but the left or right side is empty.
bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
bboxes["top"] = copy(bal_bbox)
bboxes["bottom"] = copy(bal_bbox)
elif "top" not in bboxes or "bottom" not in bboxes:
# Left & right side pins, but the top or bottom side is empty.
bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
bboxes["left"] = copy(bal_bbox)
bboxes["right"] = copy(bal_bbox)
elif num_sides == 2:
# If the symbol only has pins on two opposing sides, then equalize the
# height of the bounding boxes for each side. Leave the width unchanged.
if "left" in bboxes and "right" in bboxes:
bal_bbox = find_bbox_bbox(bboxes["left"], bboxes["right"])
bboxes["left"][0][Y] = bal_bbox[0][Y]
bboxes["left"][1][Y] = bal_bbox[1][Y]
bboxes["right"][0][Y] = bal_bbox[0][Y]
bboxes["right"][1][Y] = bal_bbox[1][Y]
elif "top" in bboxes and "bottom" in bboxes:
bal_bbox = find_bbox_bbox(bboxes["top"], bboxes["bottom"])
bboxes["top"][0][Y] = bal_bbox[0][Y]
bboxes["top"][1][Y] = bal_bbox[1][Y]
bboxes["bottom"][0][Y] = bal_bbox[0][Y]
bboxes["bottom"][1][Y] = bal_bbox[1][Y]
def draw_pins(unit_num, unit_pins, bbox, transform, side, push, fuzzy_match):
"""Draw a column of pins rotated/translated by the transform matrix."""
# String to add pin definitions to.
pin_defn = ""
# Find the actual height of the column of pins and subtract it from the
# bounding box (which should be at least as large). Half the difference
# will be the offset needed to center the pins on the side of the symbol.
Y = 1 # Index for Y coordinate.
pins_bb = pins_bbox(unit_pins)
height_offset = abs(bbox[0][Y] - bbox[1][Y]) - abs(pins_bb[0][Y] - pins_bb[1][Y])
push = min(max(0.0, push), 1.0)
if side in ("right", "top"):
push = 1.0 - push
height_offset *= push
height_offset -= height_offset % PIN_SPACING # Keep stuff on the PIN_SPACING grid.
# Start drawing pins from the origin.
x = XO
y = YO - height_offset
for name, pins in unit_pins:
# Detect pins with "spacer" pin numbers.
pin_spacer = 0
pin_num_len = 0
for pin in pins:
pin_num, pin_spacer = get_pin_num_and_spacer(pin)
pin_num_len = max(pin_num_len, len(pin_num))
y -= pin_spacer * PIN_SPACING # Add space between pins if there was a spacer.
if pin_num_len == 0:
continue # Omit pin if it only had a spacer prefix and no actual pin number.
# Rotate/translate the current drawing point.
(draw_x, draw_y) = transform * (x, y)
# Use approximate matching to determine the pin's type, style and orientation.
pin_type = find_closest_match(pins[0].type, PIN_TYPES, fuzzy_match)
pin_style = find_closest_match(pins[0].style, PIN_STYLES, fuzzy_match)
pin_side = find_closest_match(pins[0].side, PIN_ORIENTATIONS, fuzzy_match)
if pins[0].hidden.lower().strip() in ["y", "yes", "t", "true", "1"]:
pin_style = "N" + pin_style
# Create all the pins with a particular name. If there are more than one,
# they are laid on top of each other and only the first is visible.
num_size = PIN_NUM_SIZE # First pin will be visible.
for pin in pins:
pin_num = str(pin.num)
# Remove any spacer prefix on the pin numbers.
if pin_num.startswith(PIN_SPACER_PREFIX):
pin_num = pin_num[1:]
# Create a pin using the pin data.
pin_defn += PIN.format(
name=pin.name,
num=pin_num,
x=int(draw_x),
y=int(draw_y),
length=PIN_LENGTH,
orientation=pin_side,
num_sz=num_size,
name_sz=PIN_NAME_SIZE,
unit_num=unit_num,
pin_type=pin_type,
pin_style=pin_style,
)
# Turn off visibility after the first pin.
num_size = 0
# Move to the next pin placement location on this unit.
y -= PIN_SPACING
return pin_defn # Return part symbol definition with pins added.
def zero_pad_nums(s):
# Pad all numbers in the string with leading 0's.
    # Thus, 'A10' and 'A2' will become 'A00000010' and 'A00000002' (numbers are
    # padded to eight digits), so A2 will appear before A10 in a list.
try:
return re.sub(
r"\d+", lambda mtch: "0" * (8 - len(mtch.group(0))) + mtch.group(0), s
)
except TypeError:
return s # The input is probably not a string, so just return it unchanged.
def num_key(pin):
"""Generate a key from a pin's number so they are sorted by position on the package."""
# Pad all numeric strings in the pin name with leading 0's.
# Thus, 'A10' and 'A2' will become 'A00010' and 'A00002' and A2 will
# appear before A10 in a list.
return zero_pad_nums(pin[1][0].num)
def name_key(pin):
"""Generate a key from a pin's name so they are sorted more logically."""
# Pad all numeric strings in the pin name with leading 0's.
# Thus, 'adc10' and 'adc2' will become 'adc00010' and 'adc00002' and adc2 will
# appear before adc10 in a list.
return zero_pad_nums(pin[1][0].name)
def row_key(pin):
"""Generate a key from the order the pins were entered into the CSV file."""
return pin[1][0].index
def draw_symbol(
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
pin_data,
sort_type,
reverse,
fuzzy_match,
fill,
box_line_width,
push,
):
"""Add a symbol for a part to the library."""
# Start the part definition with the header.
part_defn = START_DEF.format(
name=part_num,
ref=part_ref_prefix,
pin_name_offset=PIN_NAME_OFFSET,
show_pin_number=SHOW_PIN_NUMBER and "Y" or "N",
show_pin_name=SHOW_PIN_NAME and "Y" or "N",
num_units=len(pin_data),
)
# Determine if there are pins across the top of the symbol.
# If so, right-justify the reference, part number, etc. so they don't
# run into the top pins. If not, stick with left-justification.
text_justification = "L"
horiz_offset = PIN_LENGTH
for unit in list(pin_data.values()):
if "top" in list(unit.keys()):
text_justification = "R"
horiz_offset = PIN_LENGTH - 50
break
# Create the field that stores the part reference.
if not part_ref_prefix:
part_ref_prefix = "U"
part_defn += REF_FIELD.format(
ref_prefix=part_ref_prefix,
x=XO + horiz_offset,
y=YO + REF_Y_OFFSET,
text_justification=text_justification,
font_size=REF_SIZE,
)
# Create the field that stores the part number.
if not part_num:
part_num = ""
part_defn += PARTNUM_FIELD.format(
num=part_num,
x=XO + horiz_offset,
y=YO + PART_NUM_Y_OFFSET,
text_justification=text_justification,
font_size=PART_NUM_SIZE,
)
# Create the field that stores the part footprint.
if not part_footprint:
part_footprint = ""
part_defn += FOOTPRINT_FIELD.format(
footprint=part_footprint,
x=XO + horiz_offset,
y=YO + PART_FOOTPRINT_Y_OFFSET,
text_justification=text_justification,
font_size=PART_FOOTPRINT_SIZE,
)
# Create the field that stores the datasheet link.
if not part_datasheet:
part_datasheet = ""
part_defn += DATASHEET_FIELD.format(
datasheet=part_datasheet,
x=XO + horiz_offset,
y=YO + PART_DATASHEET_Y_OFFSET,
text_justification=text_justification,
font_size=PART_DATASHEET_SIZE,
)
# Create the field that stores the manufacturer part number.
if part_manf_num:
part_defn += MPN_FIELD.format(
manf_num=part_manf_num,
x=XO + horiz_offset,
y=YO + PART_MPN_Y_OFFSET,
text_justification=text_justification,
font_size=PART_MPN_SIZE,
)
# Create the field that stores the datasheet link.
if part_desc:
part_defn += DESC_FIELD.format(
desc=part_desc,
x=XO + horiz_offset,
y=YO + PART_DESC_Y_OFFSET,
text_justification=text_justification,
font_size=PART_DESC_SIZE,
)
# Start the section of the part definition that holds the part's units.
part_defn += START_DRAW
# Get a reference to the sort-key generation function for pins.
pin_key_func = getattr(THIS_MODULE, "{}_key".format(sort_type))
# This is the sort-key generation function for unit names.
unit_key_func = lambda x: zero_pad_nums(x[0])
# Now create the units that make up the part. Unit numbers go from 1
# up to the number of units in the part. The units are sorted by their
# names before assigning unit numbers.
for unit_num, unit in enumerate(
[p[1] for p in sorted(pin_data.items(), key=unit_key_func)], 1
):
# The indices of the X and Y coordinates in a list of point coords.
X = 0
Y = 1
# Initialize data structures that store info for each side of a schematic symbol unit.
all_sides = ["left", "right", "top", "bottom"]
bbox = {side: [(XO, YO), (XO, YO)] for side in all_sides}
box_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides}
anchor_pt = {side: [XO + PIN_LENGTH, YO + PIN_SPACING] for side in all_sides}
transform = {}
# Annotate the pins for each side of the symbol.
for side_pins in list(unit.values()):
annotate_pins(list(side_pins.items()))
# Determine the actual bounding box for each side.
bbox = {}
for side, side_pins in list(unit.items()):
bbox[side] = pins_bbox(list(side_pins.items()))
# Adjust the sizes of the bboxes to make the unit look more symmetrical.
balance_bboxes(bbox)
# Determine some important points for each side of pins.
for side in unit:
#
# C B-------A
# | |
# ------| name1 |
# | |
# ------| name2 |
#
# A = anchor point = upper-right corner of bounding box.
# B = box point = upper-left corner of bounding box + pin length.
# C = upper-left corner of bounding box.
anchor_pt[side] = [
max(bbox[side][0][X], bbox[side][1][X]),
max(bbox[side][0][Y], bbox[side][1][Y]),
]
box_pt[side] = [
min(bbox[side][0][X], bbox[side][1][X]) + PIN_LENGTH,
max(bbox[side][0][Y], bbox[side][1][Y]),
]
# AL = left-side anchor point.
# AB = bottom-side anchor point.
# AR = right-side anchor point.
# AT = top-side anchor-point.
# +-------------+
# | |
# | TOP |
# | |
# +------AL------------AT
# | |
# | | +---------+
# | | | |
# | L | | |
# | E | | R |
# | F | | I |
# | T | | G |
# | | | H |
# | | | T |
# | | | |
# +------AB-------+ AR--------+
# | BOTTOM |
# +--------+
#
# Create zero-sized bounding boxes for any sides of the unit without pins.
# This makes it simpler to do the width/height calculation that follows.
for side in all_sides:
if side not in bbox:
bbox[side] = [(XO, YO), (XO, YO)]
# This is the width and height of the box in the middle of the pins on each side.
box_width = max(
abs(bbox["top"][0][Y] - bbox["top"][1][Y]),
abs(bbox["bottom"][0][Y] - bbox["bottom"][1][Y]),
)
box_height = max(
abs(bbox["left"][0][Y] - bbox["left"][1][Y]),
abs(bbox["right"][0][Y] - bbox["right"][1][Y]),
)
for side in all_sides:
# Each side of pins starts off with the orientation of a left-hand side of pins.
# Transformation matrix starts by rotating the side of pins.
transform[side] = Affine.rotation(ROTATION[side])
# Now rotate the anchor point to see where it goes.
rot_anchor_pt = transform[side] * anchor_pt[side]
# Translate the rotated anchor point to coincide with the AL anchor point.
translate_x = anchor_pt["left"][X] - rot_anchor_pt[X]
translate_y = anchor_pt["left"][Y] - rot_anchor_pt[Y]
# Make additional translation to bring the AL point to the correct position.
if side == "right":
# Translate AL to AR.
translate_x += box_width
translate_y -= box_height
elif side == "bottom":
# Translate AL to AB
translate_y -= box_height
elif side == "top":
# Translate AL to AT
translate_x += box_width
# Create the complete transformation matrix = rotation followed by translation.
transform[side] = (
Affine.translation(translate_x, translate_y) * transform[side]
)
# Also translate the point on each side that defines the box around the symbol.
box_pt[side] = transform[side] * box_pt[side]
# Draw the transformed pins for each side of the symbol.
for side, side_pins in list(unit.items()):
# If the pins are ordered by their row in the spreadsheet or by their name,
# then reverse their order on the right and top sides so they go from top-to-bottom
# on the right side and left-to-right on the top side instead of the opposite
# as happens with counter-clockwise pin-number ordering.
side_reverse = reverse
if sort_type in ["name", "row"] and side in ["right", "top"]:
side_reverse = not reverse
# Sort the pins for the desired order: row-wise, numeric (pin #), alphabetical (pin name).
sorted_side_pins = sorted(
list(side_pins.items()), key=pin_key_func, reverse=side_reverse
)
# Draw the transformed pins for this side of the symbol.
part_defn += draw_pins(
unit_num, sorted_side_pins, bbox[side], transform[side], side, push, fuzzy_match
)
# Create the box around the unit's pins.
part_defn += BOX.format(
x0=int(box_pt["left"][X]),
y0=int(box_pt["top"][Y]),
x1=int(box_pt["right"][X]),
y1=int(box_pt["bottom"][Y]),
unit_num=unit_num,
line_width=box_line_width,
fill=BOX_FILLS[fill],
)
# Close the section that holds the part's units.
part_defn += END_DRAW
# Close the part definition.
part_defn += END_DEF
# Return complete part symbol definition.
return part_defn
def is_pwr(pin, fuzzy_match):
"""Return true if this is a power input pin."""
return (
find_closest_match(name=pin.type, name_dict=PIN_TYPES, fuzzy_match=fuzzy_match)
== "W"
)
def do_bundling(pin_data, bundle, fuzzy_match):
"""Handle bundling for power pins. Unbundle everything else."""
for unit in list(pin_data.values()):
for side in list(unit.values()):
for name, pins in list(side.items()):
if len(pins) > 1:
for index, p in enumerate(pins):
if is_pwr(p, fuzzy_match) and bundle:
side[p.name + "_pwr"].append(p)
else:
side[p.name + "_" + str(index)].append(p)
del side[name]
def scan_for_readers():
"""Look for scripts for reading part description files."""
trailer = "_reader.py" # Reader file names always end with this.
readers = {}
for dir in [os.path.dirname(os.path.abspath(__file__)), "."]:
for f in os.listdir(dir):
if f.endswith(trailer):
reader_name = f.replace(trailer, "")
readers[reader_name] = dir
return readers
def kipart(
part_reader,
part_data_file,
part_data_file_name,
part_data_file_type,
parts_lib,
fill,
box_line_width,
push,
allow_overwrite=False,
sort_type="name",
reverse=False,
fuzzy_match=False,
bundle=False,
debug_level=0,
):
"""Read part pin data from a CSV/text/Excel file and write or append it to a library file."""
# Get the part number and pin data from the CSV file.
for (
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
pin_data,
) in part_reader(part_data_file, part_data_file_name, part_data_file_type):
# Handle retaining/overwriting parts that are already in the library.
if parts_lib.get(part_num):
if allow_overwrite:
print("Overwriting part {}!".format(part_num))
else:
print("Retaining previous definition of part {}.".format(part_num))
continue
do_bundling(pin_data, bundle, fuzzy_match)
# Draw the schematic symbol into the library.
parts_lib[part_num] = draw_symbol(
part_num=part_num,
part_ref_prefix=part_ref_prefix,
part_footprint=part_footprint,
part_manf_num=part_manf_num,
part_datasheet=part_datasheet,
part_desc=part_desc,
pin_data=pin_data,
sort_type=sort_type,
reverse=reverse,
fuzzy_match=fuzzy_match,
fill=fill,
box_line_width=box_line_width,
push=push,
)
def read_lib_file(lib_file):
parts_lib = OrderedDict()
with open(lib_file, "r") as lib:
part_def = ""
for line in lib:
start = re.match("DEF (?P<part_name>\S+)", line)
end = re.match("ENDDEF$", line)
if start:
part_def = line
part_name = start.group("part_name")
elif end:
part_def += line
parts_lib[part_name] = part_def
else:
part_def += line
return parts_lib
def write_lib_file(parts_lib, lib_file):
print("Writing", lib_file, len(parts_lib))
LIB_HEADER = "EESchema-LIBRARY Version 2.3\n"
with open(lib_file, "w") as lib_fp:
lib_fp.write(LIB_HEADER)
for part_def in parts_lib.values():
lib_fp.write(part_def)
def call_kipart(args, part_reader, part_data_file, file_name, file_type, parts_lib):
"""Helper routine for calling kipart from main()."""
return kipart(
part_reader=part_reader,
part_data_file=part_data_file,
part_data_file_name=file_name,
part_data_file_type=file_type,
parts_lib=parts_lib,
fill=args.fill,
box_line_width=args.box_line_width,
push=args.push,
allow_overwrite=args.overwrite,
sort_type=args.sort,
reverse=args.reverse,
fuzzy_match=args.fuzzy_match,
bundle=args.bundle,
debug_level=args.debug,
)
def main():
# Get Python routines for reading part description/CSV files.
readers = scan_for_readers()
parser = ap.ArgumentParser(
description="Generate single & multi-unit schematic symbols for KiCad from a CSV file."
)
parser.add_argument(
"-v", "--version", action="version", version="KiPart " + __version__
)
parser.add_argument(
"input_files",
nargs="+",
type=str,
metavar="file.[csv|txt|xlsx|zip]",
help="Files for parts in CSV/text/Excel format or as such files in .zip archives.",
)
parser.add_argument(
"-r",
"--reader",
nargs="?",
type=lambda s: unicode(s).lower(),
choices=readers.keys(),
default="generic",
help="Name of function for reading the CSV or part description files.",
)
parser.add_argument(
"-s",
"--sort",
nargs="?",
# type=str.lower,
type=lambda s: unicode(s).lower(),
choices=["row", "num", "name"],
default="row",
help="Sort the part pins by their entry order in the CSV file, their pin number, or their pin name.",
)
parser.add_argument(
"--reverse", action="store_true", help="Sort pins in reverse order."
)
parser.add_argument(
"--side",
nargs="?",
# type=str.lower,
type=lambda s: unicode(s).lower(),
choices=["left", "right", "top", "bottom"],
default="left",
help="Which side to place the pins by default.",
)
parser.add_argument(
"--fill",
nargs="?",
type=lambda s: unicode(s).lower(),
choices=BOX_FILLS.keys(),
default=DEFAULT_BOX_FILL,
help="Select fill style for schematic symbol boxes.",
)
parser.add_argument(
"--box_line_width",
type=int,
default=DEFAULT_BOX_LINE_WIDTH,
help="Set line width of the schematic symbol box.",
)
parser.add_argument(
"--push",
type=float,
default=0.5,
help="Push pins left/up (0.0), center (0.5), or right/down(1.0) on the sides of the schematic symbol box."
)
parser.add_argument(
"-o",
"--output",
nargs="?",
type=str,
metavar="file.lib",
help="Generated KiCad symbol library for parts.",
)
parser.add_argument(
"-f",
"--fuzzy_match",
action="store_true",
help="Use approximate string matching when looking-up the pin type, style and orientation.",
)
parser.add_argument(
"-b",
"--bundle",
action="store_true",
help="Bundle multiple, identically-named power and ground pins each into a single schematic pin.",
)
parser.add_argument(
"-a",
"--append",
"--add",
action="store_true",
help="Add parts to an existing part library. Overwrite existing parts only if used in conjunction with -w.",
)
parser.add_argument(
"-w",
"--overwrite",
action="store_true",
help="Allow overwriting of an existing part library.",
)
parser.add_argument(
"-d",
"--debug",
nargs="?",
type=int,
default=0,
metavar="LEVEL",
help="Print debugging info. (Larger LEVEL means more info.)",
)
args = parser.parse_args()
# kipart f1.csv f2.csv # Create f1.lib, f2.lib
# kipart f1.csv f2.csv -w # Overwrite f1.lib, f2.lib
# kipart f1.csv f2.csv -a # Append to f1.lib, f2.lib
# kipart f1.csv f2.csv -o f.lib # Create f.lib
# kipart f1.csv f2.csv -w -o f.lib # Overwrite f.lib
# kipart f1.csv f2.csv -a -o f.lib # Append to f.lib
# Load the function for reading the part description file.
part_reader_name = args.reader + "_reader" # Name of the reader module.
reader_dir = readers[args.reader]
sys.path.append(reader_dir) # Import from dir where the reader is
if reader_dir == ".":
importlib.import_module(part_reader_name) # Import module.
reader_module = sys.modules[part_reader_name] # Get imported module.
else:
importlib.import_module("kipart." + part_reader_name) # Import module.
reader_module = sys.modules[
"kipart." + part_reader_name
] # Get imported module.
part_reader = getattr(reader_module, part_reader_name) # Get reader function.
DEFAULT_PIN.side = args.side
check_file_exists = True # Used to check for existence of a single output lib file.
for input_file in args.input_files:
# No explicit output lib file, so each individual input file will generate its own .lib file.
if check_file_exists or not args.output:
output_file = args.output or os.path.splitext(input_file)[0] + ".lib"
if os.path.isfile(output_file):
# The output lib file already exists.
if args.overwrite:
# Overwriting an existing file, so ignore the existing parts.
parts_lib = OrderedDict()
elif args.append:
# Appending to an existing file, so read in existing parts.
parts_lib = read_lib_file(output_file)
else:
print(
"Output file {} already exists! Use the --overwrite option to replace it or the --append option to append to it.".format(
output_file
)
)
sys.exit(1)
else:
# Lib file doesn't exist, so create a new lib file starting with no parts.
parts_lib = OrderedDict()
# Don't setup the output lib file again if -o option was used to specify a single output lib.
check_file_exists = not args.output
file_ext = os.path.splitext(input_file)[-1].lower() # Get input file extension.
if file_ext == ".zip":
# Process the individual files inside a ZIP archive.
with zipfile.ZipFile(input_file, "r") as zip_file:
for zipped_file in zip_file.infolist():
zip_file_ext = os.path.splitext(zipped_file.filename)[-1]
if zip_file_ext in [".csv", ".txt"]:
# Only process CSV, TXT, Excel files in the archive.
with zip_file.open(zipped_file, "r") as part_data_file:
part_data_file = io.TextIOWrapper(part_data_file)
call_kipart(
args,
part_reader,
part_data_file,
zipped_file.filename,
zip_file_ext,
parts_lib,
)
elif zip_file_ext in [".xlsx"]:
xlsx_data = zip_file.read(zipped_file)
part_data_file = io.BytesIO(xlsx_data)
call_kipart(
args,
part_reader,
part_data_file,
zipped_file.filename,
zip_file_ext,
parts_lib,
)
else:
# Skip unrecognized files.
continue
elif file_ext in [".csv", ".txt"]:
# Process CSV and TXT files.
with open(input_file, "r") as part_data_file:
call_kipart(
args, part_reader, part_data_file, input_file, file_ext, parts_lib
)
elif file_ext in [".xlsx"]:
# Process Excel files.
with open(input_file, "rb") as part_data_file:
call_kipart(
args, part_reader, part_data_file, input_file, file_ext, parts_lib
)
else:
# Skip unrecognized files.
continue
if not args.output:
# No global output lib file, so output a lib file for each input file.
write_lib_file(parts_lib, output_file)
if args.output:
# Only a single lib output file was given, so write library to it after all
# the input files were processed.
write_lib_file(parts_lib, output_file)
# main entrypoint.
if __name__ == "__main__":
main()
| mit | 7,613,360,735,598,875,000 | 34.217006 | 145 | 0.558249 | false |
yeming233/horizon | openstack_dashboard/dashboards/admin/networks/tables.py | 1 | 5042 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class DeleteNetwork(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Network",
u"Delete Networks",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Network",
u"Deleted Networks",
count
)
policy_rules = (("network", "delete_network"),)
def delete(self, request, obj_id):
try:
api.neutron.network_delete(request, obj_id)
except Exception as e:
LOG.info('Failed to delete network %(id)s: %(exc)s',
{'id': obj_id, 'exc': e})
msg = _('Failed to delete network %s') % obj_id
redirect = reverse('horizon:admin:networks:index')
exceptions.handle(request, msg, redirect=redirect)
class CreateNetwork(tables.LinkAction):
name = "create"
verbose_name = _("Create Network")
url = "horizon:admin:networks:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_network"),)
class EditNetwork(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Network")
url = "horizon:admin:networks:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_network"),)
DISPLAY_CHOICES = (
("up", pgettext_lazy("Admin state of a Network", u"UP")),
("down", pgettext_lazy("Admin state of a Network", u"DOWN")),
)
class AdminNetworksFilterAction(project_tables.ProjectNetworksFilterAction):
name = "filter_admin_networks"
filter_choices = (('project', _("Project ="), True),) +\
project_tables.ProjectNetworksFilterAction.filter_choices
class NetworksTable(tables.DataTable):
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
name = tables.WrappingColumn("name_or_id", verbose_name=_("Network Name"),
link='horizon:admin:networks:detail')
subnets = tables.Column(project_tables.get_subnets,
verbose_name=_("Subnets Associated"),)
num_agents = tables.Column("num_agents",
verbose_name=_("DHCP Agents"))
shared = tables.Column("shared", verbose_name=_("Shared"),
filters=(filters.yesno, filters.capfirst))
external = tables.Column("router:external",
verbose_name=_("External"),
filters=(filters.yesno, filters.capfirst))
status = tables.Column(
"status", verbose_name=_("Status"),
display_choices=project_tables.STATUS_DISPLAY_CHOICES)
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=DISPLAY_CHOICES)
def get_object_display(self, network):
return network.name_or_id
class Meta(object):
name = "networks"
verbose_name = _("Networks")
table_actions = (CreateNetwork, DeleteNetwork,
AdminNetworksFilterAction)
row_actions = (EditNetwork, DeleteNetwork)
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(NetworksTable, self).__init__(
request, data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
try:
if not api.neutron.is_extension_supported(request,
'dhcp_agent_scheduler'):
del self.columns['num_agents']
except Exception:
msg = _("Unable to check if DHCP agent scheduler "
"extension is supported")
exceptions.handle(self.request, msg)
del self.columns['num_agents']
| apache-2.0 | -2,542,113,935,013,305,300 | 35.80292 | 78 | 0.62138 | false |
anomaly/prestans | prestans/rest/response.py | 1 | 13386 | import webob
from prestans import exception
from prestans.http import STATUS
from prestans.parser import AttributeFilter
from prestans import serializer
from prestans.types import Array
from prestans.types import BinaryResponse
from prestans.types import DataCollection
from prestans.types import Model
class Response(webob.Response):
"""
Response is the writable HTTP response. It inherits and leverages
from webob.Response to do the heavy lifting of HTTP Responses. It adds to
webob.Response prestans customisations.
Overrides content_type property to use prestans' serializers with the set body
"""
def __init__(self, charset, logger, serializers, default_serializer):
super(Response, self).__init__()
self._logger = logger
self._serializers = serializers
self._default_serializer = default_serializer
self._selected_serializer = None
self._template = None
self._app_iter = []
self._minify = False
self._attribute_filter = None
self._template = None
self._charset = charset
#:
        #: IETF has dropped the X- prefix for custom headers
#: http://stackoverflow.com/q/3561381
#: http://tools.ietf.org/html/draft-saintandre-xdash-00
#:
from prestans import __version__ as version
if not isinstance(version, str):
version = version.encode("latin1")
self.headers.add('Prestans-Version', version)
@property
def minify(self):
return self._minify
@minify.setter
def minify(self, value):
self._minify = value
@property
def logger(self):
return self._logger
@property
def supported_mime_types(self):
return [serializer.content_type() for serializer in self._serializers]
@property
def supported_mime_types_str(self):
return ''.join(str(mime_type) + ',' for mime_type in self.supported_mime_types)[:-1]
@property
def selected_serializer(self):
return self._selected_serializer
@property
def default_serializer(self):
return self._default_serializer
def _set_serializer_by_mime_type(self, mime_type):
"""
:param mime_type:
:return:
        used by _content_type__set to get a reference to the appropriate serializer
"""
# ignore if binary response
if isinstance(self._app_iter, BinaryResponse):
self.logger.info("ignoring setting serializer for binary response")
return
for available_serializer in self._serializers:
if available_serializer.content_type() == mime_type:
self._selected_serializer = available_serializer
self.logger.info("set serializer for mime type: %s" % mime_type)
return
self.logger.info("could not find serializer for mime type: %s" % mime_type)
raise exception.UnsupportedVocabularyError(mime_type, self.supported_mime_types_str)
@property
def template(self):
"""
is an instance of prestans.types.DataType; mostly a subclass of prestans.types.Model
"""
return self._template
@template.setter
def template(self, value):
if value is not None and (not isinstance(value, DataCollection) and
not isinstance(value, BinaryResponse)):
raise TypeError("template in response must be of type prestans.types.DataCollection or subclass")
self._template = value
#:
#: Attribute filter setup
#:
@property
def attribute_filter(self):
return self._attribute_filter
@attribute_filter.setter
def attribute_filter(self, value):
if value is not None and not isinstance(value, AttributeFilter):
msg = "attribute_filter in response must be of type prestans.types.AttributeFilter"
raise TypeError(msg)
self._attribute_filter = value
def _content_type__get(self):
"""
Get/set the Content-Type header (or None), *without* the
charset or any parameters.
If you include parameters (or ``;`` at all) when setting the
content_type, any existing parameters will be deleted;
otherwise they will be preserved.
"""
header = self.headers.get('Content-Type')
if not header:
return None
return header.split(';', 1)[0]
def _content_type__set(self, value):
# skip for responses that have no body
if self.status_code in [STATUS.NO_CONTENT, STATUS.PERMANENT_REDIRECT, STATUS.TEMPORARY_REDIRECT]:
self.logger.info("attempt to set Content-Type to %s being ignored due to empty response" % value)
self._content_type__del()
else:
self._set_serializer_by_mime_type(value)
if ';' not in value:
header = self.headers.get('Content-Type', '')
if ';' in header:
params = header.split(';', 1)[1]
value += ';' + params
self.headers['Content-Type'] = value
self.logger.info("Content-Type set to: %s" % value)
def _content_type__del(self):
self.headers.pop('Content-Type', None)
# content_type; overrides webob.Response line 606
content_type = property(
_content_type__get,
_content_type__set,
_content_type__del,
doc=_content_type__get.__doc__
)
# body; overrides webob.Response line 324
@property
def body(self):
"""
Overridden response does not support md5, text or json properties. _app_iter
is set using rules defined by prestans.
body getter will return the validated prestans model.
webob does the heavy lifting with headers.
"""
#: If template is null; return an empty iterable
if self.template is None:
return []
return self._app_iter
@body.setter
def body(self, value):
        #: If there is no response template, we have to assume it's NO_CONTENT
#: hence do not allow setting the body
if self.template is None:
raise AssertionError("response_template is None; handler can't return a response")
#: value should be a subclass prestans.types.DataCollection
if not isinstance(value, DataCollection) and \
not isinstance(value, BinaryResponse):
msg = "%s is not a prestans.types.DataCollection or prestans.types.BinaryResponse subclass" % (
value.__class__.__name__
)
raise TypeError(msg)
#: Ensure that it matches the return type template
if not value.__class__ == self.template.__class__:
msg = "body must of be type %s, given %s" % (
self.template.__class__.__name__,
value.__class__.__name__
)
raise TypeError(msg)
#: If it's an array then ensure that element_template matches up
if isinstance(self.template, Array) and \
not isinstance(value.element_template, self.template.element_template.__class__):
msg = "array elements must of be type %s, given %s" % (
self.template.element_template.__class__.__name__,
value.element_template.__class__.__name__
)
raise TypeError(msg)
#: _app_iter assigned to value
#: we need to serialize the contents before we know the length
        #: defer the content_length property to be set by the getter
self._app_iter = value
# body = property(_body__get, _body__set, _body__set)
def register_serializers(self, serializers):
"""
Adds extra serializers; generally registered during the handler lifecycle
"""
for new_serializer in serializers:
if not isinstance(new_serializer, serializer.Base):
msg = "registered serializer %s.%s does not inherit from prestans.serializer.Serializer" % (
new_serializer.__module__,
new_serializer.__class__.__name__
)
raise TypeError(msg)
self._serializers = self._serializers + serializers
def __call__(self, environ, start_response):
"""
Overridden WSGI application interface
"""
# prestans equivalent of webob.Response line 1022
if self.template is None or self.status_code == STATUS.NO_CONTENT:
self.content_type = None
start_response(self.status, self.headerlist)
if self.template is not None:
self.logger.warn("handler returns No Content but has a response_template; set template to None")
return []
# ensure what we are able to serialize is serializable
if not isinstance(self._app_iter, DataCollection) and \
not isinstance(self._app_iter, BinaryResponse):
if isinstance(self._app_iter, list):
app_iter_type = "list"
else:
                app_iter_type = self._app_iter.__class__.__name__
msg = "handler returns content of type %s; not a prestans.types.DataCollection subclass" % (
app_iter_type
)
raise TypeError(msg)
if isinstance(self._app_iter, DataCollection):
#: See if attribute filter is completely invisible
if self.attribute_filter is not None:
#: Warning to say nothing is visible
if not self.attribute_filter.are_any_attributes_visible():
self.logger.warn("attribute_filter has all the attributes turned \
off, handler will return an empty response")
#: Warning to say none of the fields match
model_attribute_filter = None
if isinstance(self._app_iter, Array):
model_attribute_filter = AttributeFilter. \
from_model(self._app_iter.element_template)
elif isinstance(self._app_iter, Model):
model_attribute_filter = AttributeFilter. \
from_model(self._app_iter)
if model_attribute_filter is not None:
try:
model_attribute_filter.conforms_to_template_filter(self.attribute_filter)
except exception.AttributeFilterDiffers as exp:
exp.request = self.request
self.logger.warn("%s" % exp)
            # body should be of type DataCollection; attempt calling
# as_serializable with available attribute_filter
serializable_body = self._app_iter.as_serializable(self.attribute_filter.as_immutable(), self.minify)
#: attempt serializing via registered serializer
stringified_body = self._selected_serializer.dumps(serializable_body)
# if not isinstance(stringified_body, str):
# msg = "%s dumps must return a python str not %s" % (
# self._selected_serializer.__class__.__name__,
# stringified_body.__class__.__name__
# )
# raise TypeError(msg)
#: set content_length
self.content_length = len(stringified_body)
start_response(self.status, self.headerlist)
return [stringified_body.encode("utf-8")]
elif isinstance(self._app_iter, BinaryResponse):
if self._app_iter.content_length == 0 or \
self._app_iter.mime_type is None or \
self._app_iter.file_name is None:
msg = "Failed to write binary response with content_length %i; mime_type %s; file_name %s" % (
self._app_iter.content_length,
self._app_iter.mime_type,
self._app_iter.file_name
)
self.logger.warn(msg)
self.status = STATUS.INTERNAL_SERVER_ERROR
self.content_type = "text/plain"
return []
# set the content type
self.content_type = self._app_iter.mime_type
#: Add content disposition header
if self._app_iter.as_attachment:
attachment = "attachment; filename=\"%s\"" % self._app_iter.file_name
if not isinstance(attachment, str):
attachment = attachment.encode("latin1")
self.headers.add("Content-Disposition", attachment)
else:
inline = "inline; filename=\"%s\"" % self._app_iter.file_name
if not isinstance(inline, str):
inline = inline.encode("latin1")
self.headers.add("Content-Disposition", inline)
#: Write out response
self.content_length = self._app_iter.content_length
start_response(self.status, self.headerlist)
return [self._app_iter.contents]
else:
raise AssertionError("prestans failed to write a binary or textual response")
def __str__(self):
#: Overridden so webob's __str__ skips serializing the body
super(Response, self).__str__(skip_body=True) | bsd-3-clause | -3,160,285,723,558,265,000 | 35.377717 | 113 | 0.590318 | false |
Pelagicore/FrancaCCG | XMLtoC/__init__.py | 1 | 1046 | # -*- Mode: Python -*-
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
# Author: David Zeuthen <[email protected]>
# (2015) Jesper Lundkvist <[email protected]>
import os
builddir = os.environ.get('UNINSTALLED_GLIB_BUILDDIR')
if builddir is not None:
__path__.append(os.path.abspath(os.path.join(builddir, 'gio', 'gdbus-2.0', 'codegen_francac')))
| gpl-2.0 | -2,092,784,966,910,235,600 | 36.357143 | 99 | 0.737094 | false |
digwanderlust/pants | tests/python/pants_test/backend/python/tasks/python_task_test.py | 1 | 4202 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.python.register import build_file_aliases as register_python
from pants.base.address import SyntheticAddress
from pants_test.tasks.task_test_base import TaskTestBase
class PythonTaskTest(TaskTestBase):
def setUp(self):
super(PythonTaskTest, self).setUp()
# Use the "real" interpreter cache, so tests don't waste huge amounts of time recreating it.
# It would be nice to get the location of the real interpreter cache from PythonSetup,
# but unfortunately real subsystems aren't available here (for example, we have no access
# to the enclosing pants instance's options), so we have to hard-code it.
self.set_options_for_scope('python-setup',
interpreter_cache_dir=os.path.join(self.real_build_root, '.pants.d',
'python-setup', 'interpreters'),
chroot_cache_dir=os.path.join(self.real_build_root, '.pants.d',
'python-setup', 'chroots'),
resolver_cache_ttl=1000000000) # TODO: Do we need this now that there's a default?
@property
def alias_groups(self):
return register_python()
def create_python_library(self, relpath, name, source_contents_map=None,
dependencies=(), provides=None):
sources = ['__init__.py'] + source_contents_map.keys() if source_contents_map else None
sources_strs = ["'{0}'".format(s) for s in sources] if sources else None
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_library(
name='{name}',
{sources_clause}
dependencies=[
{dependencies}
],
{provides_clause}
)
""").format(
name=name,
sources_clause='sources=[{0}],'.format(','.join(sources_strs)) if sources_strs else '',
dependencies=','.join(map(repr, dependencies)),
provides_clause='provides={0},'.format(provides) if provides else ''))
if source_contents_map:
self.create_file(relpath=os.path.join(relpath, '__init__.py'))
for source, contents in source_contents_map.items():
self.create_file(relpath=os.path.join(relpath, source), contents=contents)
return self.target(SyntheticAddress(relpath, name).spec)
def create_python_binary(self, relpath, name, entry_point, dependencies=(), provides=None):
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_binary(
name='{name}',
entry_point='{entry_point}',
dependencies=[
{dependencies}
],
{provides_clause}
)
""").format(name=name, entry_point=entry_point, dependencies=','.join(map(repr, dependencies)),
provides_clause='provides={0},'.format(provides) if provides else ''))
return self.target(SyntheticAddress(relpath, name).spec)
def create_python_requirement_library(self, relpath, name, requirements):
def make_requirement(req):
return 'python_requirement("{}")'.format(req)
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
python_requirement_library(
name='{name}',
requirements=[
{requirements}
]
)
""").format(name=name, requirements=','.join(map(make_requirement, requirements))))
return self.target(SyntheticAddress(relpath, name).spec)
def context(self, for_task_types=None, options=None, target_roots=None,
console_outstream=None, workspace=None):
# Our python tests don't pass on Python 3 yet.
# TODO: Clean up this hard-coded interpreter constraint once we have subsystems
# and can simplify InterpreterCache and PythonSetup.
self.set_options(interpreter=['CPython>=2.7,<3'])
return super(PythonTaskTest, self).context(for_task_types=for_task_types, options=options,
target_roots=target_roots, console_outstream=console_outstream, workspace=workspace)
| apache-2.0 | 2,458,237,542,830,675,500 | 44.182796 | 99 | 0.672537 | false |
nBeker/macros | virtual_keyboard.py | 1 | 6616 | from win32api import keybd_event as keyboard_event
from time import sleep
from win32con import KEYEVENTF_KEYUP as RELEASE_KEY
import win32con
SLEEP_CONST = .05
# Giant dictionary to hold key name and VK value
VK_CODE = {'BACKSPACE': win32con.VK_BACK,
'TAB': win32con.VK_TAB,
'\t': win32con.VK_TAB,
'clear': win32con.VK_CLEAR,
'ENTER': win32con.VK_RETURN,
'\n': win32con.VK_RETURN,
'SHIFT': win32con.VK_SHIFT,
'S': win32con.VK_SHIFT,
'CTRL': win32con.VK_CONTROL,
'^': win32con.VK_CONTROL,
'ALT': win32con.VK_MENU,
'A': win32con.VK_MENU,
'PAUSE': win32con.VK_PAUSE,
'CAPSLOCK': win32con.VK_CAPITAL,
'ESC': win32con.VK_ESCAPE,
'SPACEBAR': win32con.VK_SPACE,
' ': win32con.VK_SPACE,
'PAGE': win32con.VK_PRIOR,
'PAGE_DOWN': win32con.VK_NEXT,
'END': win32con.VK_END,
'HOME': win32con.VK_HOME,
'LEFT': win32con.VK_LEFT,
'UP': win32con.VK_UP,
'RIGHT': win32con.VK_RIGHT,
'DOWN': win32con.VK_DOWN,
'SELECT': win32con.VK_SELECT,
'PRINT': win32con.VK_PRINT,
'EXECUTE': win32con.VK_EXECUTE,
'PRINT_SCREEN': win32con.VK_SNAPSHOT,
'INS': win32con.VK_INSERT,
'DEL': win32con.VK_DELETE,
'HELP': win32con.VK_HELP,
'0': 0x30,
'1': 0x31,
'2': 0x32,
'3': 0x33,
'4': 0x34,
'5': 0x35,
'6': 0x36,
'7': 0x37,
'8': 0x38,
'9': 0x39,
'a': 0x41,
'b': 0x42,
'c': 0x43,
'd': 0x44,
'e': 0x45,
'f': 0x46,
'g': 0x47,
'h': 0x48,
'i': 0x49,
'j': 0x4A,
'k': 0x4B,
'l': 0x4C,
'm': 0x4D,
'n': 0x4E,
'o': 0x4F,
'p': 0x50,
'q': 0x51,
'r': 0x52,
's': 0x53,
't': 0x54,
'u': 0x55,
'v': 0x56,
'w': 0x57,
'x': 0x58,
'y': 0x59,
'z': 0x5A,
'LWIN': win32con.VK_LWIN,
'WIN': win32con.VK_LWIN,
'WINKEY': win32con.VK_LWIN,
'W': win32con.VK_LWIN,
'RWIN': win32con.VK_RWIN,
'NUMPAD_0': win32con.VK_NUMPAD0,
'NUMPAD_1': win32con.VK_NUMPAD1,
'NUMPAD_2': win32con.VK_NUMPAD2,
'NUMPAD_3': win32con.VK_NUMPAD3,
'NUMPAD_4': win32con.VK_NUMPAD4,
'NUMPAD_5': win32con.VK_NUMPAD5,
'NUMPAD_6': win32con.VK_NUMPAD6,
'NUMPAD_7': win32con.VK_NUMPAD7,
'NUMPAD_8': win32con.VK_NUMPAD8,
'NUMPAD_9': win32con.VK_NUMPAD9,
'MULTIPLY': win32con.VK_MULTIPLY,
'*': win32con.VK_MULTIPLY,
'ADD': win32con.VK_ADD,
'+': win32con.VK_ADD,
'SEPARATOR': win32con.VK_SEPARATOR,
'SUBTRACT': win32con.VK_SUBTRACT,
'-': win32con.VK_SUBTRACT,
'DECIMAL': win32con.VK_DECIMAL,
'.': win32con.VK_DECIMAL,
'DIVIDE': win32con.VK_DIVIDE,
'/': win32con.VK_DIVIDE,
'F1': win32con.VK_F1,
'F2': win32con.VK_F2,
'F3': win32con.VK_F3,
'F4': win32con.VK_F4,
'F5': win32con.VK_F5,
'F6': win32con.VK_F6,
'F7': win32con.VK_F7,
'F8': win32con.VK_F8,
'F9': win32con.VK_F9,
'F10': win32con.VK_F10,
'F11': win32con.VK_F11,
'F12': win32con.VK_F12,
'F13': win32con.VK_F13,
'F14': win32con.VK_F14,
'F15': win32con.VK_F15,
'F16': win32con.VK_F16,
'F17': win32con.VK_F17,
'F18': win32con.VK_F18,
'F19': win32con.VK_F19,
'F20': win32con.VK_F20,
'F21': win32con.VK_F21,
'F22': win32con.VK_F22,
'F23': win32con.VK_F23,
'F24': win32con.VK_F24,
'NUM_LOCK': win32con.VK_NUMLOCK,
'SCROLL_LOCK': win32con.VK_SCROLL,
'LEFT_SHIFT': win32con.VK_LSHIFT,
'RIGHT_SHIFT ': win32con.VK_RSHIFT,
'LEFT_CONTROL': win32con.VK_LCONTROL,
'RIGHT_CONTROL': win32con.VK_RCONTROL,
'LEFT_MENU': win32con.VK_LMENU,
'RIGHT_MENU': win32con.VK_RMENU,
'BROWSER_BACK': win32con.VK_BROWSER_BACK,
'BROWSER_FORWARD': win32con.VK_BROWSER_FORWARD,
'BROWSER_REFRESH': 0XA8,
'BROWSER_STOP': 0XA9,
'BROWSER_SEARCH': 0XAA,
'BROWSER_FAVORITES': 0XAB,
'BROWSER_START_AND_HOME': 0XAC,
'VOLUME_MUTE': win32con.VK_VOLUME_MUTE,
'VOLUME_DOWN': win32con.VK_VOLUME_DOWN,
'VOLUME_UP': win32con.VK_VOLUME_UP,
'NEXT_TRACK': win32con.VK_MEDIA_NEXT_TRACK,
'PREVIOUS_TRACK': win32con.VK_MEDIA_PREV_TRACK,
'STOP_MEDIA': 0XB2,
'TOGGLE_MEDIA': win32con.VK_MEDIA_PLAY_PAUSE,
'START_MAIL': 0XB4,
'SELECT_MEDIA': 0XB5,
'START_APPLICATION_1': 0XB6,
'START_APPLICATION_2': 0XB7,
'ATTN_KEY': win32con.VK_ATTN,
'CRSEL_KEY': win32con.VK_CRSEL,
'EXSEL_KEY': win32con.VK_EXSEL,
'PLAY_KEY': win32con.VK_PLAY,
'ZOOM_KEY': win32con.VK_ZOOM,
'CLEAR_KEY': win32con.VK_OEM_CLEAR,
# '+': win32con.VK_OEM_PLUS,
',': 0xBC,
# '-': 0xBD,
# '.': 0xBE,
# '/': 0xBF,
'`': 0xC0,
';': 0xBA,
'[': 0xDB,
'\\': 0xDC,
']': 0xDD,
"'": 0xDE}
def press(keys_list):
"""
one press, one release.
:param keys_list: list of keys
"""
for key in keys_list:
keyboard_event(VK_CODE[key], 0, 0, 0)
sleep(SLEEP_CONST)
keyboard_event(VK_CODE[key], 0, RELEASE_KEY, 0)
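# Example usage (illustrative; key names must match the VK_CODE table above):
#
#   press(['h', 'i'])                  # taps H, then I
#   press_combo(['CTRL', 'c'])         # holds Ctrl and C together, then releases both
#   hold(['SHIFT']); press(['a']); release(['SHIFT'])   # types an upper-case A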
def hold(keys_list):
"""
press and hold. Do NOT release.
:param keys_list: list of keys
"""
for key in keys_list:
keyboard_event(VK_CODE[key], 0, 0, 0)
sleep(SLEEP_CONST)
def release(keys_list):
"""
release depressed keys
:param keys_list: list of keys
"""
for key in keys_list:
keyboard_event(VK_CODE[key], 0, RELEASE_KEY, 0)
def press_combo(keys_list):
"""
Holds and then Releases a key_list
:param keys_list: list of keys
"""
hold(keys_list)
release(keys_list) | apache-2.0 | -5,341,093,758,575,662,000 | 30.509524 | 58 | 0.483676 | false |
DataViva/dataviva-scripts | scripts/comtrade/helpers/calc_rca.py | 1 | 1782 | import sys, os
import pandas as pd
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
ps_calcs_lib_path = os.path.abspath(os.path.join(file_path, "../../../lib/ps_calcs"))
sys.path.insert(0, ps_calcs_lib_path)
import ps_calcs
def calc_rca(ypw):
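    """Filter a world-product export table and recompute RCA values.

    Judging from the code below, ``ypw`` is a pandas DataFrame indexed by
    ('wld_id', 'hs_id') with a 'val_usd' column of export values. Origins with
    too little diversity or too few total exports, and products with too little
    ubiquity, are dropped before the RCA matrix is recomputed via ps_calcs.rca().
    """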
ubiquity_required = 20
diversity_required = 200
total_exports_required = 50000000
'''trim country list by diversity'''
origin_diversity = ypw.reset_index()
origin_diversity = origin_diversity["wld_id"].value_counts()
origin_diversity = origin_diversity[origin_diversity > diversity_required]
'''trim country list by total exports'''
origin_totals = ypw.groupby(level=['wld_id']).sum()
origin_totals = origin_totals['val_usd']
origin_totals = origin_totals[origin_totals > total_exports_required]
filtered_origins = set(origin_diversity.index).intersection(set(origin_totals.index))
'''trim product list by ubiquity'''
product_ubiquity = ypw.reset_index()
product_ubiquity = product_ubiquity[product_ubiquity['val_usd'] > 0]
product_ubiquity = product_ubiquity["hs_id"].value_counts()
product_ubiquity = product_ubiquity[product_ubiquity > ubiquity_required]
filtered_products = set(product_ubiquity.index)
'''re-calculate rcas'''
origins_to_drop = set(ypw.index.get_level_values('wld_id')).difference(filtered_origins)
products_to_drop = set(ypw.index.get_level_values('hs_id')).difference(filtered_products)
ypw = ypw.drop(list(origins_to_drop), axis=0, level='wld_id')
ypw = ypw.drop(list(products_to_drop), axis=0, level='hs_id')
ypw_rca = ypw.reset_index()
ypw_rca = ypw_rca.pivot(index="wld_id", columns="hs_id", values="val_usd")
ypw_rca = ps_calcs.rca(ypw_rca)
return ypw_rca.fillna(0)
| mit | 7,757,585,191,492,210,000 | 36.914894 | 93 | 0.69248 | false |
copyninja/apt-offline | apt_offline_core/AptOfflineDebianBtsLib.py | 1 | 10168 | #!/usr/bin/env python
# debianbts.py - Methods to query Debian's BTS.
# Copyright (C) 2007-2010 Bastian Venthur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Query Debian's Bug Tracking System (BTS).
This module provides a layer between Python and Debian's BTS. It provides
methods to query the BTS using the BTS' SOAP interface, and the Bugreport class
which represents a bugreport from the BTS.
"""
from datetime import datetime
import urllib
import urlparse
import SOAPpy
# Setup the soap server
# Default values
URL = 'http://bugs.debian.org/cgi-bin/soap.cgi'
NS = 'Debbugs/SOAP/V1'
BTS_URL = 'http://bugs.debian.org/'
def _get_http_proxy():
"""Returns an HTTP proxy URL formatted for consumption by SOAPpy.
SOAPpy does some fairly low-level HTTP manipulation and needs to be
explicitly made aware of HTTP proxy URLs, which also have to be
    formatted without a scheme or path.
"""
http_proxy = urllib.getproxies().get('http')
if http_proxy is None:
return None
return urlparse.urlparse(http_proxy).netloc
server = SOAPpy.SOAPProxy(URL, NS, http_proxy=_get_http_proxy())
class Bugreport(object):
"""Represents a bugreport from Debian's Bug Tracking System.
A bugreport object provides all attributes provided by the SOAP interface.
    Most of the attributes are strings; the others are marked.
* bug_num: The bugnumber (int)
* severity: Severity of the bugreport
* tags: List of tags of the bugreport (list of strings)
* subject: The subject/title of the bugreport
* originator: Submitter of the bugreport
* mergedwith: List of bugnumbers this bug was merged with (list of ints)
* package: Package of the bugreport
* source: Source package of the bugreport
* date: Date of bug creation (datetime)
* log_modified: Date of update of the bugreport (datetime)
* done: Is the bug fixed or not (bool)
* archived: Is the bug archived or not (bool)
* unarchived: Was the bug unarchived or not (bool)
* fixed_versions: List of versions, can be empty even if bug is fixed (list of strings)
* found_versions: List of version numbers where bug was found (list of strings)
* forwarded: A URL or email address
* blocks: List of bugnumbers this bug blocks (list of ints)
* blockedby: List of bugnumbers which block this bug (list of ints)
* pending: Either 'pending' or 'done'
* msgid: Message ID of the bugreport
* owner: Who took responsibility for fixing this bug
* location: Either 'db-h' or 'archive'
* affects: List of Packagenames (list of strings)
* summary: Arbitrary text
"""
def __init__(self):
self.originator = None
self.date = None
self.subject = None
self.msgid = None
self.package = None
self.tags = None
self.done = None
self.forwarded = None
self.mergedwith = None
self.severity = None
self.owner = None
self.found_versions = None
self.fixed_versions = None
self.blocks = None
self.blockedby = None
self.unarchived = None
self.summary = None
self.affects = None
self.log_modified = None
self.location = None
self.archived = None
self.bug_num = None
self.source = None
self.pending = None
# The ones below are also there but not used
#self.fixed = None
#self.found = None
#self.fixed_date = None
#self.found_date = None
#self.keywords = None
#self.id = None
def __str__(self):
s = ""
for key, value in self.__dict__.iteritems():
if type(value) == type(unicode()):
value = value.encode('utf-8')
s += "%s: %s\n" % (key, str(value))
return s
def __cmp__(self, other):
"""Compare a bugreport with another.
        The more open and urgent a bug is, the greater the bug is:
outstanding > resolved > archived
critical > grave > serious > important > normal > minor > wishlist.
Openness always beats urgency, eg an archived bug is *always* smaller
than an outstanding bug.
This sorting is useful for displaying bugreports in a list and sorting
them in a useful way.
"""
myval = self._get_value()
otherval = other._get_value()
if myval < otherval:
return -1
elif myval == otherval:
return 0
else:
return 1
def _get_value(self):
if self.archived:
# archived and done
val = 0
elif self.done:
# not archived and done
val = 10
else:
# not done
val = 20
val += {u"critical" : 7,
u"grave" : 6,
u"serious" : 5,
u"important" : 4,
u"normal" : 3,
u"minor" : 2,
u"wishlist" : 1}[self.severity]
return val
def get_status(*nr):
"""Returns a list of Bugreport objects."""
reply = server.get_status(*nr)
# If we called get_status with one single bug, we get a single bug,
# if we called it with a list of bugs, we get a list,
    # No available bugreports returns an empty list
bugs = []
if not reply:
pass
elif type(reply[0]) == type([]):
for elem in reply[0]:
bugs.append(_parse_status(elem))
else:
bugs.append(_parse_status(reply[0]))
return bugs
def get_usertag(email, *tags):
"""Return a dictionary of "usertag" => buglist mappings.
If tags are given the dictionary is limited to the matching tags, if no
tags are given all available tags are returned.
"""
reply = server.get_usertag(email, *tags)
# reply is an empty string if no bugs match the query
return dict() if reply == "" else reply._asdict()
def get_bug_log(nr):
"""Return a list of Buglogs.
A buglog is a dictionary with the following mappings:
"header" => string
"body" => string
"attachments" => list
"msg_num" => int
"""
reply = server.get_bug_log(nr)
buglog = [i._asdict() for i in reply._aslist()]
for b in buglog:
b["header"] = _uc(b["header"])
b["body"] = _uc(b["body"])
b["msg_num"] = int(b["msg_num"])
b["attachments"] = b["attachments"]._aslist()
return buglog
def newest_bugs(amount):
"""Returns a list of bugnumbers of the `amount` newest bugs."""
reply = server.newest_bugs(amount)
return reply._aslist()
def get_bugs(*key_value):
"""Returns a list of bugnumbers, that match the conditions given by the
key-value pair(s).
Possible keys are:
"package": bugs for the given package
"submitter": bugs from the submitter
"maint": bugs belonging to a maintainer
"src": bugs belonging to a source package
"severity": bugs with a certain severity
"status": can be either "done", "forwarded", or "open"
"tag": see http://www.debian.org/Bugs/Developer#tags for available tags
"owner": bugs which are assigned to `owner`
"bugs": takes list of bugnumbers, filters the list according to given criteria
"correspondent": bugs where `correspondent` has sent a mail to
Example: get_bugs('package', 'gtk-qt-engine', 'severity', 'normal')
"""
reply = server.get_bugs(*key_value)
return reply._aslist()
def _parse_status(status):
"""Return a bugreport object from a given status."""
status = status._asdict()
bug = Bugreport()
tmp = status['value']
bug.originator = _uc(tmp['originator'])
bug.date = datetime.utcfromtimestamp(tmp['date'])
bug.subject = _uc(tmp['subject'])
bug.msgid = _uc(tmp['msgid'])
bug.package = _uc(tmp['package'])
bug.tags = _uc(tmp['tags']).split()
bug.done = bool(tmp['done'])
bug.forwarded = _uc(tmp['forwarded'])
bug.mergedwith = [int(i) for i in str(tmp['mergedwith']).split()]
bug.severity = _uc(tmp['severity'])
bug.owner = _uc(tmp['owner'])
bug.found_versions = [_uc(str(i)) for i in tmp['found_versions']]
bug.fixed_versions = [_uc(str(i)) for i in tmp['fixed_versions']]
bug.blocks = [int(i) for i in str(tmp['blocks']).split()]
bug.blockedby = [int(i) for i in str(tmp['blockedby']).split()]
bug.unarchived = bool(tmp["unarchived"])
bug.summary = _uc(tmp['summary'])
affects = tmp['affects'].strip()
bug.affects = [_uc(i.strip()) for i in affects.split(',')] if affects else []
bug.log_modified = datetime.utcfromtimestamp(tmp['log_modified'])
bug.location = _uc(tmp['location'])
bug.archived = bool(tmp["archived"])
bug.bug_num = int(tmp['bug_num'])
bug.source = _uc(tmp['source'])
bug.pending = _uc(tmp['pending'])
# Also available, but unused or broken
#bug.fixed = _parse_crappy_soap(tmp, "fixed")
#bug.found = _parse_crappy_soap(tmp, "found")
#bug.found_date = [datetime.utcfromtimestamp(i) for i in tmp["found_date"]]
#bug.fixed_date = [datetime.utcfromtimestamp(i) for i in tmp["fixed_date"]]
#bug.keywords = _uc(tmp['keywords']).split()
#bug.id = int(tmp['id'])
return bug
def _uc(string):
"""Convert string to unicode.
This method only exists to unify the unicode conversion in this module.
"""
return unicode(string, 'utf-8', 'replace')
| gpl-3.0 | 842,525,302,459,896,700 | 33.006689 | 91 | 0.624803 | false |
sam-roth/Keypad | doc/source/conf.py | 1 | 11560 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Keypad documentation build configuration file, created by
# sphinx-quickstart on Sun May 11 19:23:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
import sphinx.ext.viewcode
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.autodoc',
'autodoc_patches',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
# 'sphinx.ext.viewcode',
'viewcode_patches',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
]
# autodoc_default_flags = ['members', 'undoc-members', 'inherited-members',
# 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Keypad'
copyright = '2014, Sam Roth'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keypad.']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "Keypad",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
#
# # A list of tuples containing pages or urls to link to.
# # Valid tuples should be in the following forms:
# # (name, page) # a link to a page
# # (name, "/aa/bb", 1) # a link to an arbitrary relative url
# # (name, "http://example.com", True) # arbitrary absolute url
# # Note the "1" or "True" value above as the third argument to indicate
# # an arbitrary url.
'navbar_links': [
# ("Examples", "examples"),
('Module Index', 'py-modindex'),
# ("Link", "http://example.com", True),
],
#
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
# 'bootswatch_theme': "flatly",
'bootswatch_theme': "",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# 'sidebar': ['**']
# }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = ['genindex', 'py-modindex', 'search', 'py-setindex']
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Stemdoc'
html_sidebars = {
'**': [
'localtoc.html',
'sourcelink.html',
'searchbox.html'
]
}
# -- Options for LaTeX output ---------------------------------------------
#
# latex_elements = {
# # The paper size ('letterpaper' or 'a4paper').
# #'papersize': 'letterpaper',
#
# # The font size ('10pt', '11pt' or '12pt').
# #'pointsize': '10pt',
#
# # Additional stuff for the LaTeX preamble.
# #'preamble': '',
# }
#
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
'fncychap': '',
'fontpkg': r'''
\usepackage[charter]{mathdesign}
\usepackage{beramono}
''',
# Additional stuff for the LaTeX preamble.
'preamble': r'''
\renewcommand*\sfdefault{\rmdefault}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('apidoc/modules', 'Keypad.tex', 'Keypad Documentation',
'Sam Roth', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
latex_appendices = ['plugins', 'signal-howto', 'settings-howto']
# If false, no module index is generated.
latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'keypad', 'Keypad Documentation',
['Sam Roth'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Keypad', 'Keypad Documentation',
'Sam Roth', 'Keypad', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/3.4': None}
| gpl-3.0 | 5,291,438,570,175,474,000 | 30.243243 | 79 | 0.677336 | false |
AsherYang/ThreeLine | server/tornado/JSONEncoder.py | 1 | 1454 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Author: AsherYang
Email: [email protected]
Date: 2017/4/11
Desc: json encoder for custom class
"""
import json
from BaseResponse import BaseResponse
from ContentData import ContentData
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, BaseResponse):
contentData = obj.data
# print type(contentData)
realContent = []
if isinstance(contentData, list):
for data in contentData:
if isinstance(data, ContentData):
string = {'id': data.id, 'syncKey': data.syncKey, 'updateTime': data.updateTime,
'title': data.title, 'content': data.content, 'author': data.author,
'imagePath': data.imagePath, 'songName': data.songName, 'singer': data.singer}
realContent.append(string)
elif isinstance(contentData, basestring):
realContent = contentData
else:
realContent.append('unknown data type')
return {'code': obj.code, 'desc': obj.desc,
'data': realContent}
# if isinstance(contentData, list):
# print contentData[0].author
# else:
# print type(contentData)
else:
return json.JSONEncoder.default(self, obj)
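# Example usage (illustrative sketch; assumes ``response`` is a populated
# BaseResponse whose ``data`` holds ContentData objects):
#
#   json_str = json.dumps(response, cls=JSONEncoder)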
| apache-2.0 | -4,659,070,593,291,785,000 | 34.463415 | 112 | 0.553645 | false |
almorel/lab | python/pycuda/main.py | 1 | 7262 | #!/usr/bin/env python
# Mandelbrot calculated using the GPU, serial numpy and faster numpy
# Use to show the speed difference between CPU and GPU calculations
# [email protected] July 2010
# Based on vegaseat's TKinter/numpy example code from 2006
# http://www.daniweb.com/code/snippet216851.html#
# with minor changes to move to numpy from the obsolete Numeric
import sys
import numpy as nm
import Tkinter as tk
import Image # PIL
import ImageTk # PIL
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
# set width and height of window, more pixels take longer to calculate
w = 1000
h = 1000
from pycuda.elementwise import ElementwiseKernel
complex_gpu = ElementwiseKernel(
"pycuda::complex<float> *z, pycuda::complex<float> *q, int *iteration, int maxiter",
"for (int n=0; n < maxiter; n++) {z[i] = (z[i]*z[i])+q[i]; if (abs(z[i]) > 2.0f) {iteration[i]=n; z[i] = pycuda::complex<float>(); q[i] = pycuda::complex<float>();};}",
"complex5",
preamble="#include <pycuda-complex.hpp>",)
def calculate_z_gpu(q, maxiter, z):
output = nm.resize(nm.array(0,), q.shape)
q_gpu = gpuarray.to_gpu(q.astype(nm.complex64))
z_gpu = gpuarray.to_gpu(z.astype(nm.complex64))
iterations_gpu = gpuarray.to_gpu(output)
# the for loop and complex calculations are all done on the GPU
# we bring the iterations_gpu array back to determine pixel colours later
complex_gpu(z_gpu, q_gpu, iterations_gpu, maxiter)
iterations = iterations_gpu.get()
return iterations
def calculate_z_numpy_gpu(q, maxiter, z):
"""Calculate z using numpy on the GPU via gpuarray"""
outputg = gpuarray.to_gpu(nm.resize(nm.array(0,), q.shape).astype(nm.int32))
zg = gpuarray.to_gpu(z.astype(nm.complex64))
qg = gpuarray.to_gpu(q.astype(nm.complex64))
# 2.0 as an array
twosg = gpuarray.to_gpu(nm.array([2.0]*zg.size).astype(nm.float32))
# 0+0j as an array
cmplx0sg = gpuarray.to_gpu(nm.array([0+0j]*zg.size).astype(nm.complex64))
# for abs_zg > twosg result
comparison_result = gpuarray.to_gpu(nm.array([False]*zg.size).astype(nm.bool))
# we'll add 1 to iterg after each iteration
iterg = gpuarray.to_gpu(nm.array([0]*zg.size).astype(nm.int32))
for iter in range(maxiter):
zg = zg*zg + qg
# abs returns a complex (rather than a float) from the complex
# input where the real component is the absolute value (which
# looks like a bug) so I take the .real after abs()
abs_zg = abs(zg).real
comparison_result = abs_zg > twosg
qg = gpuarray.if_positive(comparison_result, cmplx0sg, qg)
zg = gpuarray.if_positive(comparison_result, cmplx0sg, zg)
outputg = gpuarray.if_positive(comparison_result, iterg, outputg)
iterg = iterg + 1
output = outputg.get()
return output
def calculate_z_numpy(q, maxiter, z):
# calculate z using numpy, this is the original
# routine from vegaseat's URL
    # NOTE this routine was faster using a default of double-precision complex numbers
# rather than the current single precision
output = nm.resize(nm.array(0,), q.shape).astype(nm.int32)
for iter in range(maxiter):
z = z*z + q
done = nm.greater(abs(z), 2.0)
q = nm.where(done,0+0j, q)
z = nm.where(done,0+0j, z)
output = nm.where(done, iter, output)
return output
def calculate_z_serial(q, maxiter, z):
# calculate z using pure python with numpy arrays
# this routine unrolls calculate_z_numpy as an intermediate
# step to the creation of calculate_z_gpu
# it runs slower than calculate_z_numpy
output = nm.resize(nm.array(0,), q.shape).astype(nm.int32)
for i in range(len(q)):
if i % 100 == 0:
# print out some progress info since it is so slow...
print "%0.2f%% complete" % (1.0/len(q) * i * 100)
for iter in range(maxiter):
z[i] = z[i]*z[i] + q[i]
if abs(z[i]) > 2.0:
q[i] = 0+0j
z[i] = 0+0j
output[i] = iter
return output
show_instructions = False
if len(sys.argv) == 1:
show_instructions = True
if len(sys.argv) > 1:
if sys.argv[1] not in ['gpu', 'gpuarray', 'numpy', 'python']:
show_instructions = True
if show_instructions:
print "Usage: python mandelbrot.py [gpu|gpuarray|numpy|python]"
print "Where:"
print " gpu is a pure CUDA solution on the GPU"
print " gpuarray uses a numpy-like CUDA wrapper in Python on the GPU"
print " numpy is a pure Numpy (C-based) solution on the CPU"
print " python is a pure Python solution on the CPU with numpy arrays"
sys.exit(0)
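# For example (illustrative): running ``python main.py gpu`` picks the pure CUDA
# routine from the table below, matching the usage text printed above.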
routine = {'gpuarray':calculate_z_numpy_gpu,
'gpu':calculate_z_gpu,
'numpy':calculate_z_numpy,
'python':calculate_z_serial}
calculate_z = routine[sys.argv[1]]
##if sys.argv[1] == 'python':
# import psyco
# psyco.full()
# Using a WinXP Intel Core2 Duo 2.66GHz CPU (1 CPU used)
# with a 9800GT GPU I get the following timings (smaller is better).
# With 200x200 problem with max iterations set at 300:
# calculate_z_gpu: 0.03s
# calculate_z_serial: 8.7s
# calculate_z_numpy: 0.3s
#
# Using WinXP Intel 2.9GHz CPU (1 CPU used)
# with a GTX 480 GPU I get the following using 1000x1000 plot with 1000 max iterations:
# gpu: 0.07s
# gpuarray: 3.4s
# numpy: 43.4s
# python (serial): 1605.6s
class Mandelbrot(object):
def __init__(self):
# create window
self.root = tk.Tk()
self.root.title("Mandelbrot Set")
self.create_image()
self.create_label()
# start event loop
self.root.mainloop()
def draw(self, x1, x2, y1, y2, maxiter=300):
# draw the Mandelbrot set, from numpy example
xx = nm.arange(x1, x2, (x2-x1)/w*2)
yy = nm.arange(y2, y1, (y1-y2)/h*2) * 1j
# force yy, q and z to use 32 bit floats rather than
# the default 64 doubles for nm.complex for consistency with CUDA
yy = yy.astype(nm.complex64)
q = nm.ravel(xx+yy[:, nm.newaxis]).astype(nm.complex64)
z = nm.zeros(q.shape, nm.complex64)
start_main = drv.Event()
end_main = drv.Event()
start_main.record()
output = calculate_z(q, maxiter, z)
end_main.record()
end_main.synchronize()
secs = start_main.time_till(end_main)*1e-3
print "Main took", secs
output = (output + (256*output) + (256**2)*output) * 8
# convert output to a string
self.mandel = output.tostring()
def create_image(self):
""""
create the image from the draw() string
"""
self.im = Image.new("RGB", (w/2, h/2))
# you can experiment with these x and y ranges
self.draw(-2.13, 0.77, -1.3, 1.3, 1000)
self.im.fromstring(self.mandel, "raw", "RGBX", 0, -1)
def create_label(self):
# put the image on a label widget
self.image = ImageTk.PhotoImage(self.im)
self.label = tk.Label(self.root, image=self.image)
self.label.pack()
# test the class
if __name__ == '__main__':
test = Mandelbrot()
| gpl-3.0 | -6,831,566,313,177,774,000 | 34.42439 | 180 | 0.631644 | false |
pbanaszkiewicz/amy | amy/workshops/management/commands/instructors_activity.py | 1 | 5300 | import logging
import os
from django.core.mail import send_mail
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = "Report instructors activity."
def add_arguments(self, parser):
parser.add_argument(
"--send-out-for-real",
action="store_true",
default=False,
help="Send information to the instructors.",
)
parser.add_argument(
"--no-may-contact-only",
action="store_true",
default=False,
help="Include instructors not willing to be contacted.",
)
parser.add_argument(
"--django-mailing",
action="store_true",
default=False,
help="Use Django mailing system. This requires some environmental "
"variables to be set, see `settings.py`.",
)
parser.add_argument(
"-s",
"--sender",
action="store",
default="[email protected]",
help='E-mail used in "from:" field.',
)
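    # Illustrative invocation (assumes the project's manage.py, since this is a
    # Django management command):
    #   python manage.py instructors_activity --send-out-for-real --django-mailing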
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related("person")
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=["instructor", "helper"])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related("airport").prefetch_related(
"task_set", "lessons", "award_set", "badges"
)
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles).select_related(
"event", "role"
)
record = {
"person": person,
"lessons": person.lessons.all(),
"instructor_awards": person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
"tasks": zip(tasks, self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template("mailing/instructor_activity.txt")
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return "Updating your Software Carpentry information"
def recipient(self, record):
return record["person"].email
def send_message(
self, subject, message, sender, recipient, for_real=False, django_mailing=False
):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, "w")
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write("-" * 40 + "\n")
self.stdout.write("To: {}\n".format(recipient))
self.stdout.write("Subject: {}\n".format(subject))
self.stdout.write("From: {}\n".format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + "\n")
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options["send_out_for_real"]
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options["no_may_contact_only"]
# use mailing options from settings.py or the `mail` system command?
django_mailing = options["django_mailing"]
# verbosity option is added by Django
self.verbosity = int(options["verbosity"])
sender = options["sender"]
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(
subject,
message,
sender,
recipient,
for_real=send_for_real,
django_mailing=django_mailing,
)
if self.verbosity >= 1:
self.stdout.write("Sent {} emails.\n".format(len(results)))
| mit | -337,196,337,048,741,500 | 32.757962 | 87 | 0.561698 | false |
gdetor/SI-RF-Structure | Statistics/bivariate.py | 1 | 5235 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script illustrates the bivariate plot presented in [1].
import math
import numpy as np
import matplotlib
matplotlib.use('macosx')
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
import matplotlib.patheffects as PathEffects
matplotlib.rc('xtick', direction = 'out')
matplotlib.rc('ytick', direction = 'out')
matplotlib.rc('xtick.major', size = 8, width=1)
matplotlib.rc('xtick.minor', size = 4, width=1)
matplotlib.rc('ytick.major', size = 8, width=1)
matplotlib.rc('ytick.minor', size = 4, width=1)
matplotlib.rc('text', usetex=True )
matplotlib.rc('font', serif='Times')
#indices = [(3, 18) , (26, 18) , (10, 7) , (25, 11) , (3, 21) , (8, 11) , (21, 14) , (20, 16) , (8, 19) , (16, 5) , (0, 9) , (17, 15) , (7, 20) , (20, 0) , (27, 19) , (4, 24) ]
indices = [(10, 21) , (29, 16) , (28, 14) , (20, 17) , (13, 19) , (3, 15) , (23, 18) , (0, 18) , (8, 31) , (16, 11) , (0, 20) , (24, 13) , (11, 2) , (1, 1) , (19, 20) , (2, 21)]
if __name__=='__main__':
Z = np.load('areas-ref.npy')
X, Y = Z[:,0], Z[:,1]
fig = plt.figure(figsize=(8,8), facecolor="white")
ax = plt.subplot(1,1,1,aspect=1)
plt.scatter(X+0.01,Y+0.01,s=3, edgecolor='k', facecolor='k')
# Show some points
I = [a*32+b for (a,b) in indices]
# I = [3,143,149,189,1,209,192,167,64,87,10,40,68,185,61,198]
plt.scatter(X[I],Y[I],s=5,color='k')
for i in range(len(I)):
x,y = X[i],Y[i]
letter = ord('A')+i
plt.scatter(X[I[i]], Y[I[i]], s=40, facecolor='None', edgecolor='k')
# label = plt.annotate(" %c" % (chr(letter)), (x+.25,y+.25), weight='bold', fontsize=16,
# path_effects=[PathEffects.withStroke(linewidth=2, foreground="w", alpha=.75)])
plt.annotate(" %c" % (chr(ord('A')+i)), (X[I[i]]+.25,Y[I[i]]+.25), weight='bold')
# Select some points by cliking them
# letter = ord('A')
# def onclick(event):
# global letter
# #print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# # event.button, event.x, event.y, event.xdata, event.ydata)
# C = (X-event.xdata)**2 + (Y-event.ydata)**2
# I = np.argmin(C)
# plt.ion()
# x,y = X[I],Y[I]
# # print x, y, I, np.unravel_index(I,(32,32))
# print np.unravel_index(I,(32,32)), ",",
# plt.scatter(x, y, s=40, facecolor='None', edgecolor='k')
# label = plt.annotate(" %c" % (chr(letter)), (x+.25,y+.25), weight='bold', fontsize=16,
# path_effects=[PathEffects.withStroke(linewidth=2, foreground="w", alpha=.75)])
# #label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65 ))
# plt.ioff()
# letter = letter+1
# cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.xlabel(r'Excitatory area (mm2)')
    plt.ylabel(r'Inhibitory area (mm2)')
plt.xscale('log')
plt.yscale('log')
plt.xticks([5,10,30], ['5','10','30'])
plt.yticks([5,10,30], ['5','10','30'])
plt.xlim(5,30)
plt.ylim(5,30)
plt.text(5.5,26, "n = 1024")
plt.plot([1,100],[1,100], ls='--', color='k')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.savefig('bivariate.pdf', dpi=72)
plt.show()
| gpl-3.0 | 5,246,652,282,058,843,000 | 42.625 | 177 | 0.634193 | false |
asmaps/nsupdate.info | nsupdate/main/models.py | 1 | 4197 | from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.conf import settings
from django.db.models.signals import post_delete
from django.contrib.auth.hashers import make_password
from main import dnstools
import dns.resolver
from datetime import datetime
import re
class BlacklistedDomain(models.Model):
domain = models.CharField(
max_length=256,
unique=True,
help_text='Blacklisted domain. Evaluated as regex (search).')
last_update = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return u"%s" % (self.domain, )
def domain_blacklist_validator(value):
for bd in BlacklistedDomain.objects.all():
if re.search(bd.domain, value):
raise ValidationError(u'This domain is not allowed')
class Domain(models.Model):
domain = models.CharField(max_length=256, unique=True)
nameserver_ip = models.IPAddressField(max_length=256,
help_text="An IP where the nsupdates for this domain will be sent to")
nameserver_update_key = models.CharField(max_length=256)
last_update = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return u"%s" % (self.domain, )
class Host(models.Model):
"""TODO: hash update_secret on save (if not already hashed)"""
subdomain = models.CharField(max_length=256, validators=[
RegexValidator(
regex=r'^(([a-z0-9][a-z0-9\-]*[a-z0-9])|[a-z0-9])$',
message='Invalid subdomain: only "a-z", "0-9" and "-" is allowed'
),
domain_blacklist_validator])
domain = models.ForeignKey(Domain)
update_secret = models.CharField(max_length=256) # gets hashed on save
comment = models.CharField(
max_length=256, default='', blank=True, null=True)
last_update = models.DateTimeField(auto_now=True)
last_api_update = models.DateTimeField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='hosts')
def __unicode__(self):
return u"%s.%s - %s" % (
self.subdomain, self.domain.domain, self.comment)
class Meta:
unique_together = (('subdomain', 'domain'),)
def get_fqdn(self):
return '%s.%s' % (self.subdomain, self.domain.domain)
@classmethod
def filter_by_fqdn(cls, fqdn, **kwargs):
# Assuming subdomain has no dots (.) the fqdn is split at the first dot
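        # For illustration: filter_by_fqdn("myhost.example.org") filters on
        # subdomain="myhost" and domain__domain="example.org".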
splitted = fqdn.split('.', 1)
if not len(splitted) == 2:
            raise NotImplementedError("FQDN has to contain a dot")
return Host.objects.filter(
subdomain=splitted[0], domain__domain=splitted[1], **kwargs)
def getIPv4(self):
try:
return dnstools.query_ns(self.get_fqdn(), 'A')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
return ''
def getIPv6(self):
try:
return dnstools.query_ns(self.get_fqdn(), 'AAAA')
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
return ''
def poke(self):
self.last_api_update = datetime.now()
self.save()
def generate_secret(self):
# note: we use a quick hasher for the update_secret as expensive
# more modern hashes might put too much load on the servers. also
# many update clients might use http without ssl, so it is not too
# secure anyway.
secret = User.objects.make_random_password()
self.update_secret = make_password(
secret,
hasher='sha1'
)
self.save()
return secret
def post_delete_host(sender, **kwargs):
obj = kwargs['instance']
dnstools.delete(obj.get_fqdn())
post_delete.connect(post_delete_host, sender=Host)
| bsd-3-clause | 9,057,991,372,963,181,000 | 33.68595 | 90 | 0.653562 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/unit/jobs/test_mapper.py | 1 | 5801 | import uuid
import jobs.test_rules
from galaxy.jobs.mapper import (
JobRunnerMapper,
ERROR_MESSAGE_NO_RULE_FUNCTION,
ERROR_MESSAGE_RULE_FUNCTION_NOT_FOUND,
)
from galaxy.jobs import JobDestination
from galaxy.util import bunch
WORKFLOW_UUID = uuid.uuid1().hex
TOOL_JOB_DESTINATION = JobDestination()
DYNAMICALLY_GENERATED_DESTINATION = JobDestination()
def test_static_mapping():
mapper = __mapper()
assert mapper.get_job_destination( {} ) is TOOL_JOB_DESTINATION
def test_caching():
mapper = __mapper()
mapper.get_job_destination( {} )
mapper.get_job_destination( {} )
assert mapper.job_wrapper.tool.call_count == 1
def test_dynamic_mapping():
mapper = __mapper( __dynamic_destination( dict( function="upload" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == "local_runner"
def test_dynamic_mapping_priorities():
mapper = __mapper( __dynamic_destination( dict( function="tophat" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
# Next line verifies we using definition in 20_instance.py instead of
# 10_site.py.
assert mapper.job_config.rule_response == "instance_dest_id"
def test_dynamic_mapping_defaults_to_tool_id_as_rule():
mapper = __mapper( __dynamic_destination( ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == "tool1_dest_id"
def test_dynamic_mapping_job_conf_params():
mapper = __mapper( __dynamic_destination( dict( function="check_job_conf_params", param1="7" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == "sent_7_dest_id"
def test_dynamic_mapping_function_parameters():
mapper = __mapper( __dynamic_destination( dict( function="check_rule_params" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == "all_passed"
def test_dynamic_mapping_resource_parameters():
mapper = __mapper( __dynamic_destination( dict( function="check_resource_params" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == "have_resource_params"
def test_dynamic_mapping_workflow_invocation_parameter():
mapper = __mapper( __dynamic_destination( dict( function="check_workflow_invocation_uuid" ) ) )
assert mapper.get_job_destination( {} ) is DYNAMICALLY_GENERATED_DESTINATION
assert mapper.job_config.rule_response == WORKFLOW_UUID
def test_dynamic_mapping_no_function():
dest = __dynamic_destination( dict( ) )
mapper = __mapper( dest )
mapper.job_wrapper.tool.all_ids = [ "no_such_function" ]
error_message = ERROR_MESSAGE_NO_RULE_FUNCTION % dest
__assert_mapper_errors_with_message( mapper, error_message )
def test_dynamic_mapping_missing_function():
dest = __dynamic_destination( dict( function="missing_func" ) )
mapper = __mapper( dest )
mapper.job_wrapper.tool.all_ids = [ "no_such_function" ]
error_message = ERROR_MESSAGE_RULE_FUNCTION_NOT_FOUND % ( "missing_func" )
__assert_mapper_errors_with_message( mapper, error_message )
def __assert_mapper_errors_with_message( mapper, message ):
exception = None
try:
mapper.get_job_destination( {} )
except Exception as e:
exception = e
assert exception
assert str( exception ) == message, "%s != %s" % ( str( exception ), message )
def __mapper( tool_job_destination=TOOL_JOB_DESTINATION ):
job_wrapper = MockJobWrapper( tool_job_destination )
job_config = MockJobConfig()
mapper = JobRunnerMapper(
job_wrapper,
{},
job_config
)
mapper.rules_module = jobs.test_rules
return mapper
def __dynamic_destination( params={} ):
return JobDestination( runner="dynamic", params=params )
class MockJobConfig( object ):
def __init__( self ):
self.rule_response = None
self.dynamic_params = None
def get_destination( self, rep ):
# Called to transform dynamic job destination rule response
# from destination id/runner url into a dynamic job destination.
self.rule_response = rep
return DYNAMICALLY_GENERATED_DESTINATION
class MockJobWrapper( object ):
def __init__( self, tool_job_destination ):
self.tool = MockTool( tool_job_destination )
self.job_id = 12345
self.app = object()
def is_mock_job_wrapper( self ):
return True
def get_job(self):
raw_params = {
"threshold": 8,
"__workflow_invocation_uuid__": WORKFLOW_UUID,
}
def get_param_values( app, ignore_errors ):
assert app == self.app
params = raw_params.copy()
params[ "__job_resource" ] = {
"__job_resource__select": "True",
"memory": "8gb"
}
return params
return bunch.Bunch(
user=bunch.Bunch(
id=6789,
email="[email protected]"
),
raw_param_dict=lambda: raw_params,
get_param_values=get_param_values
)
class MockTool( object ):
def __init__( self, tool_job_destination ):
self.id = "testtoolshed/devteam/tool1/23abcd13123"
self.call_count = 0
self.tool_job_destination = tool_job_destination
self.all_ids = [ "testtoolshed/devteam/tool1/23abcd13123", "tool1" ]
def get_job_destination( self, params ):
self.call_count += 1
return self.tool_job_destination
def is_mock_tool( self ):
return True
| gpl-3.0 | -3,319,791,606,952,706,000 | 31.407821 | 102 | 0.655749 | false |
efornal/pulmo | app/migrations/0004_objectives_and_targets_connection.py | 1 | 2224 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0003_create_model_productionform'),
]
operations = [
migrations.CreateModel(
name='ConnectionSource',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200, null=True)),
('observations', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'connection_source',
'verbose_name_plural': 'ConnectionSources',
},
),
migrations.CreateModel(
name='ConnectionTarget',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
('ip', models.CharField(max_length=200, null=True)),
('ip_firewall', models.CharField(max_length=200, null=True)),
('observations', models.TextField(null=True, blank=True)),
],
options={
'db_table': 'connection_target',
'verbose_name_plural': 'ConnectionTargets',
},
),
migrations.AddField(
model_name='applicationform',
name='connection_sources',
field=models.ManyToManyField(to='app.ConnectionSource', blank=True),
),
migrations.AddField(
model_name='applicationform',
name='connection_targets',
field=models.ManyToManyField(to='app.ConnectionTarget', blank=True),
),
migrations.AddField(
model_name='productionform',
name='connection_sources',
field=models.ManyToManyField(to='app.ConnectionSource', blank=True),
),
migrations.AddField(
model_name='productionform',
name='connection_targets',
field=models.ManyToManyField(to='app.ConnectionTarget', blank=True),
),
]
| gpl-3.0 | -2,067,851,419,938,858,500 | 35.459016 | 80 | 0.547212 | false |
FluidityProject/fluidity | tools/optimality.py | 2 | 32601 | #!/usr/bin/python3
# Copyright (C) 2006 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
# Prof. C Pain
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# [email protected]
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os.path
import numpy
import argparse
import shlex
from subprocess import Popen, PIPE
import scipy.optimize
import string
import libspud
from fluidity_tools import stat_parser
from fluidity_tools import stat_creator
import time
import pickle
import glob
import math
import shutil
# Hack for libspud to be able to read options from different files.
# A better solution would be to fix libspud or use an alternative implementation like
# https://github.com/gmarkall/manycore_form_compiler/blob/master/mcfc/optionfile.py
def superspud(filename, cmd):
  libspud.load_options(filename)
  r = None
  # Accept either a single command string or a list/tuple of command strings.
  # (Checking hasattr(cmd, '__iter__') is wrong under Python 3, where strings are
  # iterable, and exec cannot rebind the local variable r; evaluating each libspud
  # call expression avoids both problems.)
  cmds = cmd if isinstance(cmd, (list, tuple)) else [cmd]
  for c in cmds:
    try:
      r = eval(c)
    except libspud.SpudNewKeyWarning:
      pass
  libspud.clear_options()
  return r
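# Illustrative usage of superspud (the file name "model.flml" is hypothetical; the
# option paths are ones used further down in this script):
#   name = superspud("model.flml", "libspud.get_option('/simulation_name')")
#   superspud("model.flml", ["libspud.add_option('/adjoint/controls/load_controls')",
#                            "libspud.write_options('model.flml')"])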
# Executes the model specified in the optimality option tree
# The model stdout is printed to stdout.
def run_model(m, opt_options, model_options):
update_custom_controls(m, opt_options)
if (superspud(model_options, "libspud.have_option('/adjoint/controls/load_controls')")):
    # If the model is loading the default controls, we need to make sure the control files are up to date:
update_default_controls(m, opt_options, model_options)
command_line = superspud(opt_options, "libspud.get_option('/model/command_line')")
option_file = superspud(opt_options, "libspud.get_option('/model/option_file')")
args = shlex.split(command_line)
args.append(option_file)
p = Popen(args, stdout=PIPE,stderr=PIPE)
  out = "".join(line.decode() for line in p.stdout.readlines())
  outerr = "".join(line.decode() for line in p.stderr.readlines())
if p.wait() != 0:
print("Model execution failed.")
print("The error was:")
print(outerr)
exit()
if verbose:
print("Model output: ")
print(out)
# Initialises the custom controls using the supplied python code.
def get_custom_controls(opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies python function to initialise the controls.
if ctype == 'custom':
initial_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/initial_control')")
d = {}
      exec(initial_control_code, d)
m[cname] = d['initial_control']()
return m
# Initialise the default controls by reading in the control files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control files).
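# The control files are expected to follow the naming scheme used in the glob below,
# e.g. control_<simulation_name>_<control_name>_<timestep>.pkl (illustration only).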
def read_default_controls(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'):len(ctrl_file)-4])
except:
print("Error while reading the control files.")
print("The control file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
m[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control file for control ", cname, ".")
return m
# Initialise the default control bounds by reading in the control bound files.
# This assumes that the model has been run without the "/adjoint/load_controls" option (which produced the initial control bound files).
def read_default_control_bounds(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
m_bounds = {"lower_bound": {}, "upper_bound": {}}
# Loop over controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype != 'default':
continue
have_bound = {}
# Loop over lower and upper bound
for k in m_bounds.keys():
have_bound[k] = superspud(model_options, "libspud.have_option('/adjoint/controls/control["+str(i)+"/bounds/"+k+"')")
if not have_bound[k]:
continue
act_flag = False # Check that at least one control bound file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_'+cname+ '_'+k+'_[0-9]*.pkl'):
try:
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_'+ cname+ '_'+k+'_'):len(ctrl_file)-4])
except:
print("Error while reading the control bound files.")
print("The control bound file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
m_bounds[k][(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control bound file for control ", cname, ".")
return m_bounds
# Completes the control bounds by adding the missing controls and filling their entries with None
def complete_default_control_bounds(m, m_bounds):
bound_types = {"lower_bound": {}, "upper_bound": {}}
for bound_type in bound_types:
for control in m.keys():
      if control in m_bounds[bound_type]:
continue
# We need objects as dtype because we want to keep the Nones for later
m_bounds[bound_type][control] = numpy.empty(shape = m[control].shape, dtype=object)
m_bounds[bound_type][control].fill(None)
return m_bounds
# Returns the control derivatives for both the custom and the default controls.
def read_control_derivatives(opt_options, model_options):
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
derivs = {}
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
act_flag = False # Check that at least one control file exists
for ctrl_file in glob.iglob('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_[0-9]*.pkl'):
try:
          # The naming convention is control+simulation_name+control_name+TotalDerivative, but do not forget that
          # the derivatives were produced during the adjoint run in which the simulation name is simulation_name+functional_name
timestep = int(ctrl_file.strip()[len('control_'+simulation_name+'_adjoint_'+functional_name+'_'+ cname+ '_TotalDerivative_'):len(ctrl_file)-4])
except:
print("Error while reading the control derivative files.")
print("The control file ", ctrl_file, " does not conform the standard naming conventions for control files.")
exit()
f = open(ctrl_file, 'rb')
derivs[(cname, timestep)] = pickle.load(f)
f.close()
act_flag = True
if act_flag == False:
print("Warning: Found no control derivative file for control ", cname, ".")
elif ctype == 'custom':
control_derivative_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/control_derivative')")
d = {}
      exec(control_derivative_code, d)
derivs[cname] = d['control_derivative']()
else:
print("Unknown control type " + ctype + ".")
exit()
return derivs
# Writes the custom controls onto disk
def update_custom_controls(m, opt_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# With the custom type, the user specifies a python function to update the controls.
if ctype == 'custom':
update_control_code = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type::custom/update_control')")
d = {}
      exec(update_control_code, d)
d['update_control'](m[cname])
# Writes the default controls onto disk
def update_default_controls(m, opt_options, model_options):
global debug
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
# Loop over default controls
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
if ctype == 'default':
# Loop over controls
for k in m.keys():
# Check if that is a control we are looking for
if k[0] == cname:
timestep = k[1]
file_name = 'control_'+simulation_name + '_' + cname + '_' + str(timestep) + '.pkl'
if not os.path.isfile(file_name):
print("Error: writing control file ", file_name, " which did not exist before.")
exit()
if debug:
            # Check that the control we are about to write has the same shape as the one already on disk
f = open(file_name, 'rb')
m_old = pickle.load(f)
if m[k].shape != m_old.shape:
print("Error: The shape of the control in ", file_name, " changed.")
exit()
f.close()
f = open(file_name, 'wb')
pickle.dump(m[k], f)
f.close()
# Check the consistency of model and option file
def check_option_consistency(opt_options, model_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
ctype = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/type/name')")
# Check that the default controls exist in the model
# and that custom controls not.
if ctype == 'custom':
if superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print("The custom control " + cname + " is a default control in the model option tree.")
exit()
elif ctype== 'default':
if not superspud(model_options, "libspud.have_option('/adjoint/controls/control::" + cname + "')"):
print("The default control " + cname + " was not found in the model option tree.")
exit()
else:
print("Unknown control type " + ctype + ".")
exit()
# Check that the controls in dJdm are consistent with the ones in m
# If m_bounds is present, it also checks the consistency of the bounds
def check_control_consistency(m, djdm, m_bounds=None):
  djdm_keys = sorted(djdm.keys())
  m_keys = sorted(m.keys())
if m_keys != djdm_keys:
print("Error: The controls are not consistent with the controls derivatives.")
print("The controls are:", m_keys)
print("The control derivatives are:", djdm_keys)
print("Check the consistency of the control definition in the model and the optimality configuration.")
exit()
for k, v in sorted(m.items()):
if m[k].shape != djdm[k].shape:
print("The control ", k, " has shape ", m[k].shape, " but dJd(", k, ") has shape ", djdm[k].shape)
exit()
# Check the bounds
  if m_bounds is not None:
bound_types = ("lower_bound", "upper_bound")
for bound_type in bound_types:
      m_bounds_keys = sorted(m_bounds[bound_type].keys())
if m_keys != m_bounds_keys:
print("Error: The controls are not consistent with the control ", bound_type, ".")
print("The controls are:", m_keys)
print("The control ", bound_type, "s are:", m_bounds_keys)
exit()
for k, v in sorted(m.items()):
if m[k].shape != m_bounds[bound_type][k].shape:
print("The control ", k, " has shape ", m[k].shape, " but the ", bound_type, " has shape ", m_bounds[bound_type][k].shape)
exit()
def delete_temporary_files(model_options):
# remove any control files
pkl_files = glob.glob('control_*.pkl')
for f in pkl_files:
os.remove(f)
# remove any stat files from the model
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_files = glob.glob(simulation_name+'*.stat')
for f in stat_files:
os.remove(f)
# Returns true if bounds are specified for one of the controls
def have_bounds(opt_options, model_options):
nb_controls = superspud(opt_options, "libspud.option_count('/control_io/control')")
have_bounds = False
for i in range(nb_controls):
cname = superspud(opt_options, "libspud.get_option('/control_io/control["+str(i)+"]/name')")
have_bounds = have_bounds or superspud(model_options, "libspud.have_option('/adjoint/controls/control["+cname+"]/bounds')")
return have_bounds
#######################################################
################# Optimisation loop ###################
#######################################################
def optimisation_loop(opt_options, model_options):
# Implement a memoization function to avoid duplicated functional (derivative) evaluations
class MemoizeMutable:
def __init__(self, fn):
self.fn = fn
self.memo = {}
    def __call__(self, *args, **kwds):
      key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
      if key not in self.memo:
        self.memo[key] = self.fn(*args, **kwds)
      return self.memo[key]
    def has_cache(self, *args, **kwds):
      key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
      return key in self.memo
    # Insert a function value into the cache manually.
    def __add__(self, value, *args, **kwds):
      key = pickle.dumps(args, 1) + pickle.dumps(kwds, 1)
      self.memo[key] = value
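  # Illustrative use (matches how it is applied below): mem_pure_J = MemoizeMutable(pure_J)
  # means repeated calls with identical (pickled) arguments reuse the cached value instead
  # of re-running the model.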
# Small test code for the un/serialiser
def test_serialise():
x = {'a': numpy.random.rand(3,2), 'b': numpy.random.rand(3,2,4,5), 'c': numpy.random.rand(1)}
[m_serial, m_shape] = serialise(x)
x_re = unserialise(m_serial, m_shape)
return (x['a'] == x_re['a']).all() and (x['b'] == x_re['b']).all() and (x['c'] == x_re['c']).all()
# This function takes in a dictionary m with numpy.array as entries.
# From that it creates one serialised numpy.array with all the data.
# In addition it creates m_shape, a dictionary which is used in unserialise.
def serialise(m):
m_serial = numpy.array([])
m_shape = {}
for k, v in sorted(m.items()):
m_serial = numpy.append(m_serial, v.flatten())
m_shape[k] = v.shape
return [m_serial, m_shape]
# Reconstructs the original dictionary of numpy.array's from the serialised version and the shape.
def unserialise(m_serial, m_shape):
m = {}
start_index = 0
for k, s in sorted(m_shape.items()):
offset = 1
for d in s:
offset = offset * d
end_index = start_index + offset
m[k] = numpy.reshape(m_serial[start_index:end_index], s)
start_index = end_index
return m
# Returns the functional value with the current controls
def J(m_serial, m_shape, write_stat=True):
has_cache = mem_pure_J.has_cache(m_serial, m_shape)
if has_cache:
cache_str = "(cache hit)"
else:
cache_str = ""
J = mem_pure_J(m_serial, m_shape)
print("J = %s %s" % (J, cache_str))
if write_stat:
# Update the functional value in the optimisation stat file
stat_writer[(functional_name, 'value')] = J
return J
# A pure version of the computation of J
def pure_J(m_serial, m_shape):
if verbose:
print("Running forward model for functional evaluation (<function pure_J>)")
m = unserialise(m_serial, m_shape)
run_model(m, opt_options, model_options)
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_file = simulation_name+".stat"
s = stat_parser(stat_file)
if not functional_name in s:
print("The functional '", functional_name, "' does not exist in the stat file.")
print("Check your model configuration")
exit()
J = s[functional_name]["value"][-1]
return J
# Returns the functional derivative with respect to the controls.
def dJdm(m_serial, m_shape, write_stat=True):
return mem_pure_dJdm(m_serial, m_shape)
  # A pure version of the computation of dJdm
def pure_dJdm(m_serial, m_shape):
if verbose:
print("Running forward/adjoint model for functional derivative evaluation (<function pure_dJdm>)")
m = unserialise(m_serial, m_shape)
run_model(m, opt_options, model_options)
# While computing dJdm we run the forward/adjoint model and in particular we compute the
# functional values. In order to not compute the functional values again when calling
    # J, we manually write it into the memoize cache.
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
stat_file = simulation_name+".stat"
J = stat_parser(stat_file)[functional_name]["value"][-1]
    # Add the functional value to mem_pure_J's cache
mem_pure_J.__add__(J, m_serial, m_shape)
# Now get the functional derivative information
djdm = read_control_derivatives(opt_options, model_options)
check_control_consistency(m, djdm, m_bounds)
    # Serialise djdm in the same order as m_serial
djdm_serial = []
for k, v in sorted(m_shape.items()):
djdm_serial = numpy.append(djdm_serial, djdm[k])
return djdm_serial
# Check the gradient using the Taylor expansion
def check_gradient(m_serial, m_shape):
print('-' * 80)
print(' Entering gradient verification ')
print('-' * 80)
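  # Reasoning behind the test (comment added for clarity): for a perturbation h*v,
  #   |J(m + h*v) - J(m)|               = O(h)    (order-0 remainder)
  #   |J(m + h*v) - J(m) - h*v.dJ/dm|   = O(h^2)  (order-1 remainder)
  # so halving h should divide the first error by ~2 and the second by ~4, i.e. the
  # log2 convergence rates printed below should approach 1.0 and 2.0 respectively.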
fd_errors = []
fd_conv = []
grad_errors = []
grad_conv = []
nb_tests = 4
perturbation = 2e-4
perturbation_vec = numpy.random.rand(len(m_serial))
j_unpert = J(m_serial, m_shape)
djdm_unpert = dJdm(m_serial, m_shape)
for i in range(nb_tests):
perturbation = perturbation/2
m_pert = m_serial + perturbation*perturbation_vec
fd_errors.append(abs(j_unpert - J(m_pert, m_shape)))
grad_errors.append(abs(j_unpert + numpy.dot(djdm_unpert, perturbation_vec*perturbation) - J(m_pert, m_shape)))
print("Error in Taylor expansion of order 0: ", fd_errors)
print("Error in Taylor expansion of order 1: ", grad_errors)
for i in range(nb_tests-1):
if fd_errors[i+1] == 0.0 or fd_errors[i] == 0.0:
fd_conv.append(1.0)
else:
fd_conv.append(math.log(fd_errors[i]/fd_errors[i+1], 2))
if grad_errors[i+1] == 0.0 or grad_errors[i] == 0.0:
grad_conv.append(2.0)
else:
grad_conv.append(math.log(grad_errors[i]/grad_errors[i+1], 2))
print("Convergence of Taylor expansion of order 0 (should be 1.0): ", fd_conv)
print("Convergence of Taylor expansion of order 1 (should be 2.0): ", grad_conv)
stat_writer[(functional_name, "iteration")] = 0
stat_writer[(functional_name + "_gradient_error", "convergence")] = min(grad_conv)
stat_writer.write()
# This function gets called after each optimisation iteration.
# It is currently used to write statistics and copy model output files into a subdirectory
def callback(m_serial, m_shape):
global iteration
iteration = iteration + 1
stat_writer[(functional_name, "iteration")] = iteration
stat_writer.write()
if superspud(opt_options, "libspud.have_option('/debug/save_model_output')"):
save_model_results()
print('-' * 80)
print(' Finished optimisation iteration', iteration)
print('-' * 80)
def save_model_results():
global iteration
# Copy any model output files in a subdirectory
simulation_name = superspud(model_options, "libspud.get_option('/simulation_name')")
Popen(["mkdir", "opt_"+str(iteration)+"_"+simulation_name.strip()])
Popen("cp "+simulation_name.strip()+"* "+"opt_"+str(iteration)+"_"+simulation_name.strip(), shell=True)
############################################################################
############################################################################
print('-' * 80)
print(' Beginning of optimisation loop')
print('-' * 80)
############################################################################
### Initialisation of optimisation loop ###
global iteration
iteration = 0
# Initialise stat file
if verbose:
print("Initialise stat file")
stat_writer=stat_creator(superspud(opt_options, "libspud.get_option('/name')").strip() + '.stat')
# Get the optimisation settings
if verbose:
print("Read oml settings")
algo = superspud(opt_options, "libspud.get_option('optimisation_options/optimisation_algorithm[0]/name')")
have_bound = have_bounds(opt_options, model_options)
# Create the memoized version of the functional (derivative) evaluation functions
mem_pure_dJdm = MemoizeMutable(pure_dJdm)
mem_pure_J = MemoizeMutable(pure_J)
### Get initial controls ###
### The initial controls are retrieved in several steps.
### 1) get custom controls by running the user specified python code and save the associated pkl files
### 2) run the forward/adjoint model without the "load_control" flag. The model will save the initial default controls as pkl files.
### 3) Finally load these initial default controls files
# First we initialise the custom controls
# This has to be done first since the next step
# involves running the model and therefore
# will need the custom controls to be set.
if verbose:
print("Get initial custom controls")
custom_m = get_custom_controls(opt_options)
# Next run the forward/adjoint model without the option
# /adjoint/controls/load_controls
if verbose:
print("Get initial default controls")
model_file = superspud(opt_options, "libspud.get_option('/model/option_file')")
if (superspud(model_options, "libspud.have_option('/adjoint/controls/load_controls')")):
superspud(model_options, ["libspud.delete_option('/adjoint/controls/load_controls')", "libspud.write_options('"+ model_file +"')"])
# Run the forward model including adjoint.
functional_name = superspud(opt_options, "libspud.get_option('/functional/name')")
if superspud(opt_options, "libspud.have_option('/adjoint/functional::"+functional_name+"/disable_adjoint_run')"):
superspud(opt_options, "libspud.delete_option('/adjoint/functional::"+functional_name+"/disable_adjoint_run')")
[custom_m_serial, custom_m_shape] = serialise(custom_m)
mem_pure_J(custom_m_serial, custom_m_shape)
if superspud(opt_options, "libspud.have_option('/debug/save_model_output')"):
save_model_results()
# This should have created all the default initial controls and we can now activate the load_controls flag.
superspud(model_options, ["libspud.add_option('/adjoint/controls/load_controls')", "libspud.write_options('"+ model_file +"')"])
# Finally, load the default controls
m = read_default_controls(opt_options, model_options)
m_bounds = read_default_control_bounds(opt_options, model_options)
nb_controls = len(m) + len(custom_m)
# And merge them
m.update(custom_m)
if (nb_controls != len(m)):
print("Error: Two controls with the same name defined.")
print("The controls must have all unique names.")
print("Your controls are: ", m.keys())
exit()
djdm = read_control_derivatives(opt_options, model_options)
# Now complete the bounds arrays where the user did not specify any bounds
m_bounds = complete_default_control_bounds(m, m_bounds)
# Since now all the controls and derivatives are defined, we can check the consistency of the control variables
check_control_consistency(m, djdm, m_bounds)
### Serialise the controls for the optimisation routine
[m_serial, m_shape] = serialise(m)
[m_lb_serial, m_lb_shape] = serialise(m_bounds["lower_bound"])
[m_ub_serial, m_ub_shape] = serialise(m_bounds["upper_bound"])
assert(m_ub_shape == m_shape)
assert(m_lb_shape == m_shape)
# zip the lower and upper bound to a list of tuples
  m_bounds_serial = list(zip(m_lb_serial, m_ub_serial))
# Check gradient
if superspud(opt_options, "libspud.have_option('/debug/check_gradient')"):
check_gradient(m_serial, m_shape)
############################################################################
if algo != 'NULL':
print('-' * 80)
print(' Entering %s optimisation algorithm ' % algo)
print('-' * 80)
############################################################################
################################
########### BFGS ###############
################################
if algo == 'BFGS':
if have_bound:
print("BFGS does not support bounds.")
exit()
tol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::BFGS/tolerance')")
maxiter=None
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::BFGS/iterations')"):
maxiter = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::BFGS/iterations')")
res = scipy.optimize.fmin_bfgs(J, m_serial, dJdm, gtol=tol, full_output=1, maxiter=maxiter, args=(m_shape, ), callback = lambda m: callback(m, m_shape))
print("Functional value J(m): ", res[1])
print("Control state m: ", res[0])
################################
########### NCG ################
################################
elif algo == 'NCG':
if have_bound:
print("NCG does not support bounds.")
exit()
tol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::NCG/tolerance')")
maxiter=None
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::NCG/iterations')"):
maxiter = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::NCG/iterations')")
res = scipy.optimize.fmin_ncg(J, m_serial, dJdm, avextol=tol, full_output=1, maxiter=maxiter, args=(m_shape, ), callback = lambda m: callback(m, m_shape))
print("Functional value J(m): ", res[1])
print("Control state m: ", res[0])
################################
########### L-BFGS-B ###########
################################
elif algo == 'L-BFGS-B':
opt_args = dict(func=J, x0=m_serial, fprime=dJdm, args=(m_shape,))
if have_bound:
opt_args['bounds'] = m_bounds_serial
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/tolerance')"):
pgtol = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/tolerance')")
opt_args['pgtol'] = pgtol
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/factr')"):
factr = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/factr')")
opt_args['factr'] = factr
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/memory_limit')"):
memory_limit = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/memory_limit')")
opt_args['m'] = memory_limit
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/maximal_functional_evaluations')"):
maxfun = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/maximal_functional_evaluations')")
opt_args['maxfun'] = maxfun
if superspud(opt_options, "libspud.have_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/verbosity')"):
iprint = superspud(opt_options, "libspud.get_option('/optimisation_options/optimisation_algorithm::L-BFGS-B/verbosity')")
opt_args['iprint'] = iprint
res = scipy.optimize.fmin_l_bfgs_b(**opt_args)
print(res)
################################
########### NULL ##############
################################
elif algo == 'NULL':
exit()
else:
print("Unknown optimisation algorithm in option path.")
exit()
################# main() ###################
def main():
global verbose
global debug
parser = argparse.ArgumentParser(description='Optimisation program for fluidity.')
parser.add_argument('filename', metavar='FILE', help="the .oml file")
parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
parser.add_argument('-d', '--debug', action='store_true', help='the debug mode runs additional internal tests.')
args = parser.parse_args()
verbose = args.verbose
debug = args.debug
if not os.path.isfile(args.filename):
print("File", args.filename, "not found.")
exit()
# Initial spud environments for the optimality and model options.
opt_file = args.filename
if not superspud(opt_file, "libspud.have_option('/optimisation_options')"):
print("File", args.filename, "is not a valid .oml file.")
exit()
model_file = superspud(opt_file, "libspud.get_option('/model/option_file')")
if not os.path.isfile(model_file):
print("Could not find ", model_file ," as specified in /model/option_file")
exit()
# Create a copy of the option files so that we don't touch the original
def rename_file(fn):
fn_basename, fn_extension = os.path.splitext(fn)
shutil.copy(fn, fn_basename+'_tmp'+fn_extension)
fn = fn_basename+'_tmp'+fn_extension
return fn
model_file = rename_file(model_file)
opt_file = rename_file(opt_file)
superspud(opt_file, ["libspud.set_option('/model/option_file', '" + model_file + "')", "libspud.write_options('" + opt_file + "')"])
# Check consistency of the option files
check_option_consistency(opt_file, model_file)
# Start the optimisation loop
optimisation_loop(opt_file, model_file)
# Tidy up
os.remove(opt_file)
os.remove(model_file)
################# __main__ ########################
if '__main__'==__name__:
start_time = time.time()
main()
print("Optimisation finished in ", time.time() - start_time, "seconds")
| lgpl-2.1 | -6,594,667,205,454,949,000 | 44.279167 | 158 | 0.648968 | false |
amalakar/dd-agent | checks.d/go_expvar.py | 1 | 7218 | # stdlib
from collections import defaultdict
import re
# 3rd party
import requests
# project
from checks import AgentCheck
DEFAULT_MAX_METRICS = 350
PATH = "path"
ALIAS = "alias"
TYPE = "type"
TAGS = "tags"
GAUGE = "gauge"
RATE = "rate"
DEFAULT_TYPE = GAUGE
SUPPORTED_TYPES = {
GAUGE: AgentCheck.gauge,
RATE: AgentCheck.rate,
}
DEFAULT_METRIC_NAMESPACE = "go_expvar"
# See http://golang.org/pkg/runtime/#MemStats
DEFAULT_GAUGE_MEMSTAT_METRICS = [
# General statistics
"Alloc", "TotalAlloc",
# Main allocation heap statistics
"HeapAlloc", "HeapSys", "HeapIdle", "HeapInuse",
"HeapReleased", "HeapObjects",
]
DEFAULT_RATE_MEMSTAT_METRICS = [
# General statistics
"Lookups", "Mallocs", "Frees",
# Garbage collector statistics
"PauseTotalNs", "NumGC",
]
DEFAULT_METRICS = [{PATH: "memstats/%s" % path, TYPE: GAUGE} for path in DEFAULT_GAUGE_MEMSTAT_METRICS] +\
[{PATH: "memstats/%s" % path, TYPE: RATE} for path in DEFAULT_RATE_MEMSTAT_METRICS]
class GoExpvar(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self._last_gc_count = defaultdict(int)
def _get_data(self, url):
r = requests.get(url)
r.raise_for_status()
return r.json()
def _load(self, instance):
url = instance.get('expvar_url')
if not url:
raise Exception('GoExpvar instance missing "expvar_url" value.')
tags = instance.get('tags', [])
tags.append("expvar_url:%s" % url)
data = self._get_data(url)
metrics = DEFAULT_METRICS + instance.get("metrics", [])
max_metrics = instance.get("max_returned_metrics", DEFAULT_MAX_METRICS)
namespace = instance.get('namespace', DEFAULT_METRIC_NAMESPACE)
return data, tags, metrics, max_metrics, url, namespace
def get_gc_collection_histogram(self, data, tags, url, namespace):
num_gc = data.get("memstats", {}).get("NumGC")
pause_hist = data.get("memstats", {}).get("PauseNs")
last_gc_count = self._last_gc_count[url]
if last_gc_count == num_gc:
# No GC has run. Do nothing
return
start = last_gc_count % 256
end = (num_gc + 255) % 256 + 1
if start < end:
values = pause_hist[start:end]
else:
values = pause_hist[start:] + pause_hist[:end]
self._last_gc_count[url] = num_gc
for value in values:
self.histogram(
self.normalize("memstats.PauseNs", namespace, fix_case=True),
value, tags=tags)
def check(self, instance):
data, tags, metrics, max_metrics, url, namespace = self._load(instance)
self.get_gc_collection_histogram(data, tags, url, namespace)
self.parse_expvar_data(data, tags, metrics, max_metrics, namespace)
def parse_expvar_data(self, data, tags, metrics, max_metrics, namespace):
'''
Report all the metrics based on the configuration in instance
If a metric is not well configured or is not present in the payload,
continue processing metrics but log the information to the info page
'''
count = 0
for metric in metrics:
path = metric.get(PATH)
metric_type = metric.get(TYPE, DEFAULT_TYPE)
metric_tags = list(metric.get(TAGS, []))
metric_tags += tags
alias = metric.get(ALIAS)
if not path:
self.warning("Metric %s has no path" % metric)
continue
if metric_type not in SUPPORTED_TYPES:
self.warning("Metric type %s not supported for this check" % metric_type)
continue
keys = path.split("/")
values = self.deep_get(data, keys)
if len(values) == 0:
self.warning("No results matching path %s" % path)
continue
tag_by_path = alias is not None
for traversed_path, value in values:
actual_path = ".".join(traversed_path)
if tag_by_path:
metric_tags.append("path:%s" % actual_path)
metric_name = alias or self.normalize(actual_path, namespace, fix_case=True)
try:
float(value)
except ValueError:
self.log.warning("Unreportable value for path %s: %s" % (path, value))
continue
if count >= max_metrics:
self.warning("Reporting more metrics than the allowed maximum. "
"Please contact [email protected] for more information.")
return
SUPPORTED_TYPES[metric_type](self, metric_name, value, metric_tags)
count += 1
def deep_get(self, content, keys, traversed_path=None):
'''
Allow to retrieve content nested inside a several layers deep dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"] would return [(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"] would return [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
'''
if traversed_path is None:
traversed_path = []
if keys == []:
return [(traversed_path, content)]
key = keys[0]
regex = "".join(["^", key, "$"])
try:
key_rex = re.compile(regex)
except Exception:
self.warning("Cannot compile regex: %s" % regex)
return []
results = []
for new_key, new_content in self.items(content):
if key_rex.match(new_key):
results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)]))
return results
def items(self, object):
if isinstance(object, list):
for new_key, new_content in enumerate(object):
yield str(new_key), new_content
elif isinstance(object, dict):
for new_key, new_content in object.iteritems():
yield str(new_key), new_content
else:
self.log.warning("Could not parse this object, check the json"
"served by the expvar")
| bsd-3-clause | -5,956,096,732,492,222,000 | 33.869565 | 147 | 0.519396 | false |
deepmind/distrax | distrax/_src/bijectors/split_coupling_test.py | 1 | 10197 | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `split_coupling.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as base_bijector
from distrax._src.bijectors import block
from distrax._src.bijectors import split_coupling
import jax
import jax.numpy as jnp
import numpy as np
def _create_split_coupling_bijector(split_index,
split_axis=-1,
swap=False,
event_ndims=2):
return split_coupling.SplitCoupling(
split_index=split_index,
split_axis=split_axis,
event_ndims=event_ndims,
swap=swap,
conditioner=lambda x: x**2,
bijector=lambda _: lambda x: 2. * x + 3.)
class DummyBijector(base_bijector.Bijector):
def forward_and_log_det(self, x):
super()._check_forward_input_shape(x)
return x, jnp.zeros((x.shape[:-self.event_ndims_in]), dtype=jnp.float_)
class SplitCouplingTest(parameterized.TestCase):
def test_properties(self):
bijector = _create_split_coupling_bijector(
split_index=0, swap=False, split_axis=-1, event_ndims=2)
ones = jnp.ones((4, 5))
self.assertEqual(bijector.split_index, 0)
self.assertEqual(bijector.split_axis, -1)
self.assertFalse(bijector.swap)
np.testing.assert_allclose(
bijector.conditioner(2 * ones), 4 * ones, atol=1e-4)
assert callable(bijector.bijector(ones))
@parameterized.named_parameters(
('negative split_index', {'split_index': -1, 'event_ndims': 0}),
('positive split_axis',
{'split_index': 0, 'event_ndims': 0, 'split_axis': 3}),
('negative event_ndims', {'split_index': 0, 'event_ndims': -1}),
('invalid split_axis',
{'split_index': 0, 'event_ndims': 1, 'split_axis': -2}),
)
def test_invalid_properties(self, bij_params):
bij_params.update(
{'conditioner': lambda x: x, 'bijector': lambda _: lambda x: x})
with self.assertRaises(ValueError):
split_coupling.SplitCoupling(**bij_params)
def test_raises_on_bijector_with_different_event_ndims(self):
inner_bij = lambda _: DummyBijector(1, 0, False, False)
bij_params = {'split_index': 0, 'event_ndims': 1,
'conditioner': lambda x: x, 'bijector': inner_bij}
bij = split_coupling.SplitCoupling(**bij_params)
with self.assertRaises(ValueError):
bij.forward_and_log_det(jnp.zeros((4, 3)))
def test_raises_on_bijector_with_extra_event_ndims(self):
inner_bij = lambda _: DummyBijector(2, 2, False, False)
bij_params = {'split_index': 0, 'event_ndims': 1,
'conditioner': lambda x: x, 'bijector': inner_bij}
bij = split_coupling.SplitCoupling(**bij_params)
with self.assertRaises(ValueError):
bij.forward_and_log_det(jnp.zeros((4, 3)))
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_shapes_are_correct(self, split_index, split_axis, swap):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3))
# Inverse methods.
x, logdet = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3))
@chex.all_variants
def test_swapping_works(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
# Don't swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-1, swap=False)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., :3], x[..., :3])
# Swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-1, swap=True)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., 3:], x[..., 3:])
# Don't swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-2, swap=False)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., :3, :], x[..., :3, :])
# Swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-2, swap=True)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., 3:, :], x[..., 3:, :])
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_inverse_methods_are_correct(self, split_index, split_axis, swap):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_composite_methods_are_consistent(self, split_index, split_axis,
swap):
key = jax.random.PRNGKey(42)
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
# Forward methods.
x = jax.random.normal(key, (2, 3, 4, 5))
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
# Inverse methods.
y = jax.random.normal(key, (2, 3, 4, 5))
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
def test_raises_on_invalid_input_shape(self):
event_shape = (2, 3)
bij = split_coupling.SplitCoupling(
split_index=event_shape[-1] // 2,
event_ndims=len(event_shape),
conditioner=lambda x: x,
bijector=lambda _: lambda x: x)
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.zeros((3,)))
def test_raises_on_invalid_inner_bijector(self):
event_shape = (2, 3)
bij = split_coupling.SplitCoupling(
split_index=event_shape[-1] // 2,
event_ndims=len(event_shape),
conditioner=lambda x: x,
bijector=lambda _: block.Block(lambda x: x, len(event_shape) + 1))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.zeros(event_shape))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = _create_split_coupling_bijector(0, -1, False, event_ndims=2)
x = np.zeros((2, 3, 4, 5))
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -6,103,982,720,229,582,000 | 40.962963 | 80 | 0.611454 | false |
rcatwood/Savu | savu/plugins/filters/component_analysis/ica.py | 1 | 2407 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: ica
:platform: Unix
   :synopsis: A plugin to perform independent component analysis on XRD/XRF spectra
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import logging
from savu.plugins.utils import register_plugin
from savu.plugins.filters.base_component_analysis import BaseComponentAnalysis
from sklearn.decomposition import FastICA
import numpy as np
@register_plugin
class Ica(BaseComponentAnalysis):
"""
This plugin performs independent component analysis on XRD/XRF spectra.
:param w_init: The initial mixing matrix. Default: None.
:param random_state: The state. Default: 1.
"""
def __init__(self):
super(Ica, self).__init__("Ica")
def filter_frames(self, data):
logging.debug("I am starting the old componenty vous")
data = data[0]
sh = data.shape
newshape = (np.prod(sh[:-1]), sh[-1])
print "The shape of the data is:"+str(data.shape) + str(newshape)
data = np.reshape(data, (newshape))
# data will already be shaped correctly
logging.debug("Making the matrix")
ica = FastICA(n_components=self.parameters['number_of_components'],
algorithm='parallel',
whiten=self.parameters['whiten'],
w_init=self.parameters['w_init'],
random_state=self.parameters['random_state'])
logging.debug("Performing the fit")
data = self.remove_nan_inf(data) #otherwise the fit flags up an error for obvious reasons
S_ = ica.fit_transform(data)
print "S_Shape is:"+str(S_.shape)
print "self.images_shape:"+str(self.images_shape)
scores = np.reshape(S_, (self.images_shape))
eigenspectra = ica.components_
logging.debug("mange-tout")
return [scores, eigenspectra]
| gpl-3.0 | 5,032,943,843,606,573,000 | 37.206349 | 98 | 0.666805 | false |
thethythy/Mnemopwd | mnemopwd/client/uilayer/uicomponents/TitledBorderWindow.py | 1 | 2935 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import curses
from .BaseWindow import BaseWindow
class TitledBorderWindow(BaseWindow):
"""
A window with a border and a title. Subclass of BaseWindow.
"""
def __init__(self, parent, h, w, y, x, title, modal=False, colourT=False, colourD=False):
"""Create base window"""
BaseWindow.__init__(self, parent, h, w, y, x, modal=modal)
self.title = title
self.colourT = colourT
self.colourD = colourD
self._create()
def redraw(self):
"""See mother class"""
self._create()
BaseWindow.redraw(self)
def close(self):
"""See mother class"""
if self.modal:
self.shadows.erase() # Erase shadows
self.shadows.refresh()
BaseWindow.close(self)
def _create(self):
self.window.attrset(self.colourD)
self.window.border()
self.window.addstr(1, 2, self.title, self.colourT)
self.window.hline(2, 1, curses.ACS_HLINE, self.w - 2)
# Add a shadow if it is a modal window
if self.modal:
self.shadows = curses.newwin(self.h, self.w + 1, self.y + 1,
self.x + 1)
self.shadows.attrset(self.colourD)
self.shadows.addstr(self.h - 1, 0, chr(0x2580)*self.w) # Horizontal
for i in range(0, self.h - 1):
self.shadows.addstr(i, self.w - 1, chr(0x2588)) # Vertical
self.shadows.refresh()
self.window.refresh()
self.window.attrset(0)
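# Illustrative subclassing sketch (assumed, not part of the original module; the
# window geometry and title below are made up):
#   class InfoWindow(TitledBorderWindow):
#       def __init__(self, parent):
#           TitledBorderWindow.__init__(self, parent, 10, 40, 5, 5, "Information",
#                                       modal=True)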
| bsd-2-clause | -3,652,827,740,056,266,000 | 39.205479 | 93 | 0.667121 | false |
liw/daos | src/tests/ftest/container/container_check.py | 1 | 2451 | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from avocado.core.exceptions import TestFail
from dfuse_test_base import DfuseTestBase
class DfuseContainerCheck(DfuseTestBase):
# pylint: disable=too-few-public-methods,too-many-ancestors
"""Base Dfuse Container check test class.
:avocado: recursive
"""
def test_dfuse_container_check(self):
"""Jira ID: DAOS-3635.
Test Description:
Purpose of this test is to try and mount different container types
to dfuse and check the behavior.
Use cases:
Create pool
Create container of type default
Try to mount to dfuse and check the behavior.
Create container of type POSIX.
Try to mount to dfuse and check the behavior.
:avocado: tags=all,small,full_regression,dfusecontainercheck
"""
# get test params for cont and pool count
cont_types = self.params.get("cont_types", '/run/container/*')
# Create a pool and start dfuse.
self.add_pool(connect=False)
for cont_type in cont_types:
# Get container params
self.add_container(self.pool, create=False)
# create container
if cont_type == "POSIX":
self.container.type.update(cont_type)
self.container.create()
# Attempt to mount the dfuse mount point - this should only succeed
# with a POSIX container
try:
self.start_dfuse(
self.hostlist_clients, self.pool, self.container)
if cont_type != "POSIX":
self.fail("Non-POSIX type container mounted over dfuse")
except TestFail as error:
if cont_type == "POSIX":
self.fail(
"POSIX type container failed dfuse mount: {}".format(
error))
self.log.info(
"Non-POSIX type container expected to fail dfuse mount")
# Verify dfuse is running on the POSIX type container
if cont_type == "POSIX":
self.dfuse.check_running()
# Stop dfuse and destroy the container for next iteration
if not cont_type == "":
self.stop_dfuse()
self.container.destroy(1)
| apache-2.0 | -1,506,915,065,626,267,000 | 34.014286 | 79 | 0.575683 | false |
no-net/gr-winelo | python/client/sim_source_c.py | 1 | 12656 | import numpy
#from grc_gnuradio import blks2 as grc_blks2
from gnuradio import gr, uhd, blocks # , analog
from gruel import pmt
# import grextras for python blocks
import gnuradio.extras
from twisted.internet import reactor
#import thread
from threading import Thread
import time
import random
from winelo.client import SendFactory, uhd_gate
#from winelo.client.tcp_blocks import tcp_source
class sim_source_cc(gr.block):
def __init__(self, hier_blk, serverip, serverport, clientname,
packetsize, samp_rate, center_freq, net_id=0):
gr.block.__init__(
self,
name="WiNeLo source",
in_sig=[numpy.complex64],
out_sig=[numpy.complex64],
)
print '[INFO] WiNeLo - Instantiating %s' % clientname
self.hier_blk = hier_blk
# this will store all samples that came from twisted
self.samples = numpy.zeros(0)
# this is used to connect the block to the twisted reactor
self.twisted_conn = None
self.net_id = net_id
# Needed for WiNeLo-time
self.virtual_counter = 0
# Evaluated for timed commands -> can be higher/absolute (GPS time)
self.virtual_time = 0
self.virt_offset = 0
self.absolute_time = True
self.samp_rate = samp_rate
# Port used by tcp source/sink for sample transmission
self.dataport = None
self.packet_size = packetsize
self.samples_to_produce = self.packet_size
self.drop_one_in_n_cmds = 0 # TODO: was 50 for per_measurement!
# TODO: DEBUG
#self.no_zero_counter = 0
self.dbg_counter = 0
# connect to the server
reactor.connectTCP(serverip,
serverport,
SendFactory(self, {'type': 'rx',
'name': clientname,
'centerfreq': center_freq,
'samprate': self.samp_rate,
'net_id': self.net_id,
'packet_size': packetsize})
)
if not reactor.running:
print '[INFO] WiNeLo - Starting the reactor'
#thread.start_new_thread(reactor.run, (),
#{'installSignalHandlers': 0})
Thread(target=reactor.run, args=(False,)).start()
else:
time.sleep(2)
print '[INFO] WiNeLo - giving twisted time to setup and block ' \
'everything'
time.sleep(3)
def work(self, input_items, output_items):
#print "Source work called"
self.twisted_conn.condition.acquire()
if self.virtual_counter == 0:
self.generate_rx_tags()
while True:
# this is necessary because twisted and gnuradio are running in
# different threads. So it is possible that new samples arrive
# while gnuradio is still working on the old samples
if len(input_items[0]) is 0:
#print "DEBUG: sim_source - waiting for items"
self.twisted_conn.condition.wait()
#print "DEBUG: sim_source - got items"
#if len(input_items[0]) is 0:
# return 0
if self.samples_to_produce <= len(input_items[0]) and \
self.samples_to_produce > 0:
produce_n_samples = self.samples_to_produce
else:
produce_n_samples = len(input_items[0])
if produce_n_samples > len(output_items[0]):
produce_n_samples = len(output_items[0])
#print "DEBUG: src - produce_n: %s - samples_to_produce: %s" \
#% (produce_n_samples, self.samples_to_produce)
#elif self.samples_to_produce < len(input_items[0]):
# print "DEBUG: samples to produce:", self.samples_to_produce,"\
#" - len input:", len(input_items[0]), " - len output:", \
#len(output_items[0])
# if self.samples_to_produce > 0:
# output_items[0][:] = \
#input_items[0][0:self.samples_to_produce]
#else:
output_items[0][0:produce_n_samples] = \
input_items[0][0:produce_n_samples]
### DEBUG:
#no_zeros_last = self.no_zero_counter
#for item in output_items[0][0:produce_n_samples]:
# if item != 0:
# self.no_zero_counter += 1
#if self.no_zero_counter != no_zeros_last:
# print "self.no_zero_counter", self.no_zero_counter
#elif len(input_items[0]) < len(output_items[0]):
# n_processed = len(input_items[0])
# output_items[0] = input_items[0]
#print "Source processed:", n_processed
#print "DEBUG: sim_source - elif - items processed:",
#n_processed
#time.sleep(1.0 / self.samp_rate * n_processed)
#else:
# n_processed = len(output_items[0])
# output_items[0] = input_items[0][0:n_processed]
#print "Source processed:", n_processed
#print "DEBUG: sim_source - else - items processed:", \
#n_processed
#time.sleep(1.0 / self.samp_rate * n_processed)
self.timeout_start = None
self.virtual_counter += produce_n_samples
self.virtual_time += produce_n_samples / float(self.samp_rate)
# TODO TODO: Produce max. diff samples, then call commands before
# running again!
# CHECK TIMED COMMANDS
if len(self.hier_blk.commands) > 0 and \
len(self.hier_blk.command_times) > 0:
#print "DEBUG: evaluating cmd times"
cmd_time, n_cmds = self.hier_blk.command_times[0]
#print "DEBUG: time %s - n_cmds %s - virt_time %s" \
#% (time, n_cmds, self.virtual_time)
while self.virtual_time > (cmd_time + 0.0065):
#print "DEBUG: calling run_timed_cmds"
if self.drop_one_in_n_cmds > 0:
rand_no = random.randint(1, self.drop_one_in_n_cmds)
else:
rand_no = 0
if rand_no == 1:
self.hier_blk.command_times.pop(0)
self.hier_blk.commands.pop(0)
print "[INFO] WiNeLo - Dropped cmd due to HW model!"
else:
#print "DEBUG: RxRxRx - Tuning cmd sent at: %s - " \
#"CMD time: %s" % (self.virtual_time, cmd_time)
#print "DEBUG: Set RX-freq to %s at %s" \
#% (self.hier_blk.commands[0][1], cmd_time)
#print "DEBUG: virtual counter:", self.virtual_counter
self.hier_blk.command_times.pop(0)
#print "DEBUG---------------------hier_blk_cmd_times",\
#self.hier_blk.command_times
self.run_timed_cmds(n_cmds)
if len(self.hier_blk.command_times) > 0:
#print "DEBUG: NEW TIME, CMDS"
cmd_time, n_cmds = self.hier_blk.command_times[0]
else:
break
#if produce_n_samples < self.p_size:
# print "DEBUG: source - ACK less samples"
self.samples_to_produce -= produce_n_samples
#print "DEBUG: NO ACK sent"
#print "DEBUG: NO ACK - produced:", len(output_items[0])
#print "DEBUG: NO ACK - samples to produce:", \
#self.samples_to_produce
#print "DEBUG: NO ACK - len input", len(input_items[0])
if self.samples_to_produce == 0:
self.dbg_counter += 1
#print "DEBUG: ACK senti no:", self.dbg_counter
#print "DEBUG: ACK - produced:", produce_n_samples
self.twisted_conn.samplesReceived()
self.samples_to_produce = self.packet_size
self.twisted_conn.condition.release()
#print "DEBUG: sim_src - produced:", n_processed
return produce_n_samples
def run_timed_cmds(self, n_cmds):
for i in range(n_cmds):
cmd, args = self.hier_blk.commands.pop()
#print "DEBUG: src - running cmd %s with args %s" % (cmd, args)
cmd(*args)
def new_samples_received(self, samples):
self.samples = numpy.append(self.samples, samples)
def set_connection(self, connection):
self.twisted_conn = connection
def set_dataport(self, port):
self.dataport = port
print '[INFO] WiNeLo - Port %s will be used for data transmission' \
% self.dataport
def set_packetsize(self, packet_size):
self.packet_size = packet_size
if self.samples_to_produce > self.packet_size:
self.samples_to_produce = self.packet_size
def update_virttime(self, time_offset):
if self.absolute_time:
print "[INFO] WiNeLo - Setting source time to server time:", \
time_offset
self.virtual_time += time_offset
self.virt_offset = time_offset
def get_dataport(self):
while self.dataport is None:
reactor.callWhenRunning(time.sleep, 0.5)
return self.dataport
def get_time_now(self):
# Calculate time according tot the sample rate & the number of
# processed items
#time = 1.0 / self.samp_rate * self.virtual_counter
time = self.virtual_time
full_secs = int(time)
frac_secs = time - int(time)
# Return full & fractional seconds (like UHD)
return full_secs, frac_secs
def generate_rx_tags(self):
#Produce tags
offset = self.nitems_written(0) + 0
key_time = pmt.pmt_string_to_symbol("rx_time")
#value_time = pmt.from_python(1.0 /
#self.samp_rate * self.virtual_counter)
value_time = pmt.from_python(self.get_time_now())
key_rate = pmt.pmt_string_to_symbol("rx_rate")
value_rate = pmt.from_python(self.samp_rate)
self.add_item_tag(0, offset, key_time, value_time)
self.add_item_tag(0, offset, key_rate, value_rate)
class sim_source_c(gr.hier_block2, uhd_gate):
"""
    Wireless Networks In-the-Loop source
Note: This is not a subclass of uhd.usrp_source because some methods
shouldn't be available at all for this block.
"""
def __init__(self, serverip, serverport, clientname, packetsize,
simulation, samp_rate, center_freq, net_id,
device_addr, stream_args):
gr.hier_block2.__init__(self, "sim_source_c",
gr.io_signature(0, 0, 0),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
uhd_gate.__init__(self)
self.simulation = simulation
self.serverip = serverip
self.samp_rate = samp_rate
self.typ = 'rx'
if not self.simulation:
self.usrp = uhd.usrp_source(device_addr, stream_args)
# TODO: Parameters
self.connect(self.usrp, self)
else:
self.simsrc = sim_source_cc(self, serverip, serverport, clientname,
packetsize, samp_rate, center_freq,
net_id)
# TODO: dirty hack!!!
# self.tcp_source = grc_blks2.tcp_source(itemsize=gr.sizeof_gr_complex,
# addr=self.serverip,
# port=self.simsrc.get_dataport(),
# server=False)
self.tcp_source = gr.udp_source(itemsize=gr.sizeof_gr_complex,
host=self.serverip,
port=self.simsrc.get_dataport(),
payload_size=1472,
eof=False,
wait=True)
self.gain_blk = blocks.multiply_const_vcc((1, ))
self.connect(self.tcp_source, self.gain_blk, self.simsrc, self)
| gpl-3.0 | -4,139,212,272,071,479,300 | 43.720848 | 84 | 0.514855 | false |
stettberger/metagit | gmp/tools.py | 1 | 1689 | from gmp.options import *
import subprocess
import tempfile
import os
class ScreenExecutor:
instance = None
def __init__(self):
self.screen_fd, self.screen_path = tempfile.mkstemp()
self.counter = 0
self.screen_fd = os.fdopen(self.screen_fd, "w")
def get():
if not ScreenExecutor.instance:
ScreenExecutor.instance = ScreenExecutor()
return ScreenExecutor.instance
get = staticmethod(get)
def push(cmd):
i = ScreenExecutor.get()
i.screen_fd.write("""screen %d sh -c "echo; echo '%s' ; %s; echo Press ENTER;read a"\n"""
% (i.counter, cmd.replace("'", "\\\""), cmd))
i.counter += 1
push = staticmethod(push)
def execute():
if Options.opt('screen'):
i = ScreenExecutor.get()
i.screen_fd.write('caption always "%{wR}%c | %?%-Lw%?%{wB}%n*%f %t%?(%u)%?%{wR}%?%+Lw%?"\n')
i.screen_fd.close()
a = subprocess.Popen("screen -c %s" % i.screen_path, shell=True)
a.wait()
os.unlink(i.screen_path)
execute = staticmethod(execute)
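# Illustrative flow (assumed; the command strings are made up). Pushed commands are
# collected into a temporary screenrc and later run in a single GNU screen session,
# one window per command:
#   ScreenExecutor.push("git -C repo1 pull")
#   ScreenExecutor.push("git -C repo2 pull")
#   ScreenExecutor.execute()   # only launches screen when Options.opt('screen') is set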
def esc(str):
str = str.replace("\\", "\\\\")
quote = False
for c in " ;&|{}()$":
if c in str:
quote = True
if quote:
return "'" + str.replace("'", "\\'") + "'"
return str
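# Example behaviour of esc() (illustrative, assumed inputs):
#   esc("plain-arg") -> plain-arg
#   esc("a b; c") -> 'a b; c' (quoted because it contains shell metacharacters)
#   esc("it's") -> it's (no metacharacters, so it is left unquoted)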
echo_exec = True
def execute(cmd, echo=True):
if Options.opt('screen'):
ScreenExecutor.push(cmd)
return
if echo:
print(cmd)
a = subprocess.Popen(cmd, shell=(type(cmd) == str))
# Just wait here if we are not in parallel mode
if not Options.opt('parallel'):
a.wait()
return a
| gpl-3.0 | -679,937,508,764,406,700 | 26.688525 | 104 | 0.541149 | false |
Wizmann/DjangoSimditor | bootstrap3/renderers.py | 1 | 12414 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (TextInput, DateInput, FileInput, CheckboxInput,
ClearableFileInput, Select, RadioSelect, CheckboxSelectMultiple)
from django.forms.extras import SelectDateWidget
from django.forms.forms import BaseForm, BoundField
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, strip_tags
from django.template import Context
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from .bootstrap import get_bootstrap_setting
from bootstrap3.text import text_value
from .exceptions import BootstrapError
from .html import add_css_class
from .forms import (render_field, render_label, render_form_group,
is_widget_with_placeholder, is_widget_required_attribute, FORM_GROUP_CLASS)
class FormRenderer(object):
"""
Default form renderer
"""
def __init__(self, form, layout='', form_group_class=FORM_GROUP_CLASS,
field_class='', label_class='', show_help=True, exclude='',
set_required=True):
if not isinstance(form, BaseForm):
raise BootstrapError(
'Parameter "form" should contain a valid Django Form.')
self.form = form
self.layout = layout
self.form_group_class = form_group_class
self.field_class = field_class
self.label_class = label_class
self.show_help = show_help
self.exclude = exclude
self.set_required = set_required
def render_fields(self):
rendered_fields = []
for field in self.form:
rendered_fields.append(render_field(
field,
layout=self.layout,
form_group_class=self.form_group_class,
field_class=self.field_class,
label_class=self.label_class,
show_help=self.show_help,
exclude=self.exclude,
set_required=self.set_required,
))
return '\n'.join(rendered_fields)
def get_form_errors(self):
form_errors = []
for field in self.form:
if field.is_hidden and field.errors:
form_errors += field.errors
return form_errors + self.form.non_field_errors()
def render_errors(self):
form_errors = self.get_form_errors()
if form_errors:
errors = '\n'.join(['<p>{e}</p>'.format(e=e) for e in form_errors])
return '''
<div class="alert alert-danger alert-dismissable alert-link">
<button class="close" data-dismiss="alert" aria-hidden="true">
×</button>{errors}</div>\n'''.format(errors=errors)
return ''
def render(self):
return self.render_errors() + self.render_fields()
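    # Illustrative usage (assumed, not part of the original module):
    #   renderer = FormRenderer(form, layout='horizontal', show_help=True)
    #   html = renderer.render()  # form-level errors followed by the rendered fields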
class FieldRenderer(object):
"""
Default field renderer
"""
def __init__(self, field, layout='', form_group_class=FORM_GROUP_CLASS,
field_class=None, label_class=None, show_label=True,
show_help=True, exclude='', set_required=True,
addon_before=None, addon_after=None):
# Only allow BoundField
if not isinstance(field, BoundField):
raise BootstrapError('Parameter "field" should contain a valid Django BoundField.')
self.field = field
self.layout = layout
self.form_group_class = form_group_class
self.field_class = field_class
self.label_class = label_class
self.show_label = show_label
self.exclude = exclude
self.set_required = set_required
self.widget = field.field.widget
self.initial_attrs = self.widget.attrs.copy()
self.field_help = force_text(mark_safe(field.help_text)) if show_help and field.help_text else ''
self.field_errors = [conditional_escape(force_text(error)) for error in field.errors]
self.placeholder = field.label
self.form_error_class = getattr(field.form, 'error_css_class', '')
self.form_required_class = getattr(field.form, 'required_css_class', '')
self.addon_before = addon_before
self.addon_after = addon_after
def restore_widget_attrs(self):
self.widget.attrs = self.initial_attrs
def add_class_attrs(self):
self.widget.attrs['class'] = self.widget.attrs.get('class', '')
if not isinstance(self.widget, (CheckboxInput,
RadioSelect,
CheckboxSelectMultiple,
FileInput)):
self.widget.attrs['class'] = add_css_class(
self.widget.attrs['class'], 'form-control')
def add_placeholder_attrs(self):
placeholder = self.widget.attrs.get('placeholder', self.placeholder)
if placeholder and is_widget_with_placeholder(self.widget):
self.widget.attrs['placeholder'] = placeholder
def add_help_attrs(self):
title = self.widget.attrs.get('title', strip_tags(self.field_help))
if not isinstance(self.widget, CheckboxInput):
self.widget.attrs['title'] = title
def add_required_attrs(self):
if self.set_required and is_widget_required_attribute(self.widget):
self.widget.attrs['required'] = 'required'
def add_widget_attrs(self):
self.add_class_attrs()
self.add_placeholder_attrs()
self.add_help_attrs()
self.add_required_attrs()
def list_to_class(self, html, klass):
mapping = [
('<ul', '<div'),
('</ul>', '</div>'),
('<li', '<div class="{klass}"'.format(klass=klass)),
('</li>', '</div>'),
]
for k, v in mapping:
html = html.replace(k, v)
return html
def put_inside_label(self, html):
content = '{field} {label}'.format(field=html, label=self.field.label)
return render_label(content=content, label_title=strip_tags(self.field_help))
def fix_date_select_input(self, html):
div1 = '<div class="col-xs-4">'
div2 = '</div>'
html = html.replace('<select', div1 + '<select')
html = html.replace('</select>', '</select>' + div2)
return '<div class="row bootstrap3-multi-input">' + html + '</div>'
def fix_clearable_file_input(self, html):
"""
Fix a clearable file input
TODO: This needs improvement
Currently Django returns
Currently: <a href="dummy.txt">dummy.txt</a> <input id="file4-clear_id" name="file4-clear" type="checkbox" /> <label for="file4-clear_id">Clear</label><br />Change: <input id="id_file4" name="file4" type="file" /><span class=help-block></span></div>
"""
# TODO This needs improvement
return '<div class="row bootstrap3-multi-input"><div class="col-xs-12">' + html + '</div></div>'
def post_widget_render(self, html):
if isinstance(self.widget, RadioSelect):
html = self.list_to_class(html, 'radio')
elif isinstance(self.widget, CheckboxSelectMultiple):
html = self.list_to_class(html, 'checkbox')
elif isinstance(self.widget, SelectDateWidget):
html = self.fix_date_select_input(html)
elif isinstance(self.widget, ClearableFileInput):
html = self.fix_clearable_file_input(html)
elif isinstance(self.widget, CheckboxInput):
html = self.put_inside_label(html)
return html
def wrap_widget(self, html):
if isinstance(self.widget, CheckboxInput):
html = '<div class="checkbox">{content}</div>'.format(content=html)
return html
def make_input_group(self, html):
if ((self.addon_before or self.addon_after) and
isinstance(self.widget, (TextInput, DateInput, Select))
):
before = '<span class="input-group-addon">{addon}</span>'.format(
addon=self.addon_before) if self.addon_before else ''
after = '<span class="input-group-addon">{addon}</span>'.format(
addon=self.addon_after) if self.addon_after else ''
html = '<div class="input-group">{before}{html}{after}</div>'.format(
before=before, after=after, html=html)
return html
def append_to_field(self, html):
help_text_and_errors = [self.field_help] + self.field_errors \
if self.field_help else self.field_errors
if help_text_and_errors:
help_html = get_template(
'bootstrap3/field_help_text_and_errors.html').render(Context({
'field': self.field,
'help_text_and_errors': help_text_and_errors,
'layout': self.layout,
}))
html += '<span class="help-block">{help}</span>'.format(help=help_html)
return html
def get_field_class(self):
field_class = self.field_class
if not field_class and self.layout == 'horizontal':
field_class = get_bootstrap_setting('horizontal_field_class')
return field_class
def wrap_field(self, html):
field_class = self.get_field_class()
if field_class:
html = '<div class="{klass}">{html}</div>'.format(klass=field_class, html=html)
return html
def get_label_class(self):
label_class = self.label_class
if not label_class and self.layout == 'horizontal':
label_class = get_bootstrap_setting('horizontal_label_class')
label_class = text_value(label_class)
if not self.show_label:
label_class = add_css_class(label_class, 'sr-only')
return add_css_class(label_class, 'control-label')
def get_label(self):
if isinstance(self.widget, CheckboxInput):
label = None
else:
label = self.field.label
if self.layout == 'horizontal' and not label:
return ' '
return label
def add_label(self, html):
label = self.get_label()
if label:
html = render_label(label, label_class=self.get_label_class()) + html
return html
def get_form_group_class(self):
form_group_class = self.form_group_class
if self.field.errors and self.form_error_class:
form_group_class = add_css_class(
form_group_class, self.form_error_class)
if self.field.field.required and self.form_required_class:
form_group_class = add_css_class(
form_group_class, self.form_required_class)
if self.field_errors:
form_group_class = add_css_class(form_group_class, 'has-error')
elif self.field.form.is_bound:
form_group_class = add_css_class(form_group_class, 'has-success')
return form_group_class
def wrap_label_and_field(self, html):
return render_form_group(html, self.get_form_group_class())
def render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(' ', '').split(','):
return ''
# Hidden input requires no special treatment
if self.field.is_hidden:
return force_text(self.field)
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
html = self.post_widget_render(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
html = self.add_label(html)
html = self.wrap_label_and_field(html)
return html
class InlineFieldRenderer(FieldRenderer):
"""
Inline field renderer
"""
def add_error_attrs(self):
field_title = self.widget.attrs.get('title', '')
field_title += ' ' + ' '.join([strip_tags(e) for e in self.field_errors])
self.widget.attrs['title'] = field_title.strip()
def add_widget_attrs(self):
super(InlineFieldRenderer, self).add_widget_attrs()
self.add_error_attrs()
def append_to_field(self, html):
return html
def get_field_class(self):
return self.field_class
def get_label_class(self):
return add_css_class(self.label_class, 'sr-only')
| mit | -1,456,957,295,543,779,600 | 38.788462 | 257 | 0.599807 | false |
elba7r/lite-system | erpnext/config/stock.py | 1 | 6000 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Stock Transactions"),
"items": [
{
"type": "doctype",
"name": "Stock Entry",
"description": _("Record item movement."),
},
{
"type": "doctype",
"name": "Delivery Note",
"description": _("Shipments to customers."),
},
{
"type": "doctype",
"name": "Purchase Receipt",
"description": _("Goods received from Suppliers."),
},
{
"type": "doctype",
"name": "Material Request",
"description": _("Requests for items."),
},
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Balance",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Projected Qty",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"doctype": "Item",
},
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
"description": _("All Products or Services."),
},
{
"type": "doctype",
"name": "Product Bundle",
"description": _("Bundle items at time of sale."),
},
{
"type": "doctype",
"name": "Price List",
"description": _("Price List master.")
},
{
"type": "doctype",
"name": "Item Group",
"icon": "fa fa-sitemap",
"label": _("Item Group"),
"link": "Tree/Item Group",
"description": _("Tree of Item Groups."),
},
{
"type": "doctype",
"name": "Item Price",
"description": _("Multiple Item prices."),
"route": "Report/Item Price"
},
{
"type": "doctype",
"name": "Shipping Rule",
"description": _("Rules for adding shipping costs.")
},
{
"type": "doctype",
"name": "Pricing Rule",
"description": _("Rules for applying pricing and discount.")
},
]
},
{
"label": _("Serial No and Batch"),
"items": [
{
"type": "doctype",
"name": "Serial No",
"description": _("Single unit of an Item."),
},
{
"type": "doctype",
"name": "Batch",
"description": _("Batch (lot) of an Item."),
},
{
"type": "doctype",
"name": "Installation Note",
"description": _("Installation record for a Serial No.")
},
{
"type": "report",
"name": "Serial No Service Contract Expiry",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Status",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Warranty Expiry",
"doctype": "Serial No"
},
]
},
{
"label": _("Tools"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "Stock Reconciliation",
"description": _("Upload stock balance via csv.")
},
{
"type": "doctype",
"name": "Packing Slip",
"description": _("Split Delivery Note into packages.")
},
{
"type": "doctype",
"name": "Quality Inspection",
"description": _("Incoming quality inspection.")
},
{
"type": "doctype",
"name": "Landed Cost Voucher",
"description": _("Update additional costs to calculate landed cost of items"),
}
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Stock Settings",
"description": _("Default settings for stock transactions.")
},
{
"type": "doctype",
"name": "Warehouse",
"description": _("Where items are stored."),
},
{
"type": "doctype",
"name": "UOM",
"label": _("Unit of Measure") + " (UOM)",
"description": _("e.g. Kg, Unit, Nos, m")
},
{
"type": "doctype",
"name": "Item Attribute",
"description": _("Attributes for Item Variants. e.g Size, Color etc."),
},
{
"type": "doctype",
"name": "Brand",
"description": _("Brand master.")
},
]
},
{
"label": _("Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"is_query_report": False,
"name": "Item-wise Price List Rate",
"doctype": "Item Price",
},
{
"type": "page",
"name": "stock-analytics",
"label": _("Stock Analytics"),
"icon": "fa fa-bar-chart"
},
{
"type": "report",
"is_query_report": True,
"name": "Delivery Note Trends",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Receipt Trends",
"doctype": "Purchase Receipt"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Items To Be Received",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"name": "Item Shortage Report",
"route": "Report/Bin/Item Shortage Report",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Transferred",
"doctype": "Material Request"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch-Wise Balance History",
"doctype": "Batch"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Prices",
"doctype": "Price List"
},
{
"type": "report",
"is_query_report": True,
"name": "Itemwise Recommended Reorder Level",
"doctype": "Item"
},
]
},
]
| gpl-3.0 | -9,195,382,982,470,161,000 | 20.505376 | 83 | 0.4915 | false |
dimatura/opendr | slider_demo.py | 1 | 3446 | from cvwrap import cv2
import numpy as np
import chumpy as ch
from copy import deepcopy
def nothing(x):
pass
def get_renderer():
import chumpy as ch
from opendr.everything import *
# Load mesh
m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
m.v += ch.array([0,0,3.])
w, h = (320, 240)
trans = ch.array([[0,0,0]])
# Construct renderer
rn = TexturedRenderer()
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=trans+m.v, f=m.f, texture_image=m.texture_image[:,:,::-1], ft=m.ft, vt=m.vt, bgcolor=ch.zeros(3))
rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f), components=ch.array([4.,0.,0.,0.]), light_color=ch.ones(3))
return rn
def main():
# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv2.namedWindow('image')
cv2.namedWindow('derivatives')
rn = get_renderer()
tracked = {
'sph0': rn.vc.components[0],
'sph1': rn.vc.components[1],
'sph2': rn.vc.components[2],
'sph3': rn.vc.components[3],
'k0': rn.camera.k[0],
'k1': rn.camera.k[1],
'k2': rn.camera.k[2]
}
cnst = 1000
for k in sorted(tracked.keys()):
v = tracked[k]
cv2.createTrackbar(k, 'image', 0,cnst, nothing)
old_tracked = tracked
cv2.setTrackbarPos('sph0', 'image', 800)
while(1):
cv2.imshow('image',rn.r)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
for k, v in tracked.items():
v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
if tracked[k].r[0] != old_tracked[k].r[0]:
drim = rn.dr_wrt(v).reshape(rn.shape)
mn = np.mean(drim)
drim /= np.max(np.abs(drim.ravel()))*2.
drim += .5
# drim = drim - np.min(drim)
# drim = drim / np.max(drim)
cv2.imshow('derivatives', drim)
cv2.waitKey(1)
old_tracked = deepcopy(tracked)
# while True:
# for k_change in sorted(tracked.keys()):
# if k_change == 'sph0':
# continue
# for t in np.arange(0, np.pi, .05):
# cv2.setTrackbarPos(k_change, 'image', int(np.sin(t)*1000))
# cv2.imshow('image',rn.r)
# k = cv2.waitKey(1) & 0xFF
# if k == 27:
# break
#
# for k, v in tracked.items():
# v[:] = np.array(cv2.getTrackbarPos(k, 'image')).astype(np.float32)*4/cnst
# if tracked[k].r[0] != old_tracked[k].r[0]:
# drim = rn.dr_wrt(v).reshape(rn.shape)
# mn = np.mean(drim)
# drim /= np.max(np.abs(drim.ravel()))*2.
# drim += .5
# # drim = drim - np.min(drim)
# # drim = drim / np.max(drim)
# cv2.imshow('derivatives', drim)
#
#
# print rn.vc.components
#
# cv2.waitKey(1)
# old_tracked = deepcopy(tracked)
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| mit | 3,740,670,982,097,944,000 | 31.509434 | 126 | 0.482008 | false |
ceteri/pytextrank | setup.py | 1 | 1642 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pytextrank",
version="2.0.3",
author="Paco Xander Nathan",
author_email="[email protected]",
description="Python implementation of TextRank for phrase extraction and lightweight summarization of text documents",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://github.com/DerwenAI/pytextrank",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Text Processing :: General",
"Topic :: Text Processing :: Indexing",
"Topic :: Text Processing :: Linguistic",
],
python_requires=">=3.5",
install_requires=[
"coverage",
"graphviz",
"networkx",
"spacy",
],
keywords="textrank, spacy, phrase extraction, parsing, natural language processing, nlp, knowledge graph, graph algorithms, text analytics, extractive summarization",
license="MIT",
zip_safe=False,
)
| apache-2.0 | -4,886,367,476,329,915,000 | 38.095238 | 170 | 0.64799 | false |
bjornedstrom/python-aesgcm | test/test.py | 1 | 2446 | from binascii import hexlify, unhexlify
import unittest
import aesgcm
class TestVectors(unittest.TestCase):
VECTORS = [
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': None,
'ptx': unhexlify(b'00000000000000000000000000000000'),
'ctx': unhexlify(b'cea7403d4d606b6e074ec5d3baf39d18'),
'tag': unhexlify(b'd0d1c8a799996bf0265b98b5d48ab919')
},
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': unhexlify(b'00000000000000000000000000000000'),
'ptx': None,
'ctx': None,
'tag': unhexlify(b'2d45552d8575922b3ca3cc538442fa26')
},
{
'key': unhexlify(b'0000000000000000000000000000000000000000000000000000000000000000'),
'iv': unhexlify(b'000000000000000000000000'),
'aad': unhexlify(b'00000000000000000000000000000000'),
'ptx': unhexlify(b'00000000000000000000000000000000'),
'ctx': unhexlify(b'cea7403d4d606b6e074ec5d3baf39d18'),
'tag': unhexlify(b'ae9b1771dba9cf62b39be017940330b4')
}
]
def _verify_vec(self, vec):
enc = aesgcm.EncryptObject(vec['key'], vec['iv'])
dec = aesgcm.DecryptObject(vec['key'], vec['iv'], vec['tag'])
if vec['aad']:
enc.update_aad(vec['aad'])
dec.update_aad(vec['aad'])
if vec['ptx'] and vec['ctx']:
self.assertEqual(vec['ctx'], enc.encrypt(vec['ptx']))
self.assertEqual(vec['ptx'], dec.decrypt(vec['ctx']))
self.assertEqual(vec['tag'], enc.finalize())
self.assertTrue(dec.finalize())
def test_vec_1(self):
self._verify_vec(self.VECTORS[0])
def test_vec_2(self):
self._verify_vec(self.VECTORS[1])
def test_vec_3(self):
self._verify_vec(self.VECTORS[2])
def test_invalid_tag(self):
vec = self.VECTORS[0]
invalid_tag = unhexlify(b'00000000000000000000000000000000')
dec = aesgcm.DecryptObject(vec['key'], vec['iv'], invalid_tag)
dec.decrypt(vec['ctx'])
self.assertRaises(aesgcm.AuthenticationError, dec.finalize)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -3,236,481,412,823,010,000 | 33.942857 | 98 | 0.60834 | false |
Shiruyaka/MeowMeow | Keyring.py | 1 | 2692 | # -*- coding: utf-8 -*-
from collections import namedtuple
import time
import random
from Crypto.PublicKey import RSA
PrivateRing = namedtuple('PrivateRing', 'timestamp key_id pub_key priv_key')
PublicRing = namedtuple('PublicRing', 'timestamp key_id pub_key owner_trust user_name key_legit')
def import_keyring(typeOfKeyRing):
ring = list()
try:
with open(typeOfKeyRing + '_keyring.txt', 'r') as r:
data = r.read()
data = data.rstrip().split('@')
for line in data:
if not line:
continue
line = line.rstrip().split('|')
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*line))
elif typeOfKeyRing == 'pub':
ring.append(PublicRing(*line))
except IOError:
new_file = open(typeOfKeyRing + '_keyring.txt', 'w')
new_file.close()
return ring
def export_keyring(ring, typeOfKeyRing):
with open(typeOfKeyRing + '_keyring.txt', 'w') as w:
for key in ring:
record = ''
for attr in key:
record += attr + '|'
record = record.rstrip('|')
record += '@'
w.write(record)
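# On-disk layout written by export_keyring and parsed by import_keyring (illustrative
# only; the field values are made up). Records are separated by '@' and fields by '|',
# in the namedtuple field order, e.g. for a private ring:
#   1491004800.0|3f2a|<pub_key PEM>|<priv_key PEM>@1491091200.0|7c1d|<pub>|<priv>@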
def add_to_keyring(ring, typeOfKeyRing, attributes):
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*attributes))
else:
ring.append(PublicRing(*attributes))
return ring
######randomly choose key from private keyring to encrypt
def find_pubkey_in_ring(ring, id = None, whose = None):
if id:
result = [x.pub_key for x in ring if x.key_id == id]
if len(result) == 0:
return None
else:
return RSA.importKey(result[0])
elif whose:
result = [x.pub_key for x in ring if x.user_name == whose]
if len(result) == 0:
return None
else:
print len(result)
ind = random.randint(0, len(result) - 1)
print ind
return RSA.importKey(result[ind])
def find_privkey_in_ring(ring, id):
result = [x.priv_key for x in ring if x.key_id == id]
if len(result) != 0:
return RSA.importKey(result[0])
else:
return []
def choose_randomly_enc_key(ring):
ind = random.randint(0,len(ring) - 1)
return RSA.importKey(ring[ind].priv_key), ring[ind].key_id
def parse_keys_from_db(data):
ring = list()
for i in data:
tmstmp = time.mktime(i[0].timetuple())
id = i[1]
pub_key = str(i[2])
usr_name = i[3]
trust = i[4]
ring.append(PublicRing(tmstmp, id, pub_key , 0, usr_name, trust))
return ring
| mit | 4,771,661,473,854,343,000 | 25.92 | 98 | 0.554978 | false |
htcondor/job_hooks | hooks/hook_reply_fetch.py | 1 | 3170 | #!/usr/bin/python
# Copyright 2008 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import pickle
import sys
import os
import logging
from condorutils import SUCCESS, FAILURE
from condorutils.workfetch import *
from condorutils.socketutil import *
from condorutils.osutil import *
from condorutils.readconfig import *
from condorutils.log import *
def main(argv=None):
if argv is None:
argv = sys.argv
log_name = os.path.basename(argv[0])
try:
config = read_condor_config('JOB_HOOKS', ['IP', 'PORT', 'LOG'], permit_param_only = False)
except ConfigError, error:
try:
print >> sys.stderr, 'Warning: %s' % error.msg
print >> sys.stderr, 'Attemping to read config from "/etc/condor/job-hooks.conf"'
config = read_config_file('/etc/condor/job-hooks.conf', 'Hooks')
except ConfigError, error:
print >> sys.stderr, 'Error: %s. Exiting' % error.msg
return(FAILURE)
try:
size = int(read_condor_config('', ['MAX_JOB_HOOKS_LOG'])['max_job_hooks_log'])
except:
size = 1000000
base_logger = create_file_logger(log_name, '%s.reply' % config['log'], logging.INFO, size=size)
log(logging.INFO, log_name, 'Hook called')
# Create a reply_fetch notification
request = condor_wf()
reply_type = sys.argv[1]
if reply_type == 'accept':
request.type = condor_wf_types.reply_claim_accept
elif reply_type == 'reject':
request.type = condor_wf_types.reply_claim_reject
else:
log(logging.ERROR, log_name, 'Received unknown reply fetch type: %s' % reply_type)
return(FAILURE)
# Store the ClassAd from STDIN in the data portion of the message
request.data = ''
for line in sys.stdin:
request.data = request.data + str(line)
slots = grep('^WF_REQ_SLOT\s*=\s*"(.+)"$', request.data)
if slots != None:
log(logging.INFO, log_name, 'Slot %s is making the request' % slots[0].strip())
# Send the message
log(logging.INFO, log_name, 'Contacting daemon')
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client_socket.connect((config['ip'], int(config['port'])))
client_socket.send(pickle.dumps(request, 2))
except Exception, error:
try:
close_socket(client_socket)
except:
pass
log(logging.ERROR, log_name, 'socket error %d: %s' % (error[0], error[1]))
try:
close_socket(client_socket)
except SocketError, error:
log(logging.WARNING, log_name, error.msg)
log(logging.INFO, log_name, 'Hook exiting')
return(SUCCESS)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -4,077,373,557,673,507,000 | 31.680412 | 98 | 0.662145 | false |
pombredanne/discern | examples/problem_grader/grader/migrations/0001_initial.py | 1 | 7167 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Rubric'
db.create_table(u'grader_rubric', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('associated_problem', self.gf('django.db.models.fields.IntegerField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'grader', ['Rubric'])
# Adding model 'RubricOption'
db.create_table(u'grader_rubricoption', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('rubric', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grader.Rubric'])),
('option_points', self.gf('django.db.models.fields.IntegerField')()),
('option_text', self.gf('django.db.models.fields.TextField')()),
('selected', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'grader', ['RubricOption'])
# Adding model 'UserProfile'
db.create_table(u'grader_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('api_key', self.gf('django.db.models.fields.TextField')(default='')),
('api_user', self.gf('django.db.models.fields.TextField')(default='')),
('api_user_created', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'grader', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'Rubric'
db.delete_table(u'grader_rubric')
# Deleting model 'RubricOption'
db.delete_table(u'grader_rubricoption')
# Deleting model 'UserProfile'
db.delete_table(u'grader_userprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'grader.rubric': {
'Meta': {'object_name': 'Rubric'},
'associated_problem': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'grader.rubricoption': {
'Meta': {'object_name': 'RubricOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_points': ('django.db.models.fields.IntegerField', [], {}),
'option_text': ('django.db.models.fields.TextField', [], {}),
'rubric': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['grader.Rubric']"}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'grader.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'api_key': ('django.db.models.fields.TextField', [], {'default': "''"}),
'api_user': ('django.db.models.fields.TextField', [], {'default': "''"}),
'api_user_created': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['grader'] | agpl-3.0 | 4,707,439,369,962,626,000 | 60.793103 | 187 | 0.566206 | false |
deepgraphs/dgraphdb | restDgraphDb/dgraphdb/dgraphdbstore.py | 1 | 1380 | __author__ = 'mpetyx'
from rdflib import Graph
# import rdflib.plugin
from django.conf import settings
import datetime
import os
# register('SQLite', Store, 'rdflib.store.SQLite', 'SQLite')
def random_file_generating():
basename = "deepGraphFile"
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
middle = os.urandom(16).encode('hex')
filename = "_".join([basename, middle, suffix])
return filename
class DeepGraphStore():
store_name = settings.DEEPGRAPHS_DEFAULT_STORAGE
def __init__(self, create=True):
self.create = create
self.path = "databases/" + random_file_generating()
def setUp(self):
self.graph = Graph(store=self.store_name)
self.graph.open(self.path, create=self.create)
if self.create:
self.graph.parse("http://njh.me/foaf.rdf", format='xml')
self.graph.commit()
def open(self, path):
        self.graph = Graph(self.store_name)
        self.graph.open(path, create=False)
        return len(self.graph)
def query(self, sparql_query):
return self.graph.query(sparql_query)
def parse(self, path_to_file_):
self.graph.parse(path_to_file_)
def load(self, triples):
self.graph.load(triples)
def close(self):
self.graph.close()
def size(self):
        size = len(self.graph)
self.close()
return size
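# Minimal usage sketch (illustrative; assumes Django settings and the configured
# rdflib store plugin are available):
#   store = DeepGraphStore()
#   store.setUp()  # opens the graph and seeds it with the FOAF sample data
#   rows = store.query("SELECT ?s WHERE { ?s ?p ?o } LIMIT 5")
#   store.close()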
| mit | 5,964,938,748,346,534,000 | 25.037736 | 68 | 0.624638 | false |
aysteph3/MiniMIPS_Testing | src/algorithm_opt_greedy_ver2.py | 1 | 9513 | # Copyright (C) 2017 Siavoosh Payandeh Azad, Stephen Oyeniran
# for each new function we start from an empty set!
import Logger
import sys
import copy
import itertools
import time
import package
package.generate_folders(package.generated_files_folder)
sys.stdout = Logger.Logger(package.generated_files_folder)
if "-sp" in sys.argv[1:]:
saf_output_patterns_file_name= package.generated_files_folder + "/" +"SAF"+ sys.argv[sys.argv.index('-sp') + 1]
else:
saf_output_patterns_file_name= package.generated_files_folder + "/" + "SAFpatterns.txt"
def check_if_sufficient(function_dict, function_id_1, function_id_2, list_patterns, debug, verbose):
or_op = "0"*package.data_width
if debug:
print "\t--------------------"
print "\tchecking if sufficient number of ones reached!"
print "\t\tline\top1\t\top2\t\tfunc_"+str(function_id_1)+" \t\t func_"+str(function_id_2)+"\t\txor(1,2)\tand(1,xor)\tor(prev_or,and)"
print "\t\t"+"------------------------------------------"*3
for i in list_patterns:
xor_op = format(int(function_dict[i][function_id_1], 2) ^ int(function_dict[i][function_id_2], 2), 'b').zfill(package.data_width)
and_op = format(int(function_dict[i][function_id_2], 2) & int(xor_op, 2), 'b').zfill(package.data_width)
or_op = format(int(or_op, 2) | int(and_op, 2), 'b').zfill(package.data_width)
if debug:
print "\t\t"+str(i)+"\t", function_dict[i][0],"\t", function_dict[i][1],"\t", function_dict[i][function_id_1], "\t", function_dict[i][function_id_2], "\t",str(xor_op).zfill(package.data_width), "\t"+str(and_op)+ "\t"+str(or_op)
if or_op == "1"*package.data_width:
if verbose:
print "\tbingo! all ones!"
return or_op
else:
if debug and verbose:
print "\tdidnt reach all ones!"
return or_op
input_file_name, verbose, debug, output_table_file_name, output_patterns_file_name, scanning_table_file_name, redundant_function_reduction = package.parse_program_arg(sys.argv, package.generated_files_folder)
data_width = package.data_width
print "data_width:", data_width
start_time = time.time()
function_dict = copy.deepcopy(package.parse_input_pattern_file(input_file_name))
len_of_list = len(function_dict[function_dict.keys()[0]])
number_of_lines = len(function_dict.keys())
try:
table_file = open(output_table_file_name, 'w')
scanning_table_file = open(scanning_table_file_name, 'w')
test_patterns_file = open(output_patterns_file_name, 'w')
saf_test_patterns_file = open(saf_output_patterns_file_name, 'w')
except IOError:
print "Could not open input pattern file, test pattern file, conformity or scanning table file!"
sys.exit()
if package.test_subset:
function_list = []
for item in package.test_only_list:
function_list.append(item+1)
else:
function_list = range(2, len_of_list)
package.make_table_header(table_file, function_list)
package.make_table_header(scanning_table_file, function_list)
number_of_ones_in_experiments = 0
number_of_zeros_in_experiments = 0
used_dic = {}
final_set_of_patterns = []
overal_test_length = 0
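# Greedy search: for every ordered pair of functions, keep adding the pattern (or
# pattern combination) that raises the number of distinguished bits the most,
# stopping once the conformity word is all ones or no pattern improves it.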
for func_id_1 in function_list:
current_set_of_patterns = []
string = '%10s' %("f_"+str(func_id_1-1)+"|") # -1 to match the numbering of the functions for readability
scanning_string = '%10s' %("f_"+str(func_id_1-1)+"|") # -1 to match the numbering of the functions for readability
scanning_test_f1 = "0"*data_width
for func_id_2 in function_list:
if func_id_1 != func_id_2:
scanning_test_f1_f2 = "0"*data_width
list_of_used_patterns = range(1, number_of_lines+1)
if verbose:
print "------------------------------------------"*3
print "function 1: ", func_id_1-1, "function 2:", func_id_2-1
print "------------------------------------------"*3
counter = 0
list_of_excluded_patterns = copy.deepcopy(current_set_of_patterns)
break_the_loop = False
best_solution = []
best_value = 0
sufficient = check_if_sufficient(function_dict, func_id_1, func_id_2, list_of_excluded_patterns, debug, verbose)
while(counter < number_of_lines):
list_of_ones_in_ands = package.find_most_signifacant_conformity(function_dict, func_id_1, func_id_2, list_of_used_patterns,
list_of_excluded_patterns, sufficient, debug, verbose)
if len(list_of_ones_in_ands.keys())>0:
if verbose:
print "\tmax number of ones:", max(list_of_ones_in_ands.keys())
if max(list_of_ones_in_ands.keys()) == 0:
break
list_of_best_patterns = list_of_ones_in_ands[max(list_of_ones_in_ands.keys())]
if verbose:
print "\tbest patterns in this round:", list_of_best_patterns
for item in list_of_best_patterns:
if type(item) == int:
item = [item]
if verbose:
print "\t----------------------"
print "\ttrying combination: ", list_of_excluded_patterns+list(item)
sufficient = check_if_sufficient(function_dict, func_id_1, func_id_2, list_of_excluded_patterns+list(item), debug, verbose)
if sufficient.count("1") == len(sufficient):
best_solution = copy.deepcopy(list_of_excluded_patterns+list(item))
if verbose:
print "\twe got it!"
break_the_loop = True
break
else:
if verbose:
print "\tnew number of ones :", sufficient.count("1"), "\t\tprevious value:", best_value
if sufficient.count("1") > best_value:
if verbose:
print "\tfound a better solution!"
list_of_excluded_patterns += list(item)
best_solution = copy.deepcopy(list_of_excluded_patterns)
best_value = sufficient.count("1")
break
if break_the_loop:
break
if break_the_loop:
break
counter += 1
else:
break
if verbose:
print "\t------------------------------------------------------------------"
if verbose:
print "best conformity solution for func ", func_id_1-1, " and func ", func_id_2-1, ": ", sufficient, best_solution
if verbose:
print "------------------------------"
for final_pattern in best_solution:
if final_pattern not in current_set_of_patterns:
current_set_of_patterns.append(final_pattern)
string += "\t"+str(sufficient)
for scan_pattern in best_solution:
scanning_test_f1_f2 = format(int(scanning_test_f1_f2, 2) | int(function_dict[scan_pattern][func_id_1], 2), 'b').zfill(data_width)
if redundant_function_reduction:
if (str(func_id_1-1)+"_"+str(func_id_2-1) in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions[str(func_id_1-1)+"_"+str(func_id_2-1)].count("0")
elif (str(func_id_1-1)+"_*" in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions[str(func_id_1-1)+"_*"].count("0")
elif ("*_"+str(func_id_2-1) in package.related_functions.keys()):
number_of_zeros_in_experiments += sufficient.count("0") - package.related_functions["*_"+str(func_id_2-1)].count("0")
else:
number_of_zeros_in_experiments += sufficient.count("0")
else:
number_of_zeros_in_experiments += sufficient.count("0")
number_of_ones_in_experiments += sufficient.count("1")
used_dic['{0:03}'.format(func_id_1)+"_"+'{0:03}'.format(func_id_2)] = copy.deepcopy(current_set_of_patterns)
else:
scanning_test_f1_f2 = "0"*data_width
string += "\t"+"x"*data_width
scanning_test_f1 = format(int(scanning_test_f1, 2) | int(scanning_test_f1_f2, 2), 'b').zfill(data_width)
scanning_string += "\t"+str(scanning_test_f1_f2)
#-------------------------------------------------------------------------------
# This part fixes the scanning test results for the current function pair
#-------------------------------------------------------------------------------
scanning_test_f1, best_solution = package.run_scanning_optimization(scanning_test_f1, function_dict, func_id_1, debug, verbose, best_solution)
scanning_string += "\t"+str(scanning_test_f1)
scanning_table_file.write(scanning_string+"\n")
table_file.write(string+"\n")
for k in current_set_of_patterns:
if k not in final_set_of_patterns:
final_set_of_patterns.append(k)
opcode = "{0:04b}".format((func_id_1-2))
for j in current_set_of_patterns:
saf_test_patterns_file.write(function_dict[j][0]+function_dict[j][1]+opcode+"\n")
#overal_test_length += len(list_of_necessary_patterns)
print "reporting test length for functions:"
for func_id_1 in range(2, len_of_list):
max_lenght = 0
for item in used_dic.keys():
if int(item.split("_")[0]) == func_id_1:
if len(used_dic[item])>max_lenght:
max_lenght = len(used_dic[item])
overal_test_length += max_lenght
print "function id: ", func_id_1-1, "\ttest length:", max_lenght
stop_time = time.time()
final_unused_patterns = copy.deepcopy(package.final_un_used_pattern(number_of_lines, final_set_of_patterns))
for item in sorted(final_set_of_patterns):
test_patterns_file.write(str(function_dict[item][0])+""+str(function_dict[item][1])+"\n")
# reports!
package.report_usefull_patterns_per_round(used_dic, len_of_list)
print "overal test length:", overal_test_length
package.print_results(final_set_of_patterns, final_unused_patterns, verbose)
package.print_fault_coverage(number_of_lines, number_of_ones_in_experiments, number_of_zeros_in_experiments)
print "------------------------------------------"*3
print "program took ", str(stop_time-start_time), "seconds"
# closing all files
table_file.close()
scanning_table_file.close()
test_patterns_file.close()
saf_test_patterns_file.close()
| gpl-3.0 | -8,961,026,154,048,714,000 | 41.09292 | 230 | 0.644276 | false |
davidandreoletti/pyconvert | util/util.py | 1 | 1159 | def enum(*sequential, **named):
"""
Creates an "enum" type
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
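# Example: Numbers = enum('ZERO', 'ONE', THREE=3) gives Numbers.ZERO == 0 and Numbers.THREE == 3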
def escapeAllWith(aString, anEscapeString):
"""
Escape all characters with the escape string.
@param aString String to escape
@param anEscapeString String to escape the first string with
@return Escaped string
"""
escapedString = list()
for c in aString:
escapedString.append(anEscapeString)
escapedString.append(c)
return ''.join(escapedString)
def escapePathForUnix(aString):
"""
Escape every character in the path with a backslash (Unix shell escaping).
@param aString String to escape
@return Escaped string
"""
path = escapeAllWith(aString,"\\")
return path
def escapePathForOSIndependentShell(aString):
"""
Escape a path for an OS independent shell. Currently identical to the Unix escaping.
@param aString String to escape
@return Escaped string
"""
return escapePathForUnix(aString)
| mit | 3,658,403,551,419,297,000 | 25.340909 | 66 | 0.672994 | false |
jreback/pandas | pandas/core/indexes/datetimelike.py | 1 | 28697 | """
Base and utility classes for tseries type pandas objects.
"""
from datetime import datetime
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, TypeVar, Union, cast
import numpy as np
from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib
from pandas._libs.tslibs import BaseOffset, Resolution, Tick
from pandas._typing import Callable, Label
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
is_period_dtype,
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
make_wrapped_arith_op,
)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
def _join_i8_wrapper(joinf, with_indexers: bool = True):
"""
Create the join wrapper methods.
"""
# error: 'staticmethod' used with a non-method
@staticmethod # type: ignore[misc]
def wrapper(left, right):
# Note: these only get called with left.dtype == right.dtype
if isinstance(
left, (np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin)
):
left = left.view("i8")
if isinstance(
right,
(np.ndarray, DatetimeIndexOpsMixin, ABCSeries, DatetimeLikeArrayMixin),
):
right = right.view("i8")
results = joinf(left, right)
if with_indexers:
# dtype should be timedelta64[ns] for TimedeltaIndex
# and datetime64[ns] for DatetimeIndex
dtype = cast(np.dtype, left.dtype).base
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
"""
_can_hold_strings = False
_data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
_data_cls: Union[Type[DatetimeArray], Type[TimedeltaArray], Type[PeriodArray]]
freq: Optional[BaseOffset]
freqstr: Optional[str]
_resolution_obj: Resolution
_bool_ops: List[str] = []
_field_ops: List[str] = []
# error: "Callable[[Any], Any]" has no attribute "fget"
hasnans = cache_readonly(
DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined]
)
_hasnans = hasnans # for index / array -agnostic code
@classmethod
def _simple_new(
cls,
values: Union[DatetimeArray, TimedeltaArray, PeriodArray],
name: Label = None,
):
assert isinstance(values, cls._data_cls), type(values)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result._reset_identity()
return result
@property
def _is_all_dates(self) -> bool:
return True
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self) -> np.ndarray:
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc and other functions.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
if not is_period_dtype(self.dtype) and attrs["freq"]:
# no need to infer if freq is None
attrs["freq"] = "infer"
return type(self)(result, **attrs)
# ------------------------------------------------------------------------
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
res = self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return bool(
is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
result = NDArrayBackedExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
if isinstance(maybe_slice, slice):
freq = self._data._get_getitem_freq(maybe_slice)
result._data._freq = freq
return result
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
return tolerance
def tolist(self) -> List:
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
if len(i8) and self.is_monotonic_increasing:
# quick check
if i8[0] != iNaT:
return self._data._box_func(i8[0])
if self.hasnans:
if not skipna:
return self._na_value
i8 = i8[~self._isnan]
if not len(i8):
return self._na_value
min_stamp = i8.min()
return self._data._box_func(min_stamp)
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo("int64").max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
if len(i8) and self.is_monotonic:
# quick check
if i8[-1] != iNaT:
return self._data._box_func(i8[-1])
if self.hasnans:
if not skipna:
return self._na_value
i8 = i8[~self._isnan]
if not len(i8):
return self._na_value
max_stamp = i8.max()
return self._data._box_func(max_stamp)
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Optional[Callable] = None,
na_rep: str = "NaT",
date_format: Optional[str] = None,
) -> List[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: List[str], na_rep: str = "NaT", date_format: Optional[str] = None
) -> List[str]:
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq)
attrs.append(("freq", freq))
return attrs
def _summary(self, name=None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
Name to use in the summary representation.
Returns
-------
str
Summarized representation of the index.
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = f", {formatter(self[0])} to {formatter(self[-1])}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
result = f"{name}: {len(self)} entries{index_summary}"
if self.freq:
result += f"\nFreq: {self.freqstr}"
# display as values, not quoted
result = result.replace("'", "")
return result
# --------------------------------------------------------------------
# Indexing Methods
def _validate_partial_date_slice(self, reso: Resolution):
raise NotImplementedError
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
raise NotImplementedError
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
self._validate_partial_date_slice(reso)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
# --------------------------------------------------------------------
# Arithmetic Methods
__add__ = make_wrapped_arith_op("__add__")
__sub__ = make_wrapped_arith_op("__sub__")
__radd__ = make_wrapped_arith_op("__radd__")
__rsub__ = make_wrapped_arith_op("__rsub__")
__pow__ = make_wrapped_arith_op("__pow__")
__rpow__ = make_wrapped_arith_op("__rpow__")
__mul__ = make_wrapped_arith_op("__mul__")
__rmul__ = make_wrapped_arith_op("__rmul__")
__floordiv__ = make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = make_wrapped_arith_op("__rfloordiv__")
__mod__ = make_wrapped_arith_op("__mod__")
__rmod__ = make_wrapped_arith_op("__rmod__")
__divmod__ = make_wrapped_arith_op("__divmod__")
__rdivmod__ = make_wrapped_arith_op("__rdivmod__")
__truediv__ = make_wrapped_arith_op("__truediv__")
__rtruediv__ = make_wrapped_arith_op("__rtruediv__")
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
# --------------------------------------------------------------------
# List-like Methods
def _get_delete_freq(self, loc: int):
"""
Find the `freq` for self.delete(loc).
"""
freq = None
if is_period_dtype(self.dtype):
freq = self.freq
elif self.freq is not None:
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
np.asarray(loc, dtype=np.intp), len(self)
)
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
return freq
def _get_insert_freq(self, loc, item):
"""
Find the `freq` for self.insert(loc, item).
"""
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if is_period_dtype(self.dtype):
freq = self.freq
elif self.freq is not None:
# freq can be preserved on edge cases
if self.size:
if item is NaT:
pass
elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
else:
# Adding a single item to an empty index may preserve freq
if self.freq.is_on_offset(item):
freq = self.freq
return freq
@doc(NDArrayBackedExtensionIndex.delete)
def delete(self, loc):
result = super().delete(loc)
result._data._freq = self._get_delete_freq(loc)
return result
@doc(NDArrayBackedExtensionIndex.insert)
def insert(self, loc: int, item):
result = super().insert(loc, item)
result._data._freq = self._get_insert_freq(loc, item)
return result
# --------------------------------------------------------------------
# Join/Set Methods
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
"""
if is_period_dtype(self.dtype):
freq = self.freq
else:
self = cast(DatetimeTimedeltaMixin, self)
freq = self.freq if self._can_fast_union(other) else None
return freq
def _wrap_joined_index(self, joined: np.ndarray, other):
assert other.dtype == self.dtype, (other.dtype, self.dtype)
result = super()._wrap_joined_index(joined, other)
result._data._freq = self._get_join_freq(other)
return result
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
try:
return self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
return com.asarray_tuplesafe(keyarr)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self.name)
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return False
def is_type_compatible(self, kind: str) -> bool:
return kind in self._data._infer_matches
# --------------------------------------------------------------------
# Set Operation Methods
@Appender(Index.difference.__doc__)
def difference(self, other, sort=None):
new_idx = super().difference(other, sort=sort)._with_freq(None)
return new_idx
def _intersection(self, other: Index, sort=False) -> Index:
"""
intersection specialized to the case with matching dtypes.
"""
other = cast("DatetimeTimedeltaMixin", other)
if len(self) == 0:
return self.copy()._get_reconciled_name_object(other)
if len(other) == 0:
return other.copy()._get_reconciled_name_object(self)
elif not self._can_fast_intersect(other):
result = Index._intersection(self, other, sort=sort)
# We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq("infer")
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
# and ends with the index of which the last elements is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
result = self[:0]
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left._values[lslice]
# error: Argument 1 to "_simple_new" of "DatetimeIndexOpsMixin" has
# incompatible type "Union[ExtensionArray, Any]"; expected
# "Union[DatetimeArray, TimedeltaArray, PeriodArray]" [arg-type]
result = type(self)._simple_new(left_chunk) # type: ignore[arg-type]
return self._wrap_setop_result(other, result)
def _can_fast_intersect(self: _T, other: _T) -> bool:
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
elif self.freq.is_anchored():
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
return True
elif not len(self) or not len(other):
return False
elif isinstance(self.freq, Tick):
# We "line up" if and only if the difference between two of our points
# is a multiple of our freq
diff = self[0] - other[0]
remainder = diff % self.freq.delta
return remainder == Timedelta(0)
return True
def _can_fast_union(self: _T, other: _T) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
if not isinstance(other, type(self)):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other, sort=None):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates = concat_compat((left._values, right_chunk))
# With sort being False, we can't infer that result.freq == self.freq
# TODO: no tests rely on the _with_freq("infer"); needed?
result = self._shallow_copy(dates)._with_freq("infer")
return result
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
# The can_fast_union check ensures that the result.freq
# should match self.freq
dates = type(self._data)(dates, freq=self.freq)
result = type(self)._simple_new(dates)
return result
else:
return left
def _union(self, other, sort):
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
result = this._fast_union(other, sort=sort)
if sort is None:
# In the case where sort is None, _can_fast_union
# implies that result.freq should match self.freq
assert result.freq == self.freq, (result.freq, self.freq)
elif result.freq is None:
# TODO: no tests rely on this; needed?
result = result._with_freq("infer")
return result
else:
i8self = Int64Index._simple_new(self.asi8)
i8other = Int64Index._simple_new(other.asi8)
i8result = i8self._union(i8other, sort=sort)
result = type(self)(i8result, dtype=self.dtype, freq="infer")
return result
# --------------------------------------------------------------------
# Join Methods
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique, with_indexers=False
)
def join(
self, other, how: str = "left", level=None, return_indexers=False, sort=False
):
"""
See Index.join
"""
pself, pother = self._maybe_promote(other)
if pself is not self or pother is not other:
return pself.join(
pother, how=how, level=level, return_indexers=return_indexers, sort=sort
)
this, other = self._maybe_utc_convert(other)
return Index.join(
this,
other,
how=how,
level=level,
return_indexers=return_indexers,
sort=sort,
)
def _maybe_utc_convert(self: _T, other: Index) -> Tuple[_T, Index]:
# Overridden by DatetimeIndex
return self, other
# --------------------------------------------------------------------
# List-Like Methods
@Appender(DatetimeIndexOpsMixin.insert.__doc__)
def insert(self, loc, item):
if isinstance(item, str):
# TODO: Why are strings special?
# TODO: Should we attempt _scalar_from_string?
return self.astype(object).insert(loc, item)
return DatetimeIndexOpsMixin.insert(self, loc, item)
| bsd-3-clause | -3,249,803,881,970,340,400 | 32.023015 | 88 | 0.552671 | false |
RJT1990/pyflux | pyflux/gas/scores.py | 1 | 2369 | from numpy import abs, exp, power, array, sqrt, pi
from scipy.special import gamma
#TODO: This file should eventually be replaced, by moving the existing functions (used for GARCH based models)
# into the GARCH folder, with Cythonizations
class Score(object):
@staticmethod
def score(y,loc,scale,shape):
pass
@staticmethod
def adj_score(y,loc,scale,shape):
pass
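# Score (and adjusted score) of the Beta-t observation density; note that `scale`
# enters through exp(scale), i.e. it is parameterised on the log scale.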
class BetatScore(Score):
def __init__(self):
super(BetatScore, self).__init__()
@staticmethod
def mu_score(y,loc,scale,shape):
try:
return (((shape+1.0)*power(y-loc,2))/float(shape*exp(scale) + power(y-loc,2))) - 1.0
except:
return -1.0
@staticmethod
def mu_adj_score(y,loc,scale,shape):
try:
return (((shape+1.0)*power(y-loc,2))/float(shape*exp(scale) + power(y-loc,2))) - 1.0
except:
return -1.0
def score(self,y,loc,scale,shape):
return array([self.mu_score(y,loc,scale,shape)])
def adj_score(self,y,loc,scale,shape):
return array([self.mu_adj_score(y,loc,scale,shape)])
class SkewBetatScore(Score):
def __init__(self):
super(SkewBetatScore, self).__init__()
@staticmethod
def tv_variate_exp(df):
return (sqrt(df)*gamma((df-1.0)/2.0))/(sqrt(pi)*gamma(df/2.0))
@staticmethod
def mu_score(y,loc,scale,shape,skewness):
try:
if (y-loc)>=0:
return (((shape+1.0)*power(y-loc,2))/float(power(skewness,2)*shape*exp(scale) + power(y-loc,2))) - 1.0
else:
return (((shape+1.0)*power(y-loc,2))/float(power(skewness,-2)*shape*exp(scale) + power(y-loc,2))) - 1.0
except:
return -1.0
@staticmethod
def mu_adj_score(y,loc,scale,shape,skewness):
try:
if (y-loc)>=0:
return (((shape+1.0)*power(y-loc,2))/float(power(skewness,2)*shape*exp(scale) + power(y-loc,2))) - 1.0
else:
return (((shape+1.0)*power(y-loc,2))/float(power(skewness,-2)*shape*exp(scale) + power(y-loc,2))) - 1.0
except:
return -1.0
def score(self,y,loc,scale,shape,skewness):
return array([self.mu_score(y,loc,scale,shape,skewness)])
def adj_score(self,y,loc,scale,shape,skewness):
return array([self.mu_adj_score(y,loc,scale,shape,skewness)]) | bsd-3-clause | -3,477,348,812,986,579,500 | 29.779221 | 123 | 0.580414 | false |
openstack/tempest-lib | tempest_lib/tests/services/compute/test_aggregates_client.py | 1 | 6220 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.services.compute import aggregates_client
from tempest_lib.tests import fake_auth_provider
from tempest_lib.tests.services.compute import base
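# Unit tests for AggregatesClient: each case replays a canned REST response through
# check_service_client_function, once as a str body and once as a bytes body.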
class TestAggregatesClient(base.BaseComputeServiceTest):
FAKE_SHOW_AGGREGATE = {
"aggregate":
{
"name": "hoge",
"availability_zone": None,
"deleted": False,
"created_at":
"2015-07-16T03:07:32.000000",
"updated_at": None,
"hosts": [],
"deleted_at": None,
"id": 1,
"metadata": {}
}
}
FAKE_CREATE_AGGREGATE = {
"aggregate":
{
"name": u'\xf4',
"availability_zone": None,
"deleted": False,
"created_at": "2015-07-21T04:11:18.000000",
"updated_at": None,
"deleted_at": None,
"id": 1
}
}
FAKE_UPDATE_AGGREGATE = {
"aggregate":
{
"name": u'\xe9',
"availability_zone": None,
"deleted": False,
"created_at": "2015-07-16T03:07:32.000000",
"updated_at": "2015-07-23T05:16:29.000000",
"hosts": [],
"deleted_at": None,
"id": 1,
"metadata": {}
}
}
FAKE_AGGREGATE = {
"availability_zone": "nova",
"created_at": "2013-08-18T12:17:56.297823",
"deleted": False,
"deleted_at": None,
"hosts": [
"21549b2f665945baaa7101926a00143c"
],
"id": 1,
"metadata": {
"availability_zone": "nova"
},
"name": u'\xe9',
"updated_at": None
}
FAKE_ADD_HOST = {'aggregate': FAKE_AGGREGATE}
FAKE_REMOVE_HOST = {'aggregate': FAKE_AGGREGATE}
FAKE_SET_METADATA = {'aggregate': FAKE_AGGREGATE}
def setUp(self):
super(TestAggregatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = aggregates_client.AggregatesClient(
fake_auth, 'compute', 'regionOne')
def _test_list_aggregates(self, bytes_body=False):
self.check_service_client_function(
self.client.list_aggregates,
'tempest_lib.common.rest_client.RestClient.get',
{"aggregates": []},
bytes_body)
def test_list_aggregates_with_str_body(self):
self._test_list_aggregates()
def test_list_aggregates_with_bytes_body(self):
self._test_list_aggregates(bytes_body=True)
def _test_show_aggregate(self, bytes_body=False):
self.check_service_client_function(
self.client.show_aggregate,
'tempest_lib.common.rest_client.RestClient.get',
self.FAKE_SHOW_AGGREGATE,
bytes_body,
aggregate_id=1)
def test_show_aggregate_with_str_body(self):
self._test_show_aggregate()
def test_show_aggregate_with_bytes_body(self):
self._test_show_aggregate(bytes_body=True)
def _test_create_aggregate(self, bytes_body=False):
self.check_service_client_function(
self.client.create_aggregate,
'tempest_lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_AGGREGATE,
bytes_body,
name='hoge')
def test_create_aggregate_with_str_body(self):
self._test_create_aggregate()
def test_create_aggregate_with_bytes_body(self):
self._test_create_aggregate(bytes_body=True)
def test_delete_aggregate(self):
self.check_service_client_function(
self.client.delete_aggregate,
'tempest_lib.common.rest_client.RestClient.delete',
{}, aggregate_id="1")
def _test_update_aggregate(self, bytes_body=False):
self.check_service_client_function(
self.client.update_aggregate,
'tempest_lib.common.rest_client.RestClient.put',
self.FAKE_UPDATE_AGGREGATE,
bytes_body,
aggregate_id=1)
def test_update_aggregate_with_str_body(self):
self._test_update_aggregate()
def test_update_aggregate_with_bytes_body(self):
self._test_update_aggregate(bytes_body=True)
def _test_add_host(self, bytes_body=False):
self.check_service_client_function(
self.client.add_host,
'tempest_lib.common.rest_client.RestClient.post',
self.FAKE_ADD_HOST,
bytes_body,
aggregate_id=1)
def test_add_host_with_str_body(self):
self._test_add_host()
def test_add_host_with_bytes_body(self):
self._test_add_host(bytes_body=True)
def _test_remove_host(self, bytes_body=False):
self.check_service_client_function(
self.client.remove_host,
'tempest_lib.common.rest_client.RestClient.post',
self.FAKE_REMOVE_HOST,
bytes_body,
aggregate_id=1)
def test_remove_host_with_str_body(self):
self._test_remove_host()
def test_remove_host_with_bytes_body(self):
self._test_remove_host(bytes_body=True)
def _test_set_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.set_metadata,
'tempest_lib.common.rest_client.RestClient.post',
self.FAKE_SET_METADATA,
bytes_body,
aggregate_id=1)
def test_set_metadata_with_str_body(self):
self._test_set_metadata()
def test_set_metadata_with_bytes_body(self):
self._test_set_metadata(bytes_body=True)
| apache-2.0 | -3,175,314,732,062,788,000 | 31.395833 | 78 | 0.589389 | false |
mogoweb/chromium-crosswalk | chrome/common/extensions/docs/server2/server_instance.py | 1 | 6582 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from api_data_source import APIDataSource
from api_list_data_source import APIListDataSource
from appengine_wrappers import IsDevServer
from availability_finder import AvailabilityFinder
from compiled_file_system import CompiledFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from example_zipper import ExampleZipper
from host_file_system_creator import HostFileSystemCreator
from host_file_system_iterator import HostFileSystemIterator
from intro_data_source import IntroDataSource
from object_store_creator import ObjectStoreCreator
from path_canonicalizer import PathCanonicalizer
from permissions_data_source import PermissionsDataSource
from redirector import Redirector
from reference_resolver import ReferenceResolver
from samples_data_source import SamplesDataSource
import svn_constants
from template_data_source import TemplateDataSource
from test_branch_utility import TestBranchUtility
from test_object_store import TestObjectStore
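# Wires together the file systems, object stores and data source factories that back
# a single instance of the extensions docs server.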
class ServerInstance(object):
def __init__(self,
object_store_creator,
host_file_system,
app_samples_file_system,
base_path,
compiled_fs_factory,
branch_utility,
host_file_system_creator):
self.object_store_creator = object_store_creator
self.host_file_system = host_file_system
self.app_samples_file_system = app_samples_file_system
self.compiled_host_fs_factory = compiled_fs_factory
self.host_file_system_creator = host_file_system_creator
self.host_file_system_iterator = HostFileSystemIterator(
host_file_system_creator,
host_file_system,
branch_utility)
self.availability_finder = AvailabilityFinder(
self.host_file_system_iterator,
object_store_creator,
branch_utility)
self.api_list_data_source_factory = APIListDataSource.Factory(
self.compiled_host_fs_factory,
self.host_file_system,
svn_constants.API_PATH,
svn_constants.PUBLIC_TEMPLATE_PATH)
self.api_data_source_factory = APIDataSource.Factory(
self.compiled_host_fs_factory,
svn_constants.API_PATH,
self.availability_finder,
branch_utility)
self.ref_resolver_factory = ReferenceResolver.Factory(
self.api_data_source_factory,
self.api_list_data_source_factory,
object_store_creator)
self.api_data_source_factory.SetReferenceResolverFactory(
self.ref_resolver_factory)
# Note: samples are super slow in the dev server because it doesn't support
# async fetch, so disable them.
if IsDevServer():
extension_samples_fs = EmptyDirFileSystem()
else:
extension_samples_fs = self.host_file_system
self.samples_data_source_factory = SamplesDataSource.Factory(
extension_samples_fs,
CompiledFileSystem.Factory(extension_samples_fs, object_store_creator),
self.app_samples_file_system,
CompiledFileSystem.Factory(self.app_samples_file_system,
object_store_creator),
self.ref_resolver_factory,
svn_constants.EXAMPLES_PATH,
base_path)
self.api_data_source_factory.SetSamplesDataSourceFactory(
self.samples_data_source_factory)
self.intro_data_source_factory = IntroDataSource.Factory(
self.compiled_host_fs_factory,
self.ref_resolver_factory,
[svn_constants.INTRO_PATH, svn_constants.ARTICLE_PATH])
self.permissions_data_source = PermissionsDataSource(
self.compiled_host_fs_factory,
self.host_file_system,
'/'.join((svn_constants.API_PATH, '_api_features.json')),
'/'.join((svn_constants.API_PATH, '_permission_features.json')),
'/'.join((svn_constants.JSON_PATH, 'permissions.json')))
self.example_zipper = ExampleZipper(
self.compiled_host_fs_factory,
self.host_file_system,
svn_constants.DOCS_PATH)
self.path_canonicalizer = PathCanonicalizer(self.compiled_host_fs_factory)
self.redirector = Redirector(
self.compiled_host_fs_factory,
self.host_file_system,
svn_constants.PUBLIC_TEMPLATE_PATH)
self.strings_json_path = '/'.join((svn_constants.JSON_PATH, 'strings.json'))
self.sidenav_json_base_path = svn_constants.JSON_PATH
self.manifest_json_path = '/'.join(
(svn_constants.JSON_PATH, 'manifest.json'))
self.manifest_features_path = '/'.join(
(svn_constants.API_PATH, '_manifest_features.json'))
self.template_data_source_factory = TemplateDataSource.Factory(
self.api_data_source_factory,
self.api_list_data_source_factory,
self.intro_data_source_factory,
self.samples_data_source_factory,
self.compiled_host_fs_factory,
self.ref_resolver_factory,
self.permissions_data_source,
svn_constants.PUBLIC_TEMPLATE_PATH,
svn_constants.PRIVATE_TEMPLATE_PATH,
base_path)
self.api_data_source_factory.SetTemplateDataSource(
self.template_data_source_factory)
self.permissions_data_source.SetTemplateDataSource(
self.template_data_source_factory)
@staticmethod
def ForTest(file_system):
object_store_creator = ObjectStoreCreator.ForTest()
return ServerInstance(object_store_creator,
file_system,
EmptyDirFileSystem(),
'',
CompiledFileSystem.Factory(file_system,
object_store_creator),
TestBranchUtility.CreateWithCannedData(),
HostFileSystemCreator.ForTest(file_system,
object_store_creator))
@staticmethod
def ForLocal():
object_store_creator = ObjectStoreCreator(start_empty=False,
store_type=TestObjectStore)
host_file_system_creator = HostFileSystemCreator.ForLocal(
object_store_creator)
trunk_file_system = host_file_system_creator.Create()
return ServerInstance(
object_store_creator,
trunk_file_system,
EmptyDirFileSystem(),
'',
CompiledFileSystem.Factory(trunk_file_system, object_store_creator),
TestBranchUtility.CreateWithCannedData(),
host_file_system_creator)
| bsd-3-clause | 8,341,158,082,518,326,000 | 37.491228 | 80 | 0.676542 | false |
ironfroggy/django-mailer | mailer/__init__.py | 1 | 4161 | VERSION = (0, 2, 0, "dev", 1)
def get_version():
if VERSION[3] == "final":
return "%s.%s.%s" % (VERSION[0], VERSION[1], VERSION[2])
elif VERSION[3] == "dev":
return "%s.%s.%s%s%s" % (VERSION[0], VERSION[1], VERSION[2], VERSION[3], VERSION[4])
else:
return "%s.%s.%s%s" % (VERSION[0], VERSION[1], VERSION[2], VERSION[3])
__version__ = get_version()
PRIORITY_MAPPING = {
"high": "1",
"medium": "2",
"low": "3",
"deferred": "4",
}
# replacement for django.core.mail.send_mail
def send_mail(subject, message, from_email, recipient_list, priority="medium",
fail_silently=False, auth_user=None, auth_password=None, headers=None):
from django.utils.encoding import force_unicode
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for to_address in recipient_list:
message_obj = Message.objects.create(
to_address=to_address,
from_address=from_email,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
def send_html_mail(subject, message, message_html, from_email, recipient_list,
priority="medium", fail_silently=False, auth_user=None,
auth_password=None, headers=None):
"""
Function to queue HTML e-mails
"""
from django.utils.encoding import force_unicode
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
# need to do this in case subject used lazy version of ugettext
subject = force_unicode(subject)
for to_address in recipient_list:
message_obj = Message.objects.create(to_address=to_address,
from_address=from_email,
subject=subject,
message_body=message,
message_body_html=message_html,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
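# Queues one message per address in settings.ADMINS, prefixing the subject with
# EMAIL_SUBJECT_PREFIX (queued counterpart of django.core.mail.mail_admins).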
def mail_admins(subject, message, fail_silently=False, priority="medium", headers=None):
from django.utils.encoding import force_unicode
from django.conf import settings
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
subject = settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for name, to_address in settings.ADMINS:
message_obj = Message.objects.create(to_address=to_address,
from_address=settings.SERVER_EMAIL,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
def mail_managers(subject, message, fail_silently=False, priority="medium", headers=None):
from django.utils.encoding import force_unicode
from django.conf import settings
from mailer.models import Message
priority = PRIORITY_MAPPING[priority]
subject = settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject)
message = force_unicode(message)
if len(subject) > 100:
subject = u"%s..." % subject[:97]
for name, to_address in settings.MANAGERS:
message_obj = Message.objects.create(to_address=to_address,
from_address=settings.SERVER_EMAIL,
subject=subject,
message_body=message,
priority=priority)
if headers:
for name, value in headers.items():
message_obj.headers[name] = value
message_obj.save()
| mit | -1,367,007,737,760,832,500 | 32.02381 | 92 | 0.604903 | false |
neilLasrado/erpnext | erpnext/controllers/queries.py | 1 | 19700 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import erpnext
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.utils import nowdate, getdate
from collections import defaultdict
from erpnext.stock.get_item_details import _get_item_tax_template
from frappe.utils import unique
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
fields = get_fields("Employee", ["name", "employee_name"])
return frappe.db.sql("""select {fields} from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),
idx desc,
name, employee_name
limit %(start)s, %(page_len)s""".format(**{
'fields': ", ".join(fields),
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Lead", ["name", "lead_name", "company_name"])
return frappe.db.sql("""select {fields} from `tabLead`
where docstatus < 2
and ifnull(status, '') != 'Converted'
and ({key} like %(txt)s
or lead_name like %(txt)s
or company_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),
if(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),
idx desc,
name, lead_name
limit %(start)s, %(page_len)s""".format(**{
'fields': ", ".join(fields),
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for customer
def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
fields = ["name", "customer_group", "territory"]
if not cust_master_name == "Customer Name":
fields.append("customer_name")
fields = get_fields("Customer", fields)
searchfields = frappe.get_meta("Customer").get_search_fields()
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
return frappe.db.sql("""select {fields} from `tabCustomer`
where docstatus < 2
and ({scond}) and disabled=0
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
idx desc,
name, customer_name
limit %(start)s, %(page_len)s""".format(**{
"fields": ", ".join(fields),
"scond": searchfields,
"mcond": get_match_cond(doctype),
"fcond": get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for supplier
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
fields = ["name", "supplier_group"]
if not supp_master_name == "Supplier Name":
fields.append("supplier_name")
fields = get_fields("Supplier", fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s) and disabled=0
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
idx desc,
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': ', '.join(fields),
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
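# Returns leaf accounts of the requested account types in the company currency,
# falling back to all leaf accounts of the company if none of those types match.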
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
company_currency = erpnext.get_company_currency(filters.get('company'))
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2
and account_type in (%s)
and is_group = 0
and company = %s
and account_currency = %s
and `%s` LIKE %s
order by idx desc, name
limit %s, %s""" %
(", ".join(['%s']*len(filters.get("account_type"))), "%s", "%s", searchfield, "%s", "%s", "%s"),
tuple(filters.get("account_type") + [filters.get("company"), company_currency, "%%%s%%" % txt,
start, page_len]))
if not tax_accounts:
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2 and is_group = 0
and company = %s and account_currency = %s and `%s` LIKE %s limit %s, %s""" #nosec
% ("%s", "%s", searchfield, "%s", "%s", "%s"),
(filters.get("company"), company_currency, "%%%s%%" % txt, start, page_len))
return tax_accounts
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):
conditions = []
#Get searchfields from meta and use in Item Link field query
meta = frappe.get_meta("Item", cached=True)
searchfields = meta.get_search_fields()
if "description" in searchfields:
searchfields.remove("description")
searchfields = searchfields + [field for field in[searchfield or "name", "item_code", "item_group", "item_name"]
if not field in searchfields]
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
description_cond = ''
if frappe.db.count('Item', cache=True) < 50000:
# scan description only if items are less than 50000
description_cond = 'or tabItem.description LIKE %(txt)s'
fields = get_fields("Item", ["name", "item_group"])
if "description" in fields:
fields.remove("description")
return frappe.db.sql("""select
{fields},
if(length(tabItem.description) > 40, \
concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and tabItem.has_variants=0
and tabItem.disabled=0
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s)
{description_cond})
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
idx desc,
name, item_name
limit %(start)s, %(page_len)s """.format(
fields=', '.join(fields),
key=searchfield,
scond=searchfields,
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%'),
description_cond = description_cond),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": page_len
}, as_dict=as_dict)
def bom(doctype, txt, searchfield, start, page_len, filters):
conditions = []
fields = get_fields("BOM", ["name", "item"])
return frappe.db.sql("""select {fields}
from tabBOM
where tabBOM.docstatus=1
and tabBOM.is_active=1
and tabBOM.`{key}` like %(txt)s
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc, name
limit %(start)s, %(page_len)s """.format(
fields=", ".join(fields),
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%'),
key=searchfield),
{
'txt': '%' + txt + '%',
'_txt': txt.replace("%", ""),
'start': start or 0,
'page_len': page_len or 20
})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters.get('customer'):
cond = """(`tabProject`.customer = %s or
ifnull(`tabProject`.customer,"")="") and""" %(frappe.db.escape(filters.get("customer")))
fields = get_fields("Project", ["name"])
return frappe.db.sql("""select {fields} from `tabProject`
where `tabProject`.status not in ("Completed", "Cancelled")
and {cond} `tabProject`.name like %(txt)s {match_cond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc,
`tabProject`.name asc
limit {start}, {page_len}""".format(
fields=", ".join(['`tabProject`.{0}'.format(f) for f in fields]),
cond=cond,
match_cond=get_match_cond(doctype),
start=start,
page_len=page_len), {
"txt": "%{0}%".format(txt),
"_txt": txt.replace('%', '')
})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters, as_dict):
fields = get_fields("Delivery Note", ["name", "customer", "posting_date"])
return frappe.db.sql("""
select %(fields)s
from `tabDelivery Note`
where `tabDelivery Note`.`%(key)s` like %(txt)s and
`tabDelivery Note`.docstatus = 1
and status not in ("Stopped", "Closed") %(fcond)s
and (
(`tabDelivery Note`.is_return = 0 and `tabDelivery Note`.per_billed < 100)
or `tabDelivery Note`.grand_total = 0
or (
`tabDelivery Note`.is_return = 1
and return_against in (select name from `tabDelivery Note` where per_billed < 100)
)
)
%(mcond)s order by `tabDelivery Note`.`%(key)s` asc limit %(start)s, %(page_len)s
""" % {
"fields": ", ".join(["`tabDelivery Note`.{0}".format(f) for f in fields]),
"key": searchfield,
"fcond": get_filters_cond(doctype, filters, []),
"mcond": get_match_cond(doctype),
"start": start,
"page_len": page_len,
"txt": "%(txt)s"
}, {"txt": ("%%%s%%" % txt)}, as_dict=as_dict)
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
cond = ""
if filters.get("posting_date"):
cond = "and (batch.expiry_date is null or batch.expiry_date >= %(posting_date)s)"
batch_nos = None
args = {
'item_code': filters.get("item_code"),
'warehouse': filters.get("warehouse"),
'posting_date': filters.get('posting_date'),
'txt': "%{0}%".format(txt),
"start": start,
"page_len": page_len
}
having_clause = "having sum(sle.actual_qty) > 0"
if filters.get("is_return"):
having_clause = ""
if args.get('warehouse'):
batch_nos = frappe.db.sql("""select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom,
concat('MFG-',batch.manufacturing_date), concat('EXP-',batch.expiry_date)
from `tabStock Ledger Entry` sle
INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
where
batch.disabled = 0
and sle.item_code = %(item_code)s
and sle.warehouse = %(warehouse)s
and (sle.batch_no like %(txt)s
or batch.expiry_date like %(txt)s
or batch.manufacturing_date like %(txt)s)
and batch.docstatus < 2
{cond}
{match_conditions}
group by batch_no {having_clause}
order by batch.expiry_date, sle.batch_no desc
limit %(start)s, %(page_len)s""".format(
cond=cond,
match_conditions=get_match_cond(doctype),
having_clause = having_clause
), args)
return batch_nos
else:
return frappe.db.sql("""select name, concat('MFG-', manufacturing_date), concat('EXP-',expiry_date) from `tabBatch` batch
where batch.disabled = 0
and item = %(item_code)s
and (name like %(txt)s
or expiry_date like %(txt)s
or manufacturing_date like %(txt)s)
and docstatus < 2
{0}
{match_conditions}
order by expiry_date, name desc
limit %(start)s, %(page_len)s""".format(cond, match_conditions=get_match_cond(doctype)), args)
def get_account_list(doctype, txt, searchfield, start, page_len, filters):
filter_list = []
if isinstance(filters, dict):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
filter_list.append([doctype, key, val[0], val[1]])
else:
filter_list.append([doctype, key, "=", val])
elif isinstance(filters, list):
filter_list.extend(filters)
if "is_group" not in [d[1] for d in filter_list]:
filter_list.append(["Account", "is_group", "=", "0"])
if searchfield and txt:
filter_list.append([doctype, searchfield, "like", "%%%s%%" % txt])
return frappe.desk.reportview.execute("Account", filters = filter_list,
fields = ["name", "parent_account"],
limit_start=start, limit_page_length=page_len, as_list=True)
def get_blanket_orders(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select distinct bo.name, bo.blanket_order_type, bo.to_date
from `tabBlanket Order` bo, `tabBlanket Order Item` boi
where
boi.parent = bo.name
and boi.item_code = {item_code}
and bo.blanket_order_type = '{blanket_order_type}'
and bo.company = {company}
and bo.docstatus = 1"""
.format(item_code = frappe.db.escape(filters.get("item")),
blanket_order_type = filters.get("blanket_order_type"),
company = frappe.db.escape(filters.get("company"))
))
@frappe.whitelist()
def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
# but can also be an Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
fields = get_fields("Account", ["name"])
return frappe.db.sql("""select {fields} from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Income Account", "Temporary"))
and tabAccount.is_group=0
and tabAccount.`{key}` LIKE %(txt)s
{condition} {match_condition}
order by idx desc, name"""
.format(
fields=", ".join(fields),
condition=condition,
match_condition=get_match_cond(doctype),
key=searchfield
), {
'txt': '%' + txt + '%',
'company': filters.get("company", "")
})
@frappe.whitelist()
def get_expense_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
fields = get_fields("Account", ["name"])
return frappe.db.sql("""select {fields}, tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Expense Account", "Fixed Asset", "Temporary", "Asset Received But Not Billed", "Capital Work in Progress"))
and tabAccount.is_group=0
and tabAccount.docstatus!=2
and tabAccount.{key} LIKE %(txt)s
{condition} {match_condition}"""
.format(
fields=", ".join(['`tabAccount`.{0}'.format(f) for f in fields]),
condition=condition,
key=searchfield,
match_condition=get_match_cond(doctype)
), {
'company': filters.get("company", ""),
'txt': '%' + txt + '%'
})
@frappe.whitelist()
def warehouse_query(doctype, txt, searchfield, start, page_len, filters):
# Should be used when item code is passed in filters.
conditions, bin_conditions = [], []
filter_dict = get_doctype_wise_filters(filters)
sub_query = """ select round(`tabBin`.actual_qty, 2) from `tabBin`
where `tabBin`.warehouse = `tabWarehouse`.name
{bin_conditions} """.format(
bin_conditions=get_filters_cond(doctype, filter_dict.get("Bin"),
bin_conditions, ignore_permissions=True))
fields = get_fields("Warehouse", ["name"])
query = """select {fields},
CONCAT_WS(" : ", "Actual Qty", ifnull( ({sub_query}), 0) ) as actual_qty
from `tabWarehouse`
where
`tabWarehouse`.`{key}` like {txt}
{fcond} {mcond}
order by
`tabWarehouse`.name desc
limit
{start}, {page_len}
""".format(
fields=", ".join(['`tabWarehouse`.{0}'.format(f) for f in fields]),
sub_query=sub_query,
key=searchfield,
fcond=get_filters_cond(doctype, filter_dict.get("Warehouse"), conditions),
mcond=get_match_cond(doctype),
start=start,
page_len=page_len,
txt=frappe.db.escape('%{0}%'.format(txt))
)
return frappe.db.sql(query)
def get_doctype_wise_filters(filters):
# Helper function to separate filters doctype-wise
filter_dict = defaultdict(list)
for row in filters:
filter_dict[row[0]].append(row)
return filter_dict
@frappe.whitelist()
def get_batch_numbers(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Batch", ["batch_id"])
query = """select %(fields)s from `tabBatch`
where disabled = 0
and (expiry_date >= CURDATE() or expiry_date IS NULL)
and name like %(txt)s"""
flt = {
"fields": ", ".join(fields),
"txt": frappe.db.escape('%{0}%'.format(txt))
}
if filters and filters.get('item'):
query += " and item = %(item)s"
flt.append({
"item": frappe.db.escape(filters.get('item'))
})
return frappe.db.sql(query, flt)
@frappe.whitelist()
def item_manufacturer_query(doctype, txt, searchfield, start, page_len, filters):
item_filters = [
['manufacturer', 'like', '%' + txt + '%'],
['item_code', '=', filters.get("item_code")]
]
fields = get_fields("Item Manufacturer", ["manufacturer", "manufacturer_part_no"])
item_manufacturers = frappe.get_all(
"Item Manufacturer",
fields=fields,
filters=item_filters,
limit_start=start,
limit_page_length=page_len,
as_list=1
)
return item_manufacturers
@frappe.whitelist()
def get_purchase_receipts(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Purchase Receipt", ["name"])
item_filters = [
['Purchase Receipt', 'docstatus', '=', '1'],
['Purchase Receipt', 'name', 'like', '%' + txt + '%'],
['Purchase Receipt Item', 'item_code', '=', filters.get("item_code")]
]
purchase_receipts = frappe.get_all('Purchase Receipt',
fields=fields,
filters=item_filters,
as_list=1
)
return purchase_receipts
@frappe.whitelist()
def get_purchase_invoices(doctype, txt, searchfield, start, page_len, filters):
fields = get_fields("Purchase Invoice", ["name"])
item_filters =[
['Purchase Invoice', 'docstatus', '=', '1'],
['Purchase Invoice', 'name', 'like', '%' + txt + '%'],
['Purchase Invoice Item', 'item_code', '=', filters.get("item_code")],
]
purchase_invoices = frappe.get_all('Purchase Invoice',
fields=fields,
filters=item_filters,
as_list=1
)
return purchase_invoices
@frappe.whitelist()
def get_tax_template(doctype, txt, searchfield, start, page_len, filters):
item_doc = frappe.get_cached_doc('Item', filters.get('item_code'))
item_group = filters.get('item_group')
taxes = item_doc.taxes or []
while item_group:
item_group_doc = frappe.get_cached_doc('Item Group', item_group)
taxes += item_group_doc.taxes or []
item_group = item_group_doc.parent_item_group
if not taxes:
fields = get_fields("Item Tax Template", ["name"])
return frappe.db.sql(""" SELECT %(fields)s FROM `tabItem Tax Template` """ , {fields: ", ".join(fields)})
else:
args = {
'item_code': filters.get('item_code'),
'posting_date': filters.get('valid_from'),
'tax_category': filters.get('tax_category')
}
taxes = _get_item_tax_template(args, taxes, for_validate=True)
return [(d,) for d in set(taxes)]
def get_fields(doctype, fields=None):
fields = fields or []
meta = frappe.get_meta(doctype)
fields.extend(meta.get_search_fields())
if meta.title_field and not meta.title_field.strip() in fields:
fields.insert(1, meta.title_field.strip())
return unique(fields)
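# --- Illustrative note (added comment, not part of the upstream module) ---
# The functions above are whitelisted search queries: Frappe's link-field
# autocomplete calls them with (doctype, txt, searchfield, start, page_len,
# filters). A minimal direct call, assuming a fully initialised Frappe site,
# might look like the following; the doctype, search text and filter values
# are hypothetical examples only:
# accounts = get_account_list("Account", "Cash", "name", 0, 20,
# {"company": "Example Company"})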
| gpl-3.0 | -5,845,624,524,436,555,000 | 30.980519 | 143 | 0.651421 | false |
OctavianLee/Pywechat | pywechat/services/wechat_shake.py | 1 | 20297 | # -*- coding: utf-8 -*-
from pywechat.services.basic import Basic
class ShakeService(Basic):
"""This class is an implement of the Wechat service of shaking.
All request's urls come from the official documents.
Link: https://mp.weixin.qq.com/wiki/home/index.html
"""
def bind_page(
self,
page_ids, bind, append,
device_id=None, uuid=None, major=None, minor=None):
"""Binds the relations ship between the device and pages.
Link:
https://mp.weixin.qq.com/wiki/12/c8120214ec0ba08af5dfcc0da1a11400.html
Args:
page_ids: the list of page_id.
bind: the mark of binding operation.
0 is to dismiss the relationship.
1 is to build the relationship.
append: the mark of appending operation.
0 is to overwrite the existing pages.
1 is to append the page.
device_id: the device id,
it can be None when UUID, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
"bind": bind,
"append": append
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/bindpage'
json_data = self._send_request('post', url, data=data)
return json_data
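# Illustrative usage sketch (added comment, not part of the upstream module):
# assuming `service` is a ShakeService constructed with valid credentials via
# pywechat's Basic service, a device identified by UUID/major/minor could be
# bound to a page roughly as follows; the page id and beacon identifiers are
# placeholder values:
# result = service.bind_page(page_ids=[28840], bind=1, append=1,
# uuid="FDA50693-A4E2-4FB1-AFCF-C6EB07647825",
# major=10001, minor=10002)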
def upload_material(self, image):
"""Uploads the material for the icon of page.
Formats: jpg, jpeg, png, gif. Size: 120*120 px recommended, 200*200 px at most.
Link:
https://mp.weixin.qq.com/wiki/5/e997428269ff189d8f9a4b9e177be2d9.html
Args:
image: the file of image. open(image_name, 'rb')
Returns:
the json data.Example:
{
"data": {
"pic_url":
"http://shp.qpic.cn/wechat_shakearound_pic/0/1428377032/120"
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
url = 'https://api.weixin.qq.com/shakearound/material/add'
files = {'media': image}
json_data = self._send_request('post', url, files=files)
return json_data
def apply_devices(
self,
quantity, apply_reason, comment,
poi_id=None):
"""Applys devices from the wechat.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
quantity: the quantity of devices (less than 500).
apply_reason: the reason for applying (less than 100 characters).
comment: the comment (less than 15 characters or 30 letters).
poi_id: the id of the point of interest.
Returns:
the json data.Example:
{
"data": {
"apply_id": 123,
"device_identifiers":[
{
"device_id":10100,
"uuid":"FDA50693-A4E2-4FB1-AFCF-C6EB07647825",
"major":10001,
"minor":10002
}
]
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
url = 'https://api.weixin.qq.com/shakearound/device/applyid'
data = {
"quantity": quantity,
"apply_reason": apply_reason,
"comment": comment
}
if poi_id:
data["poi_id"] = poi_id
json_data = self._send_request('post', url, data=data)
return json_data
def update_device(
self,
comment,
device_id=None, uuid=None, major=None, minor=None):
"""Edit the comment of a device.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
comment: the comment (less than 15 characters or 30 letters).
device_id: the device id,
it can be None when UUID, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"comment": comment,
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/update'
json_data = self._send_request('post', url, data=data)
return json_data
def bind_location(
self,
poi_id,
device_id=None, uuid=None, major=None, minor=None):
"""Bind the device with a location.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
poi_id: the id of the point of interest.
device_id: the device id,
it can be None when UUID, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"poi_id": poi_id
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/device/bindlocation'
json_data = self._send_request('post', url, data=data)
return json_data
def search_device(
self,
device_id=None, uuid=None, major=None, minor=None):
"""Finds the information of a device.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
device_id: the device id,
it can be None when UUID, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": {
"devices": [
{
"comment": "",
"device_id": 10097,
"major": 10001,
"minor": 12102,
"page_ids": "15369",
"status": 1,
"poi_id": 0,
"uuid": "FDA50693-A4E2-4FB1-AFCF-C6EB07647825"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
device_identifier = {}
if device_id:
device_identifier = {
"device_id": device_id
}
else:
device_identifier = {
"uuid": uuid,
"major": major,
"minor": minor
}
data = {
"device_identifiers": [device_identifier]
}
url = 'https://api.weixin.qq.com/shakearound/device/search'
json_data = self._send_request('post', url, data=data)
return json_data
def search_devices(
self,
begin, count,
apply_id=None):
"""Finds the information of devices.
Link:
https://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html
Args:
begin: the start number of devices.
count: the number of devices will query.
apply_id: the applicaition number of devices.
Returns:
the json data.Example:
{
"data": {
"devices": [
{
"comment": "",
"device_id": 10097,
"major": 10001,
"minor": 12102,
"page_ids": "15369",
"status": 1,
"poi_id": 0,
"uuid": "FDA50693-A4E2-4FB1-AFCF-C6EB07647825"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin": begin,
"count": count
}
if apply_id:
data["apply_id"] = apply_id
url = 'https://api.weixin.qq.com/shakearound/device/search'
json_data = self._send_request('post', url, data=data)
return json_data
def add_page(
self,
title, description, page_url, icon_url,
comment=None):
"""Adds the new page.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
title: the page title(less than 6 characters).
description: the subtitle (less than 7 characters).
page_url: the url of page.
icon_url: the url of icon.
comment: the comment (less than 15 characters).
Returns:
the json data.Example:
{
"data": {
"page_id": 28840
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"title": title,
"description": description,
"page_url": page_url,
"icon_url": icon_url
}
if comment:
data["comment"] = comment
url = 'https://api.weixin.qq.com/shakearound/page/add'
json_data = self._send_request('post', url, data=data)
return json_data
def update_page(
self,
page_id, title, description, page_url, icon_url,
comment=None):
"""Edits a page.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_id: the id of page.
title: the page title(less than 6 characters).
description: the subtitle (less than 7 characters).
page_url: the url of page.
icon_url: the url of icon.
comment: the comment (less than 15 characters).
Returns:
the json data.Example:
{
"data": {
"page_id": 28840
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_id": page_id,
"title": title,
"description": description,
"page_url": page_url,
"icon_url": icon_url
}
if comment:
data["comment"] = comment
url = 'https://api.weixin.qq.com/shakearound/page/update'
json_data = self._send_request('post', url, data=data)
return json_data
def search_page_by_ids(self, page_ids):
"""Finds pages by ids.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_ids: the list of page id.
Returns:
the json data.Example:
{
"data": {
"pages": [
{
"comment": "just for test",
"description": "test",
"icon_url": "https://www.baidu.com/img/bd_logo1",
"page_id": 28840,
"page_url": "http://xw.qq.com/testapi1",
"title": "测试1"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
}
url = 'https://api.weixin.qq.com/shakearound/page/search'
json_data = self._send_request('post', url, data=data)
return json_data
def search_page_by_counts(self, begin, count):
"""Finds pages by counts.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
begin: the start number of pages.
count: the number of pages will query.
Returns:
the json data.Example:
{
"data": {
"pages": [
{
"comment": "just for test",
"description": "test",
"icon_url": "https://www.baidu.com/img/bd_logo1",
"page_id": 28840,
"page_url": "http://xw.qq.com/testapi1",
"title": "测试1"
}
],
"total_count": 1
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin": begin,
"count": count
}
url = 'https://api.weixin.qq.com/shakearound/page/search'
json_data = self._send_request('post', url, data=data)
return json_data
def delete_page(self, page_ids):
"""Deletes pages by ids.
Link:
https://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html
Args:
page_ids: the list of page id.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_ids": page_ids,
}
url = 'https://api.weixin.qq.com/shakearound/page/delete'
json_data = self._send_request('post', url, data=data)
return json_data
def get_shake_info(self, ticket, need_poi=None):
"""Gets the informaiton of shaking.
Gets the information of devices including UUID, major, minor etc.
Link:
https://mp.weixin.qq.com/wiki/3/34904a5db3d0ec7bb5306335b8da1faf.html
Args:
ticket: the ticket of business which can be getted from url.
need_poi: whether it needs to return poi_id.
1 is to return.
Returns:
the json data.Example:
{
"data": {
},
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"ticket": ticket
}
if need_poi:
data["need_poi"] = need_poi
url = 'https://api.weixin.qq.com/shakearound/user/getshakeinfo'
json_data = self._send_request('post', url, data=data)
return json_data
def device_statistics(
self,
begin_date, end_date,
device_id=None, uuid=None, major=None, minor=None):
"""Gets the statistics of a device.
Link:
https://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html
Args:
begin_date: the timestamp of start date
end_date: the timestamp of end date, the max time span is 30 days.
device_id: the device id,
it can be None when UUID, major and minor are set.
uuid: the uuid of device.
major: the major of device.
minor: the minor of device.
Returns:
the json data.Example:
{
"data": [
{
"click_pv": 0,
"click_uv": 0,
"ftime": 1425052800,
"shake_pv": 0,
"shake_uv": 0
}
],
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"begin_date": begin_date,
"end_date": end_date
}
if device_id:
data["device_identifier"] = {
"device_id": device_id
}
else:
data["device_identifier"] = {
"uuid": uuid,
"major": major,
"minor": minor
}
url = 'https://api.weixin.qq.com/shakearound/statistics/device'
json_data = self._send_request('post', url, data=data)
return json_data
def page_statistics(self, page_id, begin_date, end_date):
"""Finds the information of a page.
(Link:
https://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html)
Args:
begin_date: the timestamp of start date
end_date: the timestamp of end date, the max time span is 30 days.
page_id: the id of page.
Returns:
the json data.Example:
{
"data": [
{
"click_pv": 0,
"click_uv": 0,
"ftime": 1425052800,
"shake_pv": 0,
"shake_uv": 0
}
],
"errcode": 0,
"errmsg": "success."
}
Raises:
WechatError: to raise the exception if it contains the error.
"""
data = {
"page_id": page_id,
"begin_date": begin_date,
"end_date": end_date
}
url = 'https://api.weixin.qq.com/shakearound/statistics/page'
json_data = self._send_request('post', url, data=data)
return json_data
| mit | 5,705,444,596,172,750,000 | 28.836765 | 80 | 0.452708 | false |
iwalz/zendserverapi | docs/conf.py | 1 | 7828 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zend Server API documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 24 01:33:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Zend Server API'
copyright = '2012, Ingo Walz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.2'
# The full version, including alpha/beta/rc tags.
release = '0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendServerAPIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '\usepackage[plainpages=false]',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendServerAPI.tex', 'Zend Server API Documentation',
'Ingo Walz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendserverapi', 'Zend Server API Documentation',
['Ingo Walz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendServerAPI', 'Zend Server API Documentation',
'Ingo Walz', 'ZendServerAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause | -1,641,784,278,740,480,300 | 31.213992 | 80 | 0.70465 | false |
rcanepa/cs-fundamentals | python/interview_questions/longest_repeated_substring.py | 1 | 2128 | """Longest repeated substring (or LCP = longest common prefix in a suffix array).
Problem: find the longest repeated substring inside a string.
Steps:
1. Create suffixes. This should be linear in time and space, but it isn't.
Slicing strings in Python (with slice or [a:b]) is a linear operation
with regard to the size of the string. In the end, this implementation
provides a quadratic time O(N^2).
2. Sort suffixes. This should be N * log(N) in time.
3. Find LCP between adjacent suffixes.
Usage:
This script can be used by piping data to standard input. Example:
cat ~/manifesto.txt | python3 -m interview_questions.longest_repeated_substring
"""
import sys
import time
def lcp(s1, s2):
"""Return the length of the longest common prefix
between strings `s1` and `s2`."""
comp = 0
for i in range(min(len(s1), len(s2))):
if s1[i] != s2[i]:
break
comp += 1
return comp
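# Worked example (added comment): lcp("flower", "flowchart") == 4, because the
# first four characters "flow" match and the fifth characters ('e' vs 'c') differ.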
def lrs(text):
"""Return the longest repeated substring using a Suffix Array."""
# Step 1: create the suffixes array.
suffixes = []
for i in range(len(text)):
suffixes.append(text[i:])
# Step 2: sort the suffixes array.
sorted_suffixes = sorted(suffixes)
# Step 3: find the longest repeated substring.
result = ""
for i in range(len(sorted_suffixes) - 1):
l = lcp(sorted_suffixes[i], sorted_suffixes[i + 1])
if l > len(result):
result = sorted_suffixes[i][:l]
return result
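# Worked example (added comment): lrs("banana") == "ana". The sorted suffixes
# are: a, ana, anana, banana, na, nana; the longest common prefix between
# adjacent entries is "ana" (length 3).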
if __name__ == "__main__":
s = ""
t0 = time.time()
for line in sys.stdin:
s += line
t1 = time.time()
print("################################################################################")
print('-> Took {:.3f}ms to read the file.'.format((t1 - t0) * 1000))
t0 = time.time()
r = lrs(s)
t1 = time.time()
print('-> Took {:.3f}ms to find the longest repeated substring the file.'.format((t1 - t0) * 1000))
print("################################################################################")
print("The longest repeated substring is:")
print(r)
| mit | 4,694,552,034,413,392,000 | 30.761194 | 103 | 0.569549 | false |
sijmenvos/Uforia-browser | nodejs/build_index/databases/mysql.py | 1 | 7922 | #!/usr/bin/env python
# Copyright (C) 2013 Hogeschool van Amsterdam
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import MySQLdb
import traceback
import sys
import time
import warnings
class Database(object):
"""
This is a MySQL implementation of Uforia's data storage facility.
"""
def __init__(self, config):
"""
Initializes a MySQL database connection using the specified Uforia
configuration.
config - The uforia configuration object
"""
if (not config.DBHOST or not config.DBUSER
or not config.DBPASS or not config.DBNAME):
raise ValueError("""Cannot initialize a database
connection without valid credentials.""")
else:
hostname = config.DBHOST
username = config.DBUSER
password = config.DBPASS
self.database = config.DBNAME
self.connection = None
attempts = 0
retries = config.DBRETRY
while not self.connection:
try:
attempts += 1
self.connection = MySQLdb.connect(host=hostname,
user=username,
passwd=password,
db=self.database,
charset='utf8',
use_unicode=True)
except MySQLdb.OperationalError, e:
print("Could not connect to the MySQL server: " + str(e))
print("Sleeping for 3 seconds...")
time.sleep(3)
if attempts > retries:
print("The MySQL server didn't respond after "
+ str(retries) +
""" requests; you might be flooding it
with connections.""")
print("""Consider raising the maximum amount of
connections on your MySQL server or lower
the amount of concurrent Uforia threads!""")
traceback.print_exc(file=sys.stderr)
break
try:
self.cursor = self.connection.cursor()
except:
traceback.print_exc(file=sys.stderr)
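# Illustrative usage sketch (added comment, not part of the upstream module):
# the constructor only needs an object exposing DBHOST, DBUSER, DBPASS, DBNAME
# and DBRETRY, for example (all values below are placeholders):
# class _Config(object):
# DBHOST, DBUSER, DBPASS = "localhost", "uforia", "secret"
# DBNAME, DBRETRY = "uforia_db", 3
# db = Database(_Config())
# mime_rows = db.read_mimetypes()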
def execute_query(self, query, params=None):
"""
Executes a query and doesn't return data.
Handling of executed query is done in the def itself.
query - The query string
"""
try:
warnings.filterwarnings('ignore', category=self.connection.Warning)
#print "query is %s " % query
self.cursor.execute(query, params)
warnings.resetwarnings()
except:
traceback.print_exc(file=sys.stderr)
def read_mimetypes(self, column="modules"):
"""
Grabs a column from the supported_mimetypes table.
columns: mime_type & modules
Default is the modules column which is the table
name for the related mime_type.
"""
query = "SELECT "+column+" FROM supported_mimetypes;"
self.execute_query(query)
return self.cursor.fetchall()
def read_filestable(self, _all=False):
"""
Reads the 'files' table depending on the boolean flag.
By default it only returns the column names for
the purpose of JSON index generation.
_all - Boolean flag
"""
if(_all):
query = """
SELECT
`hashid`, `fullpath`, `name`, `size`,
`owner`, `group`, `perm`,
`mtime`, `atime`, `ctime`,
`md5`, `sha1`, `sha256`,
`ftype`, `mtype`, `btype`
FROM files;
"""
else:
query = """
SELECT column_name
FROM information_schema.columns
WHERE table_schema = '"""+ self.database +"""'
AND table_name = 'files';
"""
result = self.execute_query(query)
return self.cursor.fetchall()
def read_table(self, _table, columnsonly=True, onerow=False, return_dict=False):
"""
Generic func to read either only the columns or the entire table.
If there is no table, will raise an exception.
data_type is used to determine if it should be an int/float/string
column_type simply returns the name of the column
"""
if(not _table):
raise Exception("No table specified for read_table!")
if(columnsonly):
query = """
SELECT column_name,data_type
FROM information_schema.columns
WHERE table_schema = '""" + self.database + """'
AND table_name = '"""+ _table +"""';
"""
else:
query = """
SELECT * FROM """+ _table +""";
"""
if return_dict:
self.cursor = self.connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
self.execute_query(query)
if(onerow):
result = self.cursor.fetchone()
else:
result = self.cursor.fetchall()
return result
def read_table_list(self, _tablelist, columnsonly=True):
"""
Generic func to read either only the columns or the entire table.
If there is no table, will raise an exception.
data_type is used to determine if it should be an int/float/string
column_type simply returns the name of the column
"""
if(not _tablelist):
raise Exception("No table specified for read_tablelist!")
if(columnsonly):
query = """
SELECT column_name,data_type
FROM information_schema.columns
WHERE table_schema = '""" + self.database + """'
AND table_name = '""" + _tablelist[0] + """'"""
# skip first and last in list
for table in _tablelist[1:-1]:
query += """
OR table_name = '"""+ table +"""'
"""
# add last and end query
query += "OR table_name = '" + _tablelist[-1] + "';"
else:
query = """
SELECT * FROM """
for table in _tablelist[:-1]:
query += "'" + table + "',"
query += "'" + _tablelist[-1] + "';"
self.execute_query(query)
result = self.cursor.fetchall()
return result
def like_mime(self, _mime=None, _table="supported_mimetypes"):
"""
like_mime queries the database using a LIKE query
this is done under the assumption that whatever the config file reads is not 100% accurate
but close enough to still produce a hit in the database.
"""
if(not _mime):
raise Exception("No mime supplied. Cannot query database.")
query = "SELECT * from "+ _table +" WHERE mime_type LIKE '%"+_mime+"%';"
self.execute_query(query)
result = self.cursor.fetchall()
return result
def get_modules_column(self):
"""
Returns only the 'modules' column from the supported_mimetypes table.
"""
query = "SELECT modules from supported_mimetypes;"
self.execute_query(query)
result = self.cursor.fetchall()
return result
| gpl-2.0 | -7,971,151,846,153,987,000 | 33.146552 | 98 | 0.535471 | false |
alex-ip/agdc | agdc/tilecompare.py | 1 | 24264 | #!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
tilecompare.py - compare two sets of tiles.
"""
from __future__ import absolute_import
import sys
import os
import re
from . import dbutil
from osgeo import gdal
import numpy as np
from .dbcompare import ComparisonWrapper
from eotools.execute import execute
# #
# # TileComparisonPair class
# #
#
# Constants
#
IGNORE_CLASS_ID = [2]
MOSAIC_CLASS_ID = [4]
class TileComparisonPair(object):
"""A pair of databases from which tiles are to be compared.
Analagous to the ComparisonPair class for comparing databases, the
TileCompare class provides for comparision of tile stores from two
databases. The first database pertains to a benchmark tile store, and the
second database relates to the tile store arising from the latest ingest
code we are seeking to verify.
"""
# pylint:disable=too-many-instance-attributes
def __init__(self, db1, db2, schema1, schema2):
"""
Positional Arguments:
db1, db2: Connections to the databases to be compared.
Keyword Arguments:
schema1: The schema to be used for the first database (db1)
schema2: The schema to be used for the second database (db2)
"""
# Set autocommit mode on the connections; retain the old settings.
self.old_autocommit = (db1.autocommit, db2.autocommit)
db1.autocommit = True
db2.autocommit = True
# Sanitise the schema names, just in case.
self.schema1 = dbutil.safe_name(schema1)
self.schema2 = dbutil.safe_name(schema2)
# Wrap the connections to gain access to database structure queries.
self.db1 = ComparisonWrapper(db1, self.schema1)
self.db2 = ComparisonWrapper(db2, self.schema2)
# Get the database names...
self.db1_name = self.db1.database_name()
self.db2_name = self.db2.database_name()
# and qualify with the schema names if they are not 'public'
if self.schema1 != 'public':
self.db1_name = self.schema1 + '.' + self.db1_name
if self.schema2 != 'public':
self.db2_name = self.schema2 + '.' + self.db2_name
# Set input, expected and output directores
# Not used yet
module = "tilecompare"
suite = "TileCompare"
self.input_dir = dbutil.input_directory(module, suite)
self.output_dir = dbutil.output_directory(module, suite)
self.expected_dir = dbutil.expected_directory(module, suite)
# tile_root could be different to database?
def restore_autocommit(self):
"""Restore the autocommit status of the underlying connections.
The comparison pair should not be used after calling this, in
case the connections have been reset to autocommit=False. The
method sets the database attributes to None to enforce this."""
self.db1.conn.autocommit = self.old_autocommit[0]
self.db2.conn.autocommit = self.old_autocommit[1]
self.db1 = None
self.db2 = None
def compare_tile_stores(db1, db2, schema1='public', schema2='public',
output=sys.stdout):
"""Compares the tile stores from two databases.
Database Connection db1 is assumed to represent the production tile store,
against which we wish to verify the tile store resulting from a Fresh
Ingest, which has taken place onto the previously-empty Database Connection
db2.
This function runs in three stages:
1. Gather the Fresh Ingest information on Database Connection db2 into a
table and copy this across to Database Connection db1, the production
database.
2. On Database Connection db1, merge the table from Step 1 to find the
corresponding production tiles.
3. For those Fresh Ingest tiles where a production tile can be found,
compare the two tiles and report if there is a difference. It can happen
that a tile exists in the Fresh Ingest store but not in the production tile store.
This can happen for one of several reasons:
a) The old ingest used PQA to determine the existence of lower-level
data. By contrast, the Fresh Ingest process looks at the tile
directly to evaluate the existence of data.
b) Mosaic tiles used to be created on user-request by the stacker class
of the API. By contrast, The Fresh Ingest process does this
automatically.
c) The coverage method of the Fresh Ingest process will, very
occasionally, pick up some new tiles.
Such anomalies are reported in the output stream with a "WARNING" prefix
Preconditions: db1 and db2 are open database connections. These are
assumed to be psycopg2 connections to PostgreSQL databases. Tables
that are not being explictly ignored are expected to have primary keys.
Positional Arguments:
db1, db2: Connections to the databases to be compared.
Keyword Arguments:
schema1: The schema to be used for the first database (db1), defaults
to 'public'.
schema2: The schema to be used for the second database (db2), defaults
to 'public'.
output: Where the output goes. This is assumed to be a file object.
Defaults to sys.stdout.
Return Value: Returns a list (path1, path2) of those corresponding tile
pairs where the contents differ.
"""
pair = TileComparisonPair(db1, db2, schema1, schema2)
#TEMPORARY delete some tiles:
_temp_delete_some_tiles(pair)
# Create a random 9-digit string to append to tables"
random_suffix = dbutil.random_name("_")
# Name of table to which information from fresh ingest will be written.
test_ingest_table = 'test_ingest%s' %random_suffix
# Create the table pertaining to the fresh ingest and copy it to the
# production database.
_copy_fresh_ingest_info(pair, test_ingest_table)
# Create tuple (list_both, list_db1_not_db2, list_db2_not_db1), where each
# list is a list of tuples:
# (level, tile_class_id1, tile_class_id2, path1, path2).
(list_both, list_db1_not_db2, list_db2_not_db1) = \
_get_comparison_pairs(pair, test_ingest_table)
# Output information for the edge cases of tiles being in only one database
tile_list = [p[3] for p in list_db1_not_db2]
_log_missing_tile_info(tile_list, pair.db1_name, pair.db2_name,
output)
tile_list = [p[4] for p in list_db2_not_db1]
_log_missing_tile_info(tile_list, pair.db2_name, pair.db1_name,
output)
output.writelines('There might be further mosaic tiles that are missing\n')
# Compare the tiles if they both exist
difference_pairs = _compare_tile_contents(list_both, output)
return difference_pairs
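# Illustrative usage sketch (added comment, not part of the upstream module):
# with two open psycopg2 connections the comparison can be driven roughly as
# follows; the connection parameters are placeholders:
# conn1 = psycopg2.connect(host="prod-host", dbname="production_datacube")
# conn2 = psycopg2.connect(host="test-host", dbname="fresh_ingest_datacube")
# differing_tiles = compare_tile_stores(conn1, conn2, output=sys.stdout)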
def _temp_delete_some_tiles(comparison_pair):
"""Temporarily delete some files."""
#TEMPORARY delete some tiles from tile table to test whether
#we can detect that they are present on DB1 but not on DB2.
sql = ("DELETE FROM tile WHERE x_index=116")
with comparison_pair.db2.cursor() as cur:
cur.execute(sql, {})
def _copy_fresh_ingest_info(comparison_pair, test_ingest_info_table):
"""Given this database connection, collate the acquisition information
for each tile into a table. Copy this table to the production database."""
sql = ("CREATE TABLE " + test_ingest_info_table + " AS" + "\n" +
"SELECT tile_id, x_index, y_index, a.acquisition_id," + "\n" +
"a.end_datetime - a.start_datetime as aq_len," + "\n" +
"tile_class_id, tile_pathname, level_id, satellite_id," + "\n" +
"sensor_id, a.start_datetime, a.end_datetime FROM tile t\n"
"INNER JOIN dataset d on d.dataset_id=t.dataset_id" + "\n" +
"INNER JOIN acquisition a on d.acquisition_id=a.acquisition_id\n")
with comparison_pair.db2.cursor() as cur:
cur.execute(sql, {})
dbutil.TESTSERVER.copy_table_between_databases(comparison_pair.db2_name,
comparison_pair.db1_name,
test_ingest_info_table)
def _get_comparison_pairs(db_pair, test_ingest_info):
"""Given Database 2's information in test_ingest_info table, generate pairs
of corresponding tiles from Database 1 and Database 2.
Returns: 3 lists as follows:
1. production_and_test: those corresponding pairs which exist on Database 1
and Database 2.
2. production_not_test: the tiles found only on Database 1.
3. test_not_production: the tiles found only on Database 2.
Each element of the above lists is a 5-tuple:
(level_name, tile_class_id on Database 1, tile_class_id on Database 2,
tile_pathname on Database 1, tile_pathname on Database 2)."""
fuzzy_match_percentage = 15
# Strip the random suffix from the test_ingest_info table and use it
# for other tables.
random_suffix = re.match(r'.+(_\d+)', test_ingest_info).group(1)
# Match the datasets from Database 2 to those in Database 1
sql = (
"CREATE TEMPORARY TABLE datasets_join_info AS SELECT DISTINCT\n" +
"a.acquisition_id AS acquisition_id1, ti.acquisition_id AS\n" +
"acquisition_id2, level_id FROM acquisition a\n" +
"INNER JOIN " + test_ingest_info + " ti ON " +
" a.satellite_id=ti.satellite_id AND " +
" a.sensor_id=ti.sensor_id AND " +
" a.start_datetime BETWEEN " +
" ti.start_datetime - " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" ti.start_datetime + " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" a.end_datetime BETWEEN " +
" ti.end_datetime - " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" ti.end_datetime + " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len;"
)
# Find all tiles from Database 1 which appear in the datasets
sqltemp = (
"CREATE TEMPORARY TABLE tiles1 AS SELECT\n" +
"acquisition_id1, acquisition_id2, dji.level_id,\n" +
"tile_class_id AS tile_class_id1, tile_pathname AS path1,\n" +
"x_index, y_index FROM datasets_join_info dji\n" +
"INNER JOIN acquisition a ON a.acquisition_id=dji.acquisition_id1\n" +
"INNER JOIN dataset d on d.acquisition_id=a.acquisition_id AND\n" +
" d.level_id=dji.level_id\n" +
"INNER JOIN tile t ON t.dataset_id=d.dataset_id\n" +
"WHERE t.tile_class_id<>2;"
)
sql = sql + sqltemp
# Find all tiles from test ingestion
sqltemp = (
"CREATE TEMPORARY TABLE tiles2 AS SELECT\n" +
"acquisition_id1, acquisition_id2, dji.level_id,\n" +
"tile_class_id AS tile_class_id2, tile_pathname AS path2,\n" +
"x_index, y_index FROM datasets_join_info dji\n" +
"INNER JOIN " + test_ingest_info + " ti ON \n" +
" ti.acquisition_id=dji.acquisition_id2 AND\n" +
" ti.level_id=dji.level_id;"
)
sql = sql + sqltemp
# For each Database 1 tile found in the test ingest datasets, find the
# corresponding Database 2 tile if it exists.
production_all_tiles = 'tiles1_all%s' %random_suffix
test_ingest_all_tiles = 'tiles2_all%s' %random_suffix
sqltemp = (
"CREATE TABLE " + production_all_tiles + " AS SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, path1, path2\n" +
"FROM tiles1 t1 LEFT OUTER JOIN tiles2 t2 ON\n" +
"t1.acquisition_id1=t2.acquisition_id1 AND\n" +
"t1.level_id=t2.level_id AND\n" +
"t1.x_index=t2.x_index AND t1.y_index=t2.y_index\n" +
"INNER JOIN processing_level p on p.level_id=t1.level_id;"
)
sql = sql + sqltemp
# For each Database 2 tile found in the test ingest datasets, find the
# corresponding Database 1 tile if it exists.
sqltemp = (
"CREATE TABLE " + test_ingest_all_tiles + " AS SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, path1, path2\n" +
"FROM tiles2 t2 LEFT OUTER JOIN tiles1 t1 ON\n" +
"t1.acquisition_id1=t2.acquisition_id1 AND\n" +
"t1.level_id=t2.level_id AND\n" +
"t1.x_index=t2.x_index AND t1.y_index=t2.y_index\n" +
"INNER JOIN processing_level p on p.level_id=t2.level_id; "
)
sql = sql+sqltemp
# Generate list of tiles found in Database 1 and Database 2
sql_fetch_both = ("SELECT\n" +
"t1.level_name, t1.tile_class_id1, t2.tile_class_id2, \n" +
"t1.path1, t2.path2 FROM\n" +
production_all_tiles + " t1 INNER JOIN " +
test_ingest_all_tiles + " t2 ON\n" +
"t1.path1=t2.path1 AND t1.path2=t2.path2;")
# Generate list of tiles found in Database 1 but not Database 2
sql_fetch_production_not_test = ("SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, \n" +
"path1, path2 FROM\n" +
production_all_tiles + " WHERE path2 is NULL;")
# Generate list of tiles found in Database 2 but not Database 1
sql_fetch_test_not_production = ("SELECT\n" +
"level_name, tile_class_id1, tile_class_id2,\n" +
"path1, path2 FROM\n" +
test_ingest_all_tiles + " WHERE path1 is NULL;")
with db_pair.db1.cursor() as cur:
cur.execute(sql, {})
cur.execute(sql_fetch_both, {})
production_and_test = cur.fetchall()
cur.execute(sql_fetch_production_not_test, {})
production_not_test = cur.fetchall()
cur.execute(sql_fetch_test_not_production, {})
test_not_production = cur.fetchall()
db_pair.db1.drop_table(test_ingest_info)
db_pair.db1.drop_table(production_all_tiles)
db_pair.db1.drop_table(test_ingest_all_tiles)
return (production_and_test, production_not_test, test_not_production)
def _log_missing_tile_info(tile_list, dbname_present, dbname_missing, output):
"""Log information from the edge case of tiles present on dbname_present,
but missing on dbname_missing."""
if tile_list:
if len(tile_list) == 1:
number_str = " is %d tile " %len(tile_list)
else:
number_str = " are %d tiles " %len(tile_list)
output.writelines('Given the datasets from the Test Ingest process, ' \
'there%s in the %s tile ' \
'store but not in the %s tile store:\n'\
%(number_str, dbname_present, dbname_missing))
for tile in tile_list:
output.writelines('WARNING: Only in %s tilestore:' \
'%s\n'%(dbname_present, tile))
def _compare_tile_contents(list_both, output):
"""Compare the tile pairs contained in list_both. Additionally, report
those tiles that are only in Database 1, or only in Database 2.
Positional arguments: 3 lists as follows:
1. production_and_test: those corresponding pairs which exist on Database 1
and Database 2.
2. production_not_test: the tiles found only on Database 1.
3. test_not_production: the tiles found only on Database 2.
Each element of the above lists is a 5-tuple:
(level_name, tile_class_id on Database 1, tile_class_id on Database 2,
tile_pathname on Database 1, tile_pathname on Database 2).
Returns:
List of tile-path pairs (path1, path2) for which a difference has been
detected."""
#pylint:disable=too-many-locals
# Define a list of tuples (path1, path2) where the contents differ.
rec_num = 0
difference_pairs = []
for tile_pair in list_both:
rec_num += 1
is_mosaic_vrt = False
level, tile_class_id1, tile_class_id2, path1, path2 = tile_pair
output.writelines('RECORD NUMBER %d tile_class_id2=%d level=%s\n'
%(rec_num, tile_class_id2, level))
# For a mosaic tile, the tile entry may not be on the database, so
# look in mosaic_cache:
if tile_class_id2 in MOSAIC_CLASS_ID:
path1 = os.path.join(os.path.dirname(path1), 'mosaic_cache',
os.path.basename(path1))
# For non-PQA tiles, the benchmark mosaic will be .vrt extension
if level in ['NBAR', 'ORTHO']:
path1 = re.match(r'(.+)\.tif$', path1).groups(1)[0] + '.vrt'
is_mosaic_vrt = True
# Check the Geotransform, Projection and shape (unless it is a vrt)
if is_mosaic_vrt:
data1, data2, msg = (None, None, "")
else:
# Skip checking of metadata for a vrt mosaic since we will check
# with system diff command in _compare_data
data1, data2, msg = _check_tile_metadata(path1, path2)
if msg:
output.writelines(msg)
# Compare the tile contents
are_different, msg = _compare_data(level,
tile_class_id1, tile_class_id2,
path1, path2, data1, data2)
if are_different:
difference_pairs.extend((path1, path2))
if msg:
sys.stdout.writelines(msg)
output.writelines(msg)
return difference_pairs
def _check_tile_metadata(path1, path2):
"""Given two tile paths, check that the projections, geotransforms and
dimensions agree. Returns a message in string msg which, if empty,
indicates agreement on the metadata."""
# pylint:disable=too-many-branches
# pylint:disable=too-many-statements
gdal.UseExceptions()
msg = ""
data1 = None
data2 = None
# Open the tile files
try:
dset1 = gdal.Open(path1)
data1 = dset1.ReadAsArray()
except RuntimeError:
msg += "ERROR:\tBenchmark tile %s does not exist\n" %path1
dset1 = None
data1 = None
try:
dset2 = gdal.Open(path2)
data2 = dset2.ReadAsArray()
except RuntimeError:
msg += "ERROR:\tTest Ingest tile %s does not exist\n" %path2
dset2 = None
data2 = None
# Check geotransforms present
try:
geotransform1 = dset1.GetGeoTransform()
except RuntimeError:
if dset1:
# file exists but geotransform not present
msg += "\tError:\tGeotransform for %s not present\n" %path1
geotransform1 = None
try:
geotransform2 = dset2.GetGeoTransform()
except RuntimeError:
if dset2:
# file exists but geotransform not present
msg += "\tError:\tGeotransform for %s not present\n" %path2
geotransform2 = None
# Check geotransforms equal
if geotransform1 and geotransform2:
if geotransform1 != geotransform2:
msg += "\tError:\tGeotransforms disagree for %s and %s\n"\
%(path1, path2)
# Check projections present
try:
projection1 = dset1.GetProjection()
except RuntimeError:
if dset1:
# file exists but projections not present
msg += "\tError:\tProjection for %s not present\n" %path1
projection1 = None
try:
projection2 = dset2.GetProjection()
except RuntimeError:
if dset2:
# file exists but projection not present
msg += "\tError:\tProjection for %s not present\n" %path2
projection2 = None
# Check projections equal
if projection1 and projection2:
if projection1 != projection2:
msg += "\tError:\tProjections disagree for %s and %s\n"\
%(path1, path2)
# Check the dimensions of the arrays
if dset1 and dset2:
if data1.shape != data2.shape:
msg += "\tError:\tDimensions of arrays disagree for %s and %s\n" \
%(path1, path2)
if dset1 and data1 is None:
msg += "\tError:\tArray data for %s not present\n" %path1
if dset2 and data2 is None:
msg += "\tError:\tArray data for %s not present\n" %path2
return (data1, data2, msg)
def _compare_data(level, tile_class_id1, tile_class_id2, path1, path2,
data1, data2):
"""Given two arrays and the level name, check that the data arrays agree.
If the level is 'PQA' and the tile is a mosaic, then only compare mosaics
at pixels where the contiguity bit is set in both versions of the mosaic
tile. Returns a message in string msg which, if empty indicates agreement
on the tile data."""
# pylint:disable=too-many-arguments
# pylint:disable=too-many-locals
# pylint:disable=unused-argument
different = False
msg = ""
if tile_class_id2 not in MOSAIC_CLASS_ID:
if (data1 != data2).any():
msg += "Difference in Tile data: %s and %s\n" \
%(path1, path2)
else:
# mosaic tile
if level == 'PQA':
ind = (data1 == data2)
# Check that differences are due to differing treatment
# of contiguity bit.
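            # Illustrative example (assuming bit 8, i.e. the 1 << 8 mask used
            # below, is the PQA contiguity flag): if a benchmark pixel is
            # 0x1FF and the test pixel is 0x0FF, they differ only in that bit,
            # bitwise_and(0x0FF, 1 << 8) == 0, so the difference is tolerated
            # rather than reported.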
data1_diff = data1[~ind].ravel()
data2_diff = data2[~ind].ravel()
contiguity_diff = \
np.logical_or(
np.bitwise_and(data1_diff, 1 << 8) == 0,
np.bitwise_and(data2_diff, 1 << 8) == 0)
if not contiguity_diff.all():
                msg += "On %d pixels, mosaiced tile benchmark %s differs "\
"from Fresh Ingest %s\n"\
%(np.count_nonzero(~contiguity_diff), path1, path2)
different = True
else:
diff_cmd = ["diff",
"-I",
"[Ff]ilename",
"%s" %path1,
"%s" %path2
]
result = execute(diff_cmd, shell=False)
if result['stdout'] != '':
msg += "Difference between mosaic vrt files:\n" + \
result['stdout']
different = True
if result['stderr'] != '':
msg += "Error in system diff command:\n" + result['stderr']
return (different, msg)
| bsd-3-clause | -6,603,637,928,083,167,000 | 39.172185 | 86 | 0.613872 | false |
marckn/dimerizer | dimerizer/forcefield/collect/collectfromtopology.py | 1 | 2636 | import dimerizer.forcefield.basic_parsing_tools as parser
import basic_func as basic
def collect_tags(fname, atomlist):
"""
Collect the dimerized atomtypes.
fname is a topology filename, atomlist is
the list of atom INDICES (0 to N-1)
Returns:
tuple with two elements:
    1) a list of tags with idx-tag correspondence
2) the list of dimerized tags without repetitions
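    Illustrative example (hypothetical topology): with atomlist = [0, 2] and
    an [atoms] section whose type column reads CT, HC, CT, OH, the call
    returns (['CT', 'HC', 'CT', 'OH'], ['CT']).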
"""
lfile=parser.filetolist(fname)
asec = parser.get_section(lfile,"atoms")[0]
tags=[]
dtags=[]
for ln in asec[1]:
prs=parser.parse_line(ln)
if prs[0] != "Data":
continue
serial= int(prs[1][0])
tag = prs[1][1]
tags.append(tag)
if serial-1 in atomlist:
dtags.append(tag)
dtags = list(set(dtags))
return (tags,dtags)
def lines_involved(fname,tags, atlist):
"""
For each interaction line return the tags involved by the dimerization.
Return a list of tuples, each tuple contains:
1 - the kind of interaction (angle, dihedral, ...)
2 - the list of tag combinations
Input:
the topology filename
    the idx - tag correspondence
the list of atoms to be dimerized
"""
lfile=parser.filetolist(fname)
sec_bonds=parser.get_section(lfile,"bonds")
sec_pairs=parser.get_section(lfile,"pairs")
sec_angles=parser.get_section(lfile,"angles")
sec_dihedrals=parser.get_section(lfile,"(dihedrals|impropers)")
sec_cmap=parser.get_section(lfile,"cmap")
rval=[]
l1 = basic.ffentries(sec_bonds,tags,atlist,2)
if not l1 is None:
rval.append(l1)
l2 = basic.ffentries(sec_pairs,tags,atlist,2)
if not l2 is None:
rval.append(l2)
l3 = basic.ffentries(sec_angles,tags,atlist,3)
if not l3 is None:
rval.append(l3)
l4 = basic.ffentries(sec_dihedrals,tags,atlist,4)
if not l4 is None:
rval.append(l4)
l5 = basic.ffentries(sec_cmap,tags,atlist,5)
if not l5 is None:
rval.append(l5)
return rval
def dihedral_lines(fname,tags):
"""
For each dihedral interaction line return the tags.
Return a list of tuples, each tuple contains:
1 - the kind of interaction (angle, dihedral, ...) - for conformity
2 - the list of tag combinations
Input:
the topology filename
    the idx - tag correspondence
"""
lfile=parser.filetolist(fname)
sec_dihedrals=parser.get_section(lfile,"(dihedrals|impropers)")
rval=[]
l4 = basic.ffentries(sec_dihedrals,tags,range(0,len(tags)),4)
if not l4 is None:
rval.append(l4)
return rval
| gpl-3.0 | -1,301,878,778,461,048,800 | 22.963636 | 74 | 0.638088 | false |
masschallenge/django-accelerator | accelerator_abstract/models/base_startup_mentor_relationship.py | 1 | 1538 | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
CONFIRMED_RELATIONSHIP = "Confirmed"
DESIRED_RELATIONSHIP = "Desired"
DISCUSSING_RELATIONSHIP = "In Discussions With"
RELATIONSHIP_CHOICES = ((CONFIRMED_RELATIONSHIP, CONFIRMED_RELATIONSHIP),
(DISCUSSING_RELATIONSHIP, DISCUSSING_RELATIONSHIP),
(DESIRED_RELATIONSHIP, DESIRED_RELATIONSHIP))
class BaseStartupMentorRelationship(AcceleratorModel):
startup_mentor_tracking = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
"StartupMentorTrackingRecord"),
on_delete=models.CASCADE)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
status = models.CharField(
max_length=32,
choices=RELATIONSHIP_CHOICES,
default=DESIRED_RELATIONSHIP)
primary = models.BooleanField(default=False)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_startupmentorrelationship'
abstract = True
verbose_name_plural = 'Startup Mentor Relationships'
def __str__(self):
name = "Relationship of %s to %s" % (
self.startup_mentor_tracking.startup.name,
self.mentor.get_profile().full_name()
)
return name
| mit | 2,679,707,739,714,140,000 | 34.767442 | 75 | 0.676853 | false |
cogu/py-apx | util/apx_split.py | 1 | 3475 | #!/usr/bin/env python3
import os, sys
import apx
import argparse
def parse_lines_in_file(path):
"""
Parses text file path line by line and returns a list of names found in it.
The special character '#' can be used as a comment character and allows users to write line comments.
Comments does not affect what this function returns.
"""
signals = []
with open(path) as fp:
for line in fp:
# removes all text comments starting with # character
parts = line.partition('#')
line = parts[0]
# removes starting and ending whitespace
line = line.strip()
if len(line) > 0:
signals.append(line)
return signals
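# Illustrative port-names file accepted by parse_lines_in_file (hypothetical
# contents; anything after '#' on a line is treated as a comment):
#   EngineSpeed    # one port name per line
#   VehicleMode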
def create_apx_node_from_file_name(file_name, default_name):
if file_name is None:
node_name = default_name
else:
node_name = os.path.basename(file_name)
if '.apx' in node_name:
node_name = os.path.splitext(node_name)[0]
return apx.Node(node_name)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('input_file', help='The APX file to split (.apx)')
arg_parser.add_argument('port_names', nargs='*', help="Port names to be included in head")
arg_parser.add_argument('--head', help='APX File to write head result (.apx)', default=None)
arg_parser.add_argument('--tail', help='APX file to write tail result (.apx)', default=None)
arg_parser.add_argument('--head_name', help='Force new name of head APX node', default='Head')
arg_parser.add_argument('--tail_name', help='Force new name of head APX node', default='Tail')
arg_parser.add_argument('--file', help='Read port names from file instead', default=None)
arg_parser.add_argument('--sort', help='Name of the new APX node', action='store_true', default=False)
arg_parser.add_argument('--mirror', help='Forces output of head and tail to be mirrored', action='store_true', default=False)
args = arg_parser.parse_args()
if args.file is None and len(args.port_names)==0:
arg_parser.print_help()
sys.exit(1)
head_node = create_apx_node_from_file_name(args.head, args.head_name)
if args.tail is not None:
tail_node = create_apx_node_from_file_name(args.tail, args.tail_name)
else:
tail_node = None
source_node = apx.Parser().parse(args.input_file)
if args.file is not None:
port_names = parse_lines_in_file(args.file)
else:
port_names = args.port_names
processed = set()
for name in port_names:
source_port = source_node.find(name)
if (source_port is not None) and (source_port.name not in processed):
processed.add(source_port.name)
head_node.add_port_from_node(source_node, source_port)
if args.mirror:
head_node=head_node.mirror()
head_node.finalize(args.sort)
if args.head is not None:
head_node.save_apx(output_file=args.head, normalized=True)
else:
print(head_node.dumps(normalized=True))
if tail_node is not None:
if args.mirror:
tail_node=tail_node.mirror()
head_node.finalize(args.sort)
for source_port in source_node.providePorts+source_node.requirePorts:
if source_port.name not in processed:
tail_node.add_port_from_node(source_node, source_port)
tail_node.save_apx(output_file=args.tail, normalized=True)
| mit | -8,647,016,750,354,452,000 | 38.044944 | 129 | 0.641151 | false |
energicryptocurrency/energi | qa/rpc-tests/rest.py | 1 | 15355 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
######################################
# GETUTXOS: query a unspent outpoint #
######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
################################################
# GETUTXOS: now query a already spent outpoint #
################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no outpoint is returned because the tx is only in the mempool and /checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just been added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
| mit | -6,753,828,709,250,829,000 | 44.428994 | 132 | 0.616346 | false |
baykovr/rukovod | rukovod.py | 1 | 6576 | #!/usr/bin/env python
# Texas A&M University
# Department of Computer Science and Engineering
# Robert A. Baykov
import sys,datetime,time,csv,os,argparse,smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from rukovod_datatypes import Course,Section,Student
# TODO: gpg signing
USE_CRYPTO = False
# number of seconds to wait in between sending emails
TIME_DELAY = 1
# email from field, ie [email protected]
EMAIL_FROM = 'REDACTED'
# email authentications, ie password for [email protected]
EMAIL_AUTH = 'REDACTED'
# email subject
EMAIL_SUBJ = 'REDACTED'
# default, our load balanced smtp relay
SMTP_RELAY = "smtp-relay.tamu.edu"
SMTP_RELAY_PORT = 25
# -- from toolbox.py http://github.com/baykovr/toolbox
# -- Some common functions
def f_as_list(filename):
# Note: will strip out new line characters
# Return file contents as a list
# each line is a new item
try:
line_list = []
fp = open(filename)
for line in fp:
line = line.strip('\r\n')
line_list.append(line)
return line_list
except Exception, e:
print '[ ! ] in f_getlist',e
return -1
def pipe_cmd(command):
try:
return os.popen(command).read()
except Exception as e:
print e
return -1
def cmd(cmd):
print 'Trying to exec:',cmd
try:
suppression = "&>/dev/null"
return os.system(cmd)
except Exception as e:
print e
return -1
def send_mail(fromaddr,toaddr,subject,body,username,password):
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(SMTP_RELAY, SMTP_RELAY_PORT)
server.ehlo()
server.starttls()
server.ehlo()
# smtp credentials
server.login(username, password)
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
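# Typical call (the recipient address is a placeholder):
#   send_mail(EMAIL_FROM, 'student@tamu.edu', EMAIL_SUBJ, feedback_text,
#             EMAIL_FROM, EMAIL_AUTH)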
# -- This parses the course file
# the course file holds some course meta information
# Line 1 is the course name
# subsequent lines after that are sections (multiple)
# COURSE CSCE-999
# SECTION 500-600 TR 08:00-09:10 HRBB-999 rosters/roster.csv
def init_course_file(course_file):
COURSE = None
info = f_as_list(course_file)
for line in info:
line = line.split(' ')
if line[0] == 'COURSE':
COURSE = Course(line[1])
elif line[0] == 'SECTION':
COURSE.add(Section(line[1],line[2],line[3],line[4],line[5]))
print 'Loaded '
print 'Course : ',COURSE.name
print 'Sections: ',
# Now you can do some actions, such as dump all emails
# for section in COURSE.sections:
# print '=== SECTION:',section.Number
# for username in section.students:
# #print username,
# print section.students[username].Email,','
return COURSE
# -- MAIL / CRYPTO --
def mail_unsigned_feedback(dst_addr,feedback):
print 'mailing'
print 'UNCOMMENT rukovod.py@110 to actually send.'
#send_mail(EMAIL_FROM,dst_addr,EMAIL_SUBJ,feedback,EMAIL_FROM,EMAIL_AUTH)
def mail_signed_feedback(dst_addr,feedback):
print 'mailing-signed'
#TODO GPG
# The generic gradebook file has arbitrary columns
# The header row supplies the field names used to mark up each email body.
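# Illustrative gradebook CSV (hypothetical contents); the only structural
# requirement is a header column whose name contains 'email':
#   Name,Email,HW1,Exam1
#   Ada Lovelace,ada@example.edu,95,88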
def process_generic_grades_file(grades_file):
email_list = []
print '[...] FILE :',grades_file
print '[ ! ] WARNING: always double check email / roster records against this csv before mailing.'
ok = raw_input('[ ? ] continue (y/N):')
if ok.lower() != 'y':
print 'Exiting.'
return
try:
f = open(grades_file, 'rb')
reader = csv.reader(f)
header = ''
total_rows = 0
for row in reader:
if total_rows == 0:
header = row
                # -- Header --
                email_dst_index = -1
                header_row_index = 0
for header_row in header:
if 'email' in header_row.lower():
email_dst_index = header_row_index
break
header_row_index+=1
# If no such column found offer debug and exit
if email_dst_index == -1:
print '\n[ ! ] could not locate an email address column'
nok = raw_input('[ ? ] show checked columns (y/N):')
if nok.lower() == 'y':
header_row_index=0
for header_row in header:
print '\t[',header_row_index,']',header_row
header_row_index+=1
print 'Check columns, Exiting.'
return
# -- /Header --
# -- Data Rows --
else:
# Construct Email Body
# Column : Data
# Column : Date
# etc ...
email_body = ''
email_dest = row[email_dst_index]
email_body += 'BEGIN-MESSAGE'+'*'*40 +'\n'
for i in range(0,len(header)):
email_body += header[i].ljust(12) + ' ' + row[i] + '\n'
email_body += 'END-MESSAGE'+'*'*42+'\n'
email_list.append( (email_dest,email_body) )
# -- /Data Rows --
total_rows+=1
# Check
if total_rows-1 == 0:
print '[ ! ] 0 rows found, nothing to do.'
print '[...] total entries extracted:',total_rows-1 # minus header
        print '[...] estimated time to send :',TIME_DELAY*(total_rows-1),'(seconds)'
if len(email_list) > 0:
ok = raw_input('[ ? ] preview first message (y/N)')
if ok.lower() == 'y':
print 'DESTINATION:',email_list[0][0]
print email_list[0][1]
ok = raw_input('\n[ ! ] SEND ALL MAIL (y/N)')
if ok.lower() == 'y':
# MAIL-AWAY
for email in email_list:
# Dump to stdout for record
print 'MAILING',datetime.datetime.now()
print 'DESTINATION:',email[0]
print email[1]
# Mail
if USE_CRYPTO == True:
mail_signed_feedback(email[0],email[1])
else:
mail_unsigned_feedback(email[0],email[1])
# Wait
time.sleep(TIME_DELAY)
else:
print 'Exiting.'
return
else:
print '[ ! ] no mail to send, exiting.'
except Exception as e:
print '[ ! ]',e
exit(1)
finally:
f.close()
if __name__ == "__main__":
try:
# TODO, PGP
pass
except Exception as e:
print '[ ! ]',e
ok = raw_input('[ ? ] continue without crypto (y/N):')
if ok.lower() == 'y':
USE_CRYPTO = False
else:
print 'Exiting.'
exit(0)
# Parse Args
parser = argparse.ArgumentParser(description='rukovod')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c','--course',help='Course file',type=str)
group.add_argument('-g','--grades',help='Grades file',type=str)
arguments = parser.parse_args()
if arguments.course:
COURSE = init_course_file(arguments.course)
elif arguments.grades:
process_generic_grades_file(arguments.grades)
else:
print '-c / --course COURSE_FILE'
print '-g / --grades GRADES_FILE' | gpl-3.0 | 2,644,757,180,881,620,500 | 24.417671 | 99 | 0.621502 | false |
bigswitch/nova | nova/tests/unit/api/openstack/compute/test_flavors.py | 1 | 24823 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack import common
from nova.api.openstack.compute import flavors as flavors_v21
import nova.compute.flavors
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"ephemeral_gb": '20',
"swap": '10',
"disabled": False,
"vcpus": '',
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '20',
"ephemeral_gb": '10',
"swap": '5',
"disabled": False,
"vcpus": '',
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if marker in ['99999']:
raise exception.MarkerNotFound(marker)
def reject_min(db_attr, filter_attr):
return (filter_attr in filters and
int(flavor[db_attr]) < int(filters[filter_attr]))
filters = filters or {}
res = []
for (flavor_name, flavor) in FAKE_FLAVORS.items():
if reject_min('memory_mb', 'min_memory_mb'):
continue
elif reject_min('root_gb', 'min_root_gb'):
continue
res.append(flavor)
res = sorted(res, key=lambda item: item[sort_key])
output = []
marker_found = True if marker is None else False
for flavor in res:
if not marker_found and marker == flavor['flavorid']:
marker_found = True
elif marker_found:
if limit is None or len(output) < int(limit):
output.append(flavor)
return output
def fake_get_limit_and_marker(request, max_limit=1):
params = common.get_pagination_params(request)
limit = params.get('limit', max_limit)
limit = min(max_limit, limit)
marker = params.get('marker')
return limit, marker
def empty_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return []
def return_flavor_not_found(flavor_id, ctxt=None):
raise exception.FlavorNotFound(flavor_id=flavor_id)
class FlavorsTestV21(test.TestCase):
_prefix = "/v2/fake"
Controller = flavors_v21.FlavorsController
fake_request = fakes.HTTPRequestV21
_rspv = "v2/fake"
_fake = "/fake"
def setUp(self):
super(FlavorsTestV21, self).setUp()
self.flags(osapi_compute_extension=[])
fakes.stub_out_networking(self)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
self.controller = self.Controller()
def _set_expected_body(self, expected, ephemeral, swap, disabled):
# NOTE(oomichi): On v2.1 API, some extensions of v2.0 are merged
# as core features and we can get the following parameters as the
# default.
expected['OS-FLV-EXT-DATA:ephemeral'] = ephemeral
expected['OS-FLV-DISABLED:disabled'] = disabled
expected['swap'] = swap
def test_get_flavor_by_invalid_id(self):
self.stubs.Set(nova.compute.flavors,
"get_flavor_by_flavor_id",
return_flavor_not_found)
req = self.fake_request.blank(self._prefix + '/flavors/asdf')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'asdf')
def test_get_flavor_by_id(self):
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(flavor, expected)
def test_get_flavor_with_custom_link_prefix(self):
self.flags(osapi_compute_link_prefix='http://zoo.com:42',
osapi_glance_link_prefix='http://circus.com:34')
req = self.fake_request.blank(self._prefix + '/flavors/1')
flavor = self.controller.show(req, '1')
expected = {
"flavor": {
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://zoo.com:42/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://zoo.com:42" + self._fake +
"/flavors/1",
},
],
},
}
self._set_expected_body(expected['flavor'], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(expected, flavor)
def test_get_flavor_list(self):
req = self.fake_request.blank(self._prefix + '/flavors')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_with_marker(self):
self.maxDiff = None
url = self._prefix + '/flavors?limit=1&marker=1'
req = self.fake_request.blank(url)
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
'flavors_links': [
{'href': 'http://localhost/' + self._rspv +
'/flavors?limit=1&marker=2',
'rel': 'next'}
]
}
self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_list_with_invalid_marker(self):
req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_detail_with_limit(self):
url = self._prefix + '/flavors/detail?limit=1'
req = self.fake_request.blank(url)
response = self.controller.detail(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
]
self._set_expected_body(expected_flavors[0], ephemeral='20',
swap='10', disabled=False)
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['1'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['2']},
matchers.DictMatches(params))
def test_get_flavor_with_default_limit(self):
self.stubs.Set(common, "get_limit_and_marker",
fake_get_limit_and_marker)
self.flags(osapi_max_limit=1)
req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
response = self.controller.index(req)
response_list = response["flavors"]
response_links = response["flavors_links"]
expected_flavors = [
{
"id": "1",
"name": "flavor 1",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/fake/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost/fake/flavors/1",
}
]
}
]
self.assertEqual(response_list, expected_flavors)
self.assertEqual(response_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
self.assertThat({'limit': ['2'], 'marker': ['1']},
matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = self.fake_request.blank(self._prefix + '/flavors/detail')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "1",
"name": "flavor 1",
"ram": "256",
"disk": "10",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/1",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/1",
},
],
},
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], ephemeral='20',
swap='10', disabled=False)
self._set_expected_body(expected['flavors'][1], ephemeral='10',
swap='5', disabled=False)
self.assertEqual(expected, flavor)
def test_get_empty_flavor_list(self):
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
empty_get_all_flavors_sorted_list)
req = self.fake_request.blank(self._prefix + '/flavors')
flavors = self.controller.index(req)
expected = {'flavors': []}
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
# Flavor lists may be filtered by minRam.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
# Ensure you cannot list flavors with invalid minRam param.
req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
# Flavor lists may be filtered by minDisk.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
# Ensure you cannot list flavors with invalid minDisk param.
req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_detail_min_ram_and_min_disk(self):
"""Tests that filtering work on flavor details and that minRam and
minDisk filters can be combined
"""
req = self.fake_request.blank(self._prefix + '/flavors/detail'
'?minRam=256&minDisk=20')
flavor = self.controller.detail(req)
expected = {
"flavors": [
{
"id": "2",
"name": "flavor 2",
"ram": "512",
"disk": "20",
"vcpus": "",
"links": [
{
"rel": "self",
"href": "http://localhost/" + self._rspv +
"/flavors/2",
},
{
"rel": "bookmark",
"href": "http://localhost" + self._fake +
"/flavors/2",
},
],
},
],
}
self._set_expected_body(expected['flavors'][0], ephemeral='10',
swap='5', disabled=False)
self.assertEqual(expected, flavor)
class DisabledFlavorsWithRealDBTestV21(test.TestCase):
"""Tests that disabled flavors should not be shown nor listed."""
Controller = flavors_v21.FlavorsController
_prefix = "/v2"
fake_request = fakes.HTTPRequestV21
def setUp(self):
super(DisabledFlavorsWithRealDBTestV21, self).setUp()
# Add a new disabled type to the list of flavors
self.req = self.fake_request.blank(self._prefix + '/flavors')
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
self.disabled_type = self._create_disabled_instance_type()
self.addCleanup(self.disabled_type.destroy)
self.inst_types = objects.FlavorList.get_all(self.admin_context)
self.controller = self.Controller()
def _create_disabled_instance_type(self):
flavor = objects.Flavor(context=self.admin_context,
name='foo.disabled', flavorid='10.disabled',
memory_mb=512, vcpus=2, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=1.0,
vcpu_weight=1, disabled=True, is_public=True,
extra_specs={}, projects=[])
flavor.create()
return flavor
def test_index_should_not_list_disabled_flavors_to_user(self):
self.context.is_admin = False
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids - set([disabled_flavorid]),
api_flavorids)
def test_index_should_list_disabled_flavors_to_admin(self):
self.context.is_admin = True
flavor_list = self.controller.index(self.req)['flavors']
api_flavorids = set(f['id'] for f in flavor_list)
db_flavorids = set(i['flavorid'] for i in self.inst_types)
disabled_flavorid = str(self.disabled_type['flavorid'])
self.assertIn(disabled_flavorid, db_flavorids)
self.assertEqual(db_flavorids, api_flavorids)
def test_show_should_include_disabled_flavor_for_user(self):
"""Counterintuitively we should show disabled flavors to all users and
not just admins. The reason is that, when a user performs a server-show
request, we want to be able to display the pretty flavor name ('512 MB
Instance') and not just the flavor-id even if the flavor id has been
marked disabled.
"""
self.context.is_admin = False
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
def test_show_should_include_disabled_flavor_for_admin(self):
self.context.is_admin = True
flavor = self.controller.show(
self.req, self.disabled_type['flavorid'])['flavor']
self.assertEqual(flavor['name'], self.disabled_type['name'])
class ParseIsPublicTestV21(test.TestCase):
Controller = flavors_v21.FlavorsController
def setUp(self):
super(ParseIsPublicTestV21, self).setUp()
self.controller = self.Controller()
def assertPublic(self, expected, is_public):
self.assertIs(expected, self.controller._parse_is_public(is_public),
'%s did not return %s' % (is_public, expected))
def test_None(self):
self.assertPublic(True, None)
def test_truthy(self):
self.assertPublic(True, True)
self.assertPublic(True, 't')
self.assertPublic(True, 'true')
self.assertPublic(True, 'yes')
self.assertPublic(True, '1')
def test_falsey(self):
self.assertPublic(False, False)
self.assertPublic(False, 'f')
self.assertPublic(False, 'false')
self.assertPublic(False, 'no')
self.assertPublic(False, '0')
def test_string_none(self):
self.assertPublic(None, 'none')
self.assertPublic(None, 'None')
def test_other(self):
self.assertRaises(
webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
| apache-2.0 | 774,215,699,162,965,000 | 35.666174 | 79 | 0.457237 | false |
flux3dp/fluxghost | fluxghost/api/discover.py | 1 | 4822 |
from time import time
import logging
import json
from fluxghost import g
logger = logging.getLogger("API.DISCOVER")
def get_online_message(source, device):
st = None
doc = {
"uuid": device.uuid.hex,
"alive": True,
"source": source,
"serial": device.serial,
"version": str(device.version),
"model": device.model_id,
}
if source == "lan":
doc.update({
"name": device.name,
"ipaddr": device.ipaddr,
"password": device.has_password,
})
st = device.status
elif source == "h2h":
st = device.device_status
doc.update({
"name": device.nickname,
"addr": device.addr,
})
else:
st = {}
doc.update({
"st_ts": st.get("st_ts"),
"st_id": st.get("st_id"),
"st_prog": st.get("st_prog"),
"head_module": st.get("st_head", st.get("head_module")),
"error_label": st.get("st_err", st.get("error_label"))
})
return doc
def get_offline_message(source, device=None, uuid=None):
return {
"uuid": device.uuid.hex if device else uuid.hex,
"alive": False,
"source": source
}
def discover_api_mixin(cls):
class DiscoverApi(cls):
def __init__(self, *args):
super().__init__(*args)
self.lan_alive_devices = set()
self.usb_alive_addr = {}
self.server.discover_devices.items()
self.POOL_TIME = 1.0
def review_lan_devices(self):
t = time()
with self.server.discover_mutex:
for uuid, device in self.server.discover_devices.items():
if t - device.last_update > 30:
# Dead devices
if uuid in self.lan_alive_devices:
self.lan_alive_devices.remove(uuid)
self.send_text(self.build_dead_response("lan",
device))
else:
# Alive devices
self.lan_alive_devices.add(uuid)
self.send_text(self.build_response("lan", device))
def review_usb_devices(self):
rmlist = []
for addr, uuid in self.usb_alive_addr.items():
usbprotocol = g.USBDEVS.get(addr)
if usbprotocol and usbprotocol.uuid == uuid:
pass
else:
rmlist.append(addr)
self.send_text(self.build_dead_response("h2h", uuid=uuid))
for addr in rmlist:
self.usb_alive_addr.pop(addr)
for addr, usbdevice in g.USBDEVS.items():
if addr not in self.usb_alive_addr:
self.usb_alive_addr[addr] = usbdevice.uuid
self.send_text(self.build_response("h2h", usbdevice))
def on_review_devices(self):
self.review_lan_devices()
self.review_usb_devices()
def on_text_message(self, message):
try:
payload = json.loads(message)
except Exception as e:
self.traceback("BAD_PARAMS")
return
cmd = payload.get("cmd")
if cmd == "poke":
try:
self.server.discover.poke(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Poke error: %s", repr(e))
elif cmd == "poketcp":
try:
self.server.discover.add_poketcp_ipaddr(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Poke TCP error: %s", repr(e))
elif cmd == 'testtcp':
print(payload["ipaddr"])
try:
self.server.discover.test_poketcp_ipaddr(payload["ipaddr"])
except OSError as e:
pass
except Exception as e:
logger.error("Test TCP error: %s", repr(e))
else:
self.send_error("L_UNKNOWN_COMMAND")
def on_loop(self):
self.on_review_devices()
self.POOL_TIME = min(self.POOL_TIME + 1.0, 3.0)
def on_closed(self):
pass
def build_dead_response(self, source, device=None, uuid=None):
return json.dumps(
get_offline_message(source, device=device, uuid=uuid))
def build_response(self, source, device):
return json.dumps(get_online_message(source, device))
return DiscoverApi
| agpl-3.0 | 3,847,416,626,889,435,600 | 31.362416 | 79 | 0.48652 | false |
rocky/python3-trepan | trepan/lib/printing.py | 1 | 4182 | # -*- coding: utf-8 -*-
# Copyright (C) 2007-2010, 2015, 2020 Rocky Bernstein
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, types
def print_dict(s, obj, title):
if hasattr(obj, "__dict__"):
obj = obj.__dict__
pass
if isinstance(obj, dict):
s += "\n%s:\n" % title
keys = list(obj.keys())
keys.sort()
for key in keys:
s += " %s:\t%s\n" % (repr(key), obj[key])
pass
pass
return s
def print_argspec(obj, obj_name):
    """A slightly decorated version of inspect.formatargspec"""
try:
return obj_name + inspect.formatargspec(*inspect.getargspec(obj))
except:
return None
return # Not reached
def print_obj(arg, frame, format=None, short=False):
"""Return a string representation of an object """
try:
if not frame:
# ?? Should we have set up a dummy globals
# to have persistence?
obj = eval(arg, None, None)
else:
obj = eval(arg, frame.f_globals, frame.f_locals)
pass
except:
return 'No symbol "' + arg + '" in current context.'
# format and print
what = arg
if format:
what = format + " " + arg
obj = printf(obj, format)
s = "%s = %s" % (what, obj)
if not short:
s += "\ntype = %s" % type(obj)
if callable(obj):
argspec = print_argspec(obj, arg)
if argspec:
s += ":\n\t"
if inspect.isclass(obj):
s += "Class constructor information:\n\t"
obj = obj.__init__
                elif not inspect.isroutine(obj):
                    # A callable instance; the Python 2-only types.InstanceType
                    # check would raise AttributeError on Python 3.
                    obj = obj.__call__
                    pass
s += argspec
pass
# Try to list the members of a class.
# Not sure if this is correct or the
# best way to do.
s = print_dict(s, obj, "object variables")
if hasattr(obj, "__class__"):
s = print_dict(s, obj.__class__, "class variables")
pass
return s
pconvert = {"c": chr, "x": hex, "o": oct, "f": float, "s": str}
twos = (
"0000",
"0001",
"0010",
"0011",
"0100",
"0101",
"0110",
"0111",
"1000",
"1001",
"1010",
"1011",
"1100",
"1101",
"1110",
"1111",
)
def printf(val, fmt):
global pconvert, twos
if not fmt:
fmt = " " # not 't' nor in pconvert
# Strip leading '/'
if fmt[0] == "/":
fmt = fmt[1:]
f = fmt[0]
if f in pconvert.keys():
try:
return pconvert[f](val)
except:
return str(val)
# binary (t is from 'twos')
if f == "t":
try:
res = ""
while val:
res = twos[val & 0xF] + res
val = val >> 4
return res
except:
return str(val)
return str(val)
if __name__ == "__main__":
print(print_dict("", globals(), "my globals"))
print("-" * 40)
print(print_obj("print_obj", None))
print("-" * 30)
print(print_obj("Exception", None))
print("-" * 30)
print(print_argspec("Exception", None))
class Foo:
def __init__(self, bar=None):
pass
pass
print(print_obj("Foo.__init__", None))
print("-" * 30)
print(print_argspec(Foo.__init__, "__init__"))
assert printf(31, "/o") == "037"
assert printf(31, "/t") == "00011111"
assert printf(33, "/c") == "!"
assert printf(33, "/x") == "0x21"
| gpl-3.0 | 5,689,020,719,061,596,000 | 25.980645 | 73 | 0.518173 | false |
chennan47/OSF-Offline | start.py | 1 | 1174 | import sys
from PyQt5.QtWidgets import QApplication, QMessageBox, QSystemTrayIcon
from osfoffline import utils
from osfoffline.application.main import OSFApp
from osfoffline.database_manager.db import drop_db
def running_warning():
warn_app = QApplication(sys.argv)
QMessageBox.information(
None,
"Systray",
"OSF-Offline is already running. Check out the system tray."
)
warn_app.quit()
sys.exit(0)
def start():
# Start logging all events
if '--drop' in sys.argv:
drop_db()
utils.start_app_logging()
if sys.platform == 'win32':
from server import SingleInstance
single_app = SingleInstance()
if single_app.already_running():
running_warning()
app = QApplication(sys.argv)
if not QSystemTrayIcon.isSystemTrayAvailable():
QMessageBox.critical(
None,
"Systray",
"Could not detect a system tray on this system"
)
sys.exit(1)
QApplication.setQuitOnLastWindowClosed(False)
osf = OSFApp()
osf.start()
osf.hide()
sys.exit(app.exec_())
if __name__ == "__main__":
start()
| apache-2.0 | 8,891,531,229,938,620,000 | 20.345455 | 70 | 0.626065 | false |
onedata/cluster-example | bamboos/docker/package.py | 1 | 3382 | #!/usr/bin/env python
# coding=utf-8
"""Author: Tomasz Lichon
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Build packages in a dockerized environment, as the user 'package'.
Run the script with the -h flag to learn about the script's running options.
"""
from os.path import expanduser
import argparse
import os
import sys
from environment import docker
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Run a command inside a dockerized development environment.')
parser.add_argument(
'-i', '--image',
action='store',
default='onedata/builder:v25',
help='docker image to use for building',
dest='image')
parser.add_argument(
'-s', '--src',
action='store',
default=os.getcwd(),
help='source directory to run command from',
dest='src')
parser.add_argument(
'-d', '--dst',
action='store',
default=None,
help='destination directory where the build will be stored; defaults '
'to source dir if unset',
dest='dst')
parser.add_argument(
'-k', '--keys',
action='store',
default=expanduser("~/.ssh"),
help='directory of ssh keys used for dependency fetching',
dest='keys')
parser.add_argument(
'-r', '--reflect-volume',
action='append',
default=[],
help="host's paths to reflect in container's filesystem",
dest='reflect')
parser.add_argument(
'-c', '--command',
action='store',
default='make',
help='command to run in the container',
dest='command')
parser.add_argument(
'-w', '--workdir',
action='store',
default=None,
help='path to the working directory; defaults to destination dir if unset',
dest='workdir')
parser.add_argument(
'-e', '--env',
action='append',
default=[],
help='env variables to set in the environment',
dest='envs')
[args, pass_args] = parser.parse_known_args()
destination = args.dst if args.dst else args.src
workdir = args.workdir if args.workdir else destination
command = '''
import os, shutil, subprocess, sys
os.environ['HOME'] = '/home/package'
ssh_home = '/home/package/.ssh'
if '{src}' != '{dst}':
ret = subprocess.call(['rsync', '--archive', '/tmp/src/', '{dst}'])
if ret != 0:
sys.exit(ret)
shutil.copytree('/tmp/keys', ssh_home)
for root, dirs, files in os.walk(ssh_home):
for dir in dirs:
os.chmod(os.path.join(root, dir), 0o700)
for file in files:
os.chmod(os.path.join(root, file), 0o600)
sh_command = 'eval $(ssh-agent) > /dev/null; ssh-add 2>&1; {command} {params}'
ret = subprocess.call(['sh', '-c', sh_command])
sys.exit(ret)
'''
command = command.format(
command=args.command,
params=' '.join(pass_args),
src=args.src,
dst=destination)
reflect = [(destination, 'rw')]
reflect.extend(zip(args.reflect, ['rw'] * len(args.reflect)))
ret = docker.run(tty=True,
interactive=True,
rm=True,
reflect=reflect,
volumes=[(args.keys, '/tmp/keys', 'ro'),
(args.src, '/tmp/src', 'ro')],
workdir=workdir,
image=args.image,
run_params=(['--privileged=true']),
command=['python', '-c', command],
user='package')
sys.exit(ret)
| mit | -2,463,403,816,468,090,400 | 25.015385 | 79 | 0.617978 | false |
pandich/pymetrics | pymetrics/histogram.py | 1 | 1143 | import numpy as np
from metric import metric_decorated
from statistical_metric import StatisticalMetric
from pymetrics.unit.timeunit import now
time_key = 'time'
value_key = 'value'
class Histogram(StatisticalMetric):
time_series_dtype = np.dtype([
(time_key, float),
(value_key, float),
])
def __init__(self, name, dtype=time_series_dtype):
StatisticalMetric.__init__(self, name, dtype)
return
def update(self, event_time=None, value=None):
self.append((event_time or now(), value or 1))
return
def values(self):
return self._series[value_key]
def values_by_time(self, threshold):
filtered = np.where(self._series[time_key] >= threshold)
return self._series[filtered][value_key]
def __exit__(self, value_type, value, traceback):
self.update(value)
return
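# Minimal usage sketch (assumes the StatisticalMetric base class provides the
# append() used by Histogram.update() above):
#   h = Histogram('request_latency')
#   h.update(value=0.25)      # stores the row (now(), 0.25)
#   all_values = h.values()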
def histogrammed(target=None, **options):
def after(record):
        record.histogram.update(value=record.result)
return
return metric_decorated(
target,
Histogram,
histogrammed,
after=after,
**options
)
| apache-2.0 | 4,475,350,069,275,106,300 | 22.326531 | 64 | 0.627297 | false |
crazyyoung01/vv | vn.trader/ctaAlgo/multiCtaTemplate.py | 1 | 7374 | # encoding: UTF-8
'''
This file contains the strategy development template used by the CTA engine; user strategies must inherit from the CtaTemplate class.
'''
from ctaBase import *
from vtConstant import *
########################################################################
class MultiCtaTemplate(object):
"""MultiCTA策略模板"""
# 策略类的名称和作者
className = 'MultiCtaTemplate'
author = EMPTY_UNICODE
    # Names of the MongoDB databases; the bar database defaults to 1-minute bars
tickDbName = TICK_DB_NAME
barDbName = MINUTE_DB_NAME
#barDbName = DATA_DB_NAME
    # Basic strategy parameters
    name = EMPTY_UNICODE                # strategy instance name
    productClass = EMPTY_STRING         # product class (only needed by the IB gateway)
    currency = EMPTY_STRING             # currency (only needed by the IB gateway)
    # Basic strategy variables, managed by the engine
    inited = False                      # whether the strategy has been initialized
    trading = False                     # whether trading has started, managed by the engine
    pos = {}                            # positions
    vtSymbolList = []                   # vt system symbols of the traded contracts
    # Parameter list, holding the parameter names
paramList = ['name',
'className',
'author']
vtSymbolKey = "vtSymbol"
    # Variable list, holding the variable names
varList = ['inited',
'trading',
'pos']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
self.ctaEngine = ctaEngine
        # Set the strategy parameters
if setting:
d = self.__dict__
for key in self.paramList:
if key in setting:
d[key] = setting[key]
        # Load the contract symbols
        if self.vtSymbolKey in setting:
            self.vtSymbolList = setting[self.vtSymbolKey]
        # Initialize positions
for symbol in self.vtSymbolList:
self.pos[symbol] = 0
#----------------------------------------------------------------------
    def onInit(self):
        """Initialize the strategy (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Called on a tick data push (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onOrder(self, order):
        """Called on an order update push (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onTrade(self, trade):
        """Called on a trade push (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onBar(self, bar):
        """Called on a bar push (must be implemented by the subclass)"""
        raise NotImplementedError
    #----------------------------------------------------------------------
    def onAccount(self, account):
        """Called on an account update push (must be implemented by the subclass)"""
        raise NotImplementedError
#----------------------------------------------------------------------
    def buy(self, symbol, price, volume, stop=False):
        """Buy to open (long entry)"""
        return self.sendOrder(CTAORDER_BUY, symbol, price, volume, stop)
    #----------------------------------------------------------------------
    def sell(self, symbol, price, volume, stop=False):
        """Sell to close (exit long)"""
        return self.sendOrder(CTAORDER_SELL, symbol, price, volume, stop)
    #----------------------------------------------------------------------
    def short(self, symbol, price, volume, stop=False):
        """Sell to open (short entry)"""
        return self.sendOrder(CTAORDER_SHORT, symbol, price, volume, stop)
    #----------------------------------------------------------------------
    def cover(self, symbol, price, volume, stop=False):
        """Buy to close (cover short)"""
        return self.sendOrder(CTAORDER_COVER, symbol, price, volume, stop)
#----------------------------------------------------------------------
def sendOrder(self, orderType, symbol, price, volume, stop=False):
"""发送委托"""
if self.trading:
            # If stop is True, send a local stop order instead
if stop:
self.writeCtaLog(u'%s1' %orderType)
vtOrderID = self.ctaEngine.sendStopOrder(symbol, orderType, price, volume, self)
else:
self.writeCtaLog(u'%s2' %orderType)
vtOrderID = self.ctaEngine.sendOrder(symbol, orderType, price, volume, self)
return vtOrderID
else:
            # Return an empty string when trading is stopped
return ''
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
# 如果发单号为空字符串,则不进行后续操作
if not vtOrderID:
return
if STOPORDERPREFIX in vtOrderID:
self.ctaEngine.cancelStopOrder(vtOrderID)
else:
self.ctaEngine.cancelOrder(vtOrderID)
#----------------------------------------------------------------------
def insertTick(self, tick):
"""向数据库中插入tick数据"""
self.ctaEngine.insertData(self.tickDbName, tick.vtSymbol, tick)
#----------------------------------------------------------------------
def insertBar(self, bar):
"""向数据库中插入bar数据"""
self.ctaEngine.insertData(self.barDbName, bar.vtSymbol, bar)
#----------------------------------------------------------------------
def loadTick(self, symbol, days):
"""读取tick数据"""
return self.ctaEngine.loadTick(self.tickDbName, symbol, days)
#----------------------------------------------------------------------
def loadBar(self, symbol, days):
"""读取bar数据"""
return self.ctaEngine.loadBar(self.barDbName, symbol, days)
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录CTA日志"""
content = self.name + ':' + content
self.ctaEngine.writeCtaLog(content)
#----------------------------------------------------------------------
def putEvent(self):
"""发出策略状态变化事件"""
self.ctaEngine.putStrategyEvent(self.name)
#----------------------------------------------------------------------
def getEngineType(self):
"""查询当前运行的环境"""
return self.ctaEngine.engineType
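# Minimal subclass sketch (illustrative only; attribute names such as tick.vtSymbol and
# tick.lastPrice follow the usual vn.trader conventions):
#
#   class DemoStrategy(MultiCtaTemplate):
#       className = 'DemoStrategy'
#
#       def onInit(self):
#           self.writeCtaLog(u'strategy initialized')
#
#       def onTick(self, tick):
#           if self.trading and self.pos.get(tick.vtSymbol, 0) == 0:
#               self.buy(tick.vtSymbol, tick.lastPrice, 1)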
| mit | 1,487,841,099,240,909,300 | 33.378947 | 96 | 0.402174 | false |
TylerTemp/wordz | wordz/single_select.py | 1 | 3137 | import textwrap
import curses
import logging
from wordz import keys
logger = logging.getLogger('single_select')
class SingleSelect(object):
check = '\u2713'
def __init__(self, content, current=0, default=0, padding=0, lineno=False):
self.content = content
self.current = current
self.select = default
self.padding = padding
self.lineno = lineno
def render(self, screen):
width = screen.width - self.padding
for index, raw_content in enumerate(self.content):
color = screen.HIGHLIGHT if index == self.current else None
if self.lineno:
lineno = ' %s.' % (index + 1)
else:
lineno = ''
prefix = (' ' * self.padding) + ('%s%s ' % (self.check if color else ' ', lineno))
_, old_x = screen.getyx()
screen.write(prefix, color)
_, new_x = screen.getyx()
indent = ' ' * new_x
for each_ch in raw_content:
screen.write(each_ch, color)
if each_ch == '\n':
screen.write(indent, color)
else:
_, now_x = screen.getyx()
if now_x >= width - 1:
screen.write('\n' + indent, color)
else:
screen.write('\n')
def handler(self, k):
# if k in (keys.KEY_ENTER, keys.KEY_SPACE):
# self.select = self.current
# return
if k == keys.KEY_UP:
offset = -1
elif k == keys.KEY_DOWN:
offset = 1
else:
allowed = len(self.content)
for each in ('A', 'a', '1'):
asc_num = ord(each)
index = k - asc_num
if 0 <= index < allowed:
break
else:
return False
self.select = self.current = index
return True
max_num = len(self.content) - 1
current = self.current + offset
if current < 0:
current = max_num
elif current > max_num:
current = 0
self.current = self.select = current
return False
def get_selected(self):
return self.select
def select_item(self, index):
self.select = index
def get(self):
return self.select
def main(stdscr):
import string
import sys
import atexit
ss = SingleSelect([('中文%s' % x) * 20 for x in range(4)], padding=5, lineno=True)
# ss = SingleSelect('ABCD', [(string.ascii_letters) * 4 for x in range(4)])
screen = Screen(stdscr)
atexit.register(lambda: sys.__stdout__.write('select %s' % ss.select))
while True:
with screen.handle(ss.handler) as s:
s.clear()
ss.render(s)
if __name__ == '__main__':
from wordz.screen import Screen
from wordz.bashlog import filelogger, stdoutlogger, DEBUG
from wordz.main import redirect
stdoutlogger(None, DEBUG)
redirect()
# filelogger('/tmp/wordz.log', None, DEBUG)
curses.wrapper(main)
| gpl-3.0 | -9,084,918,205,113,767,000 | 27.225225 | 94 | 0.514204 | false |
jerryryle/python-lz4ex | tests/lz4hc_test.py | 1 | 1734 | from lz4ex import lz4, lz4hc
import unittest
class TestLZ4(unittest.TestCase):
def test_compress_default(self):
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"
input_data_size = len(input_data)
compressed = lz4hc.compress_hc(input_data, lz4hc.COMPRESSIONLEVEL_MAX)
decompressed = lz4.decompress_safe(compressed, input_data_size)
self.assertEqual(input_data, decompressed)
def test_create_and_free_stream(self):
stream = lz4hc.create_hc_stream(4*1024, lz4hc.COMPRESSIONLEVEL_MAX)
self.assertNotEqual(stream, None)
lz4hc.free_hc_stream(stream)
def test_stream_compress(self):
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123"
block_size = int((len(input_data)/2)+1)
stream = lz4hc.create_hc_stream(block_size, lz4hc.COMPRESSIONLEVEL_MAX)
self.assertNotEqual(stream, None)
compressed_data1 = lz4hc.compress_hc_continue(stream, input_data[:block_size])
compressed_data2 = lz4hc.compress_hc_continue(stream, input_data[block_size:])
lz4hc.free_hc_stream(stream)
stream = lz4.create_decode_stream(block_size)
self.assertNotEqual(stream, None)
decompressed_data1 = lz4.decompress_safe_continue(stream, compressed_data1)
decompressed_data2 = lz4.decompress_safe_continue(stream, compressed_data2)
lz4.free_decode_stream(stream)
decompressed_data = decompressed_data1+decompressed_data2
self.assertEqual(decompressed_data, input_data)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 8,206,075,741,504,018,000 | 39.325581 | 136 | 0.72203 | false |
EICT/C-BAS | src/vendor/geni_trust/gen-certs.py | 1 | 10956 | #!/usr/bin/env python
import sys
import os.path
import optparse
import geniutil
import datetime
import subprocess
import uuid
CA_CERT_FILE = 'ca-cert.pem'
CA_KEY_FILE = 'ca-key.pem'
SA_CERT_FILE = 'sa-cert.pem'
SA_KEY_FILE = 'sa-key.pem'
MA_CERT_FILE = 'ma-cert.pem'
MA_KEY_FILE = 'ma-key.pem'
AM_CERT_FILE = 'am-cert.pem'
AM_KEY_FILE = 'am-key.pem'
SERVER_CERT_FILE = 'ch-cert.pem'
SERVER_KEY_FILE = 'ch-key.pem'
ADMIN_NAME = 'root'
ADMIN_EMAIL = '%[email protected]' % (ADMIN_NAME,)
ADMIN_KEY_FILE = '%s-key.pem' % (ADMIN_NAME,)
ADMIN_CERT_FILE = '%s-cert.pem' % (ADMIN_NAME,)
ADMIN_CRED_FILE = '%s-cred.xml' % (ADMIN_NAME,)
USER_NAME = 'alice'
USER_EMAIL = '%[email protected]' % (USER_NAME,)
USER_KEY_FILE = '%s-key.pem' % (USER_NAME,)
USER_CERT_FILE = '%s-cert.pem' % (USER_NAME,)
USER_CRED_FILE = '%s-cred.xml' % (USER_NAME,)
USER_URN_FILE = '%s-urn.xml' % (USER_NAME,)
BAD_USER_NAME = 'malcom'
BAD_USER_EMAIL = '%[email protected]' % (BAD_USER_NAME,)
BAD_USER_KEY_FILE = '%s-key.pem' % (BAD_USER_NAME,)
BAD_USER_CERT_FILE = '%s-cert.pem' % (BAD_USER_NAME,)
BAD_USER_CRED_FILE = '%s-cred.xml' % (BAD_USER_NAME,)
SLICE_NAME = 'pizzaslice'
SLICE_CRED_FILE = 'pizzaslice_cred.xml'
EXPEDIENT_NAME = 'expedient'
EXPEDIENT_EMAIL = '%[email protected]' % (EXPEDIENT_NAME,)
EXPEDIENT_KEY_FILE = '%s-key.pem' % (EXPEDIENT_NAME,)
EXPEDIENT_CERT_FILE = '%s-cert.pem' % (EXPEDIENT_NAME,)
EXPEDIENT_CRED_FILE = '%s-cred.xml' % (EXPEDIENT_NAME,)
cert_serial_number = 10
CRED_EXPIRY = datetime.datetime.utcnow() + datetime.timedelta(days=100)
def write_file(dir_path, filename, str, silent=False):
path = os.path.join(dir_path, filename)
with open(path, 'w') as f:
f.write(str)
if not silent:
print " Wrote file %s" % (path,)
def read_file(dir_path, filename):
path = os.path.join(dir_path, filename)
contents = None
with open(path, 'r') as f:
contents = f.read()
return contents
def insert_user(username, urn, cert, creds, uuid, firstName, lastName, email, clearDB=False):
import pymongo
client = pymongo.MongoClient('localhost', 27017)
database = client['ohouse']
if clearDB:
database['ma'].drop()
database['sa'].drop()
create_fields = {"MEMBER_CERTIFICATE": cert,
"MEMBER_UID" : uuid,
"MEMBER_FIRSTNAME": firstName,
"MEMBER_LASTNAME": lastName,
"MEMBER_USERNAME": username,
"MEMBER_EMAIL": email,
"MEMBER_CREDENTIALS": creds,
"MEMBER_URN": urn,
"type" : "member"}
database['ma'].insert(create_fields)
if __name__ == "__main__":
parser = optparse.OptionParser(usage = "usage: %prog directory_path")
parser.add_option("--silent", action="store_true", help="Silence output", default=False)
parser.add_option("--authority", help="Authority to use", default="")
parser.add_option("--ca_cert_path", help="Path to CA certificate files ca-cert.pem and ca-key.pem (defaults to None)", default=None)
opts, args = parser.parse_args(sys.argv)
if len(args) == 1: # no args given, index 0 is the script name
parser.print_help()
sys.exit(0)
#Simple test for xmlsec1 presence on system
try :
with open(os.devnull, "w") as null:
subprocess.call(["xmlsec1", "-h"], stdout = null, stderr = null)
except OSError:
print "xmlsec1 not found. Please install xmsec1 (http://www.aleksey.com/xmlsec/)."
sys.exit(0)
dir_path = args[1]
if not os.path.isdir(dir_path):
raise ValueError("The given path does not exist.")
#<UT>
if not opts.authority:
var = raw_input("Please enter CBAS authority/hostname (default: cbas.eict.de) ")
if not var:
authority= 'cbas.eict.de'
else:
authority = var
else:
authority = opts.authority
if not opts.ca_cert_path:
if not opts.silent:
print "Creating CA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ca')
cert_serial_number += 1
ca_c, ca_pu, ca_pr = geniutil.create_certificate(urn, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, CA_CERT_FILE, ca_c, opts.silent)
write_file(dir_path, CA_KEY_FILE, ca_pr, opts.silent)
else:
if not os.path.isdir(opts.ca_cert_path):
raise ValueError("The given path for CA certificate files does not exist.")
        ca_c = read_file(opts.ca_cert_path, CA_CERT_FILE)
        ca_pr = read_file(opts.ca_cert_path, CA_KEY_FILE)
        authority_urn, _, _ = geniutil.extract_certificate_info(ca_c)
        authority = geniutil.decode_urn(authority_urn)[0]
if not opts.silent:
print "Using CA certificate from "+authority
if not opts.silent:
print "Creating SA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'sa')
cert_serial_number += 1
sa_c, sa_pu, sa_pr = geniutil.create_certificate(urn, ca_pr, ca_c, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, SA_CERT_FILE, sa_c, opts.silent)
write_file(dir_path, SA_KEY_FILE, sa_pr, opts.silent)
if not opts.silent:
print "Creating MA certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ma')
cert_serial_number += 1
ma_c, ma_pu, ma_pr = geniutil.create_certificate(urn, ca_pr, ca_c, is_ca=True, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, MA_CERT_FILE, ma_c, opts.silent)
write_file(dir_path, MA_KEY_FILE, ma_pr, opts.silent)
if not opts.silent:
print "Creating AM certificate"
urn = geniutil.encode_urn(authority, 'authority', 'am')
cert_serial_number += 1
am_c, am_pu, am_pr = geniutil.create_certificate(urn, ca_pr, ca_c, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, AM_CERT_FILE, am_c, opts.silent)
write_file(dir_path, AM_KEY_FILE, am_pr, opts.silent)
if not opts.silent:
print "--------------------"
print "You may want to configure the above certificates & private keys in your SA/MA/AM servers."
print "Also, you may want to add the SA & MA certificates to the trusted_roots of the AM servers."
print "--------------------"
if not opts.silent:
print "Creating server certificate"
urn = geniutil.encode_urn(authority, 'authority', 'ch')
cert_serial_number += 1
server_c, _, server_pr = geniutil.create_certificate(urn, ca_pr, ca_c, serial_number=cert_serial_number, life_days=10000)
write_file(dir_path, SERVER_CERT_FILE, server_c, opts.silent)
write_file(dir_path, SERVER_KEY_FILE, server_pr, opts.silent)
if not opts.silent:
print "Creating test user cert and cred (valid, signed by MA)"
urn = geniutil.encode_urn(authority, 'user', USER_NAME)
cert_serial_number += 1
u_c,u_pu,u_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=USER_EMAIL,
serial_number=cert_serial_number, uuidarg=str(uuid.uuid4()))
write_file(dir_path, USER_CERT_FILE, u_c, opts.silent)
write_file(dir_path, USER_KEY_FILE, u_pr, opts.silent)
u_cred = geniutil.create_credential_ex(u_c, u_c, ma_pr, ma_c, ['PROJECT_CREATE', 'PROJECT_REMOVE', 'SLICE_CREATE'], CRED_EXPIRY)
write_file(dir_path, USER_CRED_FILE, u_cred, opts.silent)
write_file(dir_path, USER_URN_FILE, urn, opts.silent)
if not opts.silent:
print "Creating bad test user cert and cred (invalid, self-signed)"
urn = geniutil.encode_urn(authority, 'user', BAD_USER_NAME)
cert_serial_number += 1
bu_c,bu_pu,bu_pr = geniutil.create_certificate(urn, email=BAD_USER_EMAIL, serial_number=cert_serial_number,
uuidarg=str(uuid.uuid4()))
write_file(dir_path, BAD_USER_CERT_FILE, bu_c, opts.silent)
write_file(dir_path, BAD_USER_KEY_FILE, bu_pr, opts.silent)
bu_cred = geniutil.create_credential(bu_c, bu_c, ma_pr, ma_c, "user", CRED_EXPIRY)
write_file(dir_path, BAD_USER_CRED_FILE, bu_cred, opts.silent)
if not opts.silent:
print "Creating admin cert and cred"
urn = geniutil.encode_urn(authority, 'user', ADMIN_NAME)
admin_uuid = str(uuid.uuid4())
cert_serial_number += 1
a_c,a_pu,a_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=ADMIN_EMAIL,
serial_number=cert_serial_number, uuidarg=admin_uuid, life_days=10000)
write_file(dir_path, ADMIN_CERT_FILE, a_c, opts.silent)
write_file(dir_path, ADMIN_KEY_FILE, a_pr, opts.silent)
p_list = ["GLOBAL_MEMBERS_VIEW", "GLOBAL_MEMBERS_WILDCARDS", "GLOBAL_PROJECTS_MONITOR", "GLOBAL_PROJECTS_VIEW",
"GLOBAL_PROJECTS_WILDCARDS", "MEMBER_REGISTER", "SERVICE_REMOVE", "SERVICE_VIEW",
"MEMBER_REMOVE_REGISTRATION", "SERVICE_REGISTER"]
a_cred = geniutil.create_credential_ex(a_c, a_c, ma_pr, ma_c, p_list, CRED_EXPIRY)
write_file(dir_path, ADMIN_CRED_FILE, a_cred, opts.silent)
insert_user(ADMIN_NAME,urn,a_c,a_cred,admin_uuid,'System', 'Administrator','[email protected]', True)
urn = geniutil.encode_urn(authority, 'user', EXPEDIENT_NAME)
exp_uuid = str(uuid.uuid4())
cert_serial_number += 1
a_c,a_pu,a_pr = geniutil.create_certificate(urn, issuer_key=ma_pr, issuer_cert=ma_c, email=EXPEDIENT_EMAIL,
serial_number=cert_serial_number, uuidarg=exp_uuid, life_days=10000)
write_file(dir_path, EXPEDIENT_CERT_FILE, a_c, opts.silent)
write_file(dir_path, EXPEDIENT_KEY_FILE, a_pr, opts.silent)
p_list = ["GLOBAL_MEMBERS_VIEW", "GLOBAL_MEMBERS_WILDCARDS", "GLOBAL_PROJECTS_MONITOR", "GLOBAL_PROJECTS_VIEW",
"GLOBAL_PROJECTS_WILDCARDS", "MEMBER_REGISTER", "SERVICE_REMOVE", "SERVICE_VIEW",
"MEMBER_REMOVE_REGISTRATION", "SERVICE_REGISTER"]
a_cred = geniutil.create_credential_ex(a_c, a_c, ma_pr, ma_c, p_list, CRED_EXPIRY)
write_file(dir_path, EXPEDIENT_CRED_FILE, a_cred, opts.silent)
insert_user(EXPEDIENT_NAME,urn,a_c,a_cred,exp_uuid,'Expedient', 'User-agent','[email protected]')
if not opts.silent:
print "Creating slice credential for valid test user"
urn = geniutil.encode_urn(authority, 'slice', SLICE_NAME)
s_c = geniutil.create_slice_certificate(urn, sa_pr, sa_c, CRED_EXPIRY)
s_cred = geniutil.create_credential(u_c, s_c, sa_pr, sa_c, "slice", CRED_EXPIRY)
write_file(dir_path, SLICE_CRED_FILE, s_cred, opts.silent)
if not opts.silent:
print "--------------------"
print "You can use the user certificates and slice cert to test. In production you may acquire them from a MA and SA."
print "--------------------"
| bsd-3-clause | 7,574,652,154,376,849,000 | 45.621277 | 136 | 0.629518 | false |
1032231418/python | lesson10/apps/books/publish/__init__.py | 1 | 3345 | # coding=utf8
from django.views.generic import ListView, DetailView, CreateView
from django.db.models import Q
from django.http import JsonResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from pure_pagination.mixins import PaginationMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import settings
from books.models import Publish, Author, Book
from books.forms import PublishForm
import json
import logging
logger = logging.getLogger('opsweb')
class PublishListView(LoginRequiredMixin, PaginationMixin, ListView):
'''
    Actions: getlist, create
'''
model = Publish
template_name = "books/publish_list.html"
context_object_name = "publish_list"
paginate_by = 5
keyword = ''
def get_queryset(self):
queryset = super(PublishListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword', '').strip()
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword) |
Q(address__icontains=self.keyword) |
Q(city__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(PublishListView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
form = PublishForm(request.POST)
if form.is_valid():
form.save()
            res = {'code': 0, 'result': 'Publisher added successfully'}
        else:
            # form.errors passes the validation error messages to the frontend as an object, which can be rendered directly
res = {'code': 1, 'errmsg': form.errors}
print form.errors
return JsonResponse(res, safe=True)
class PublishDetailView(LoginRequiredMixin, DetailView):
'''
    Actions: getone, update, delete
'''
model = Publish
template_name = "books/publish_detail.html"
context_object_name = 'publish'
next_url = '/books/publishlist/'
def post(self, request, *args, **kwargs):
pk = kwargs.get('pk')
p = self.model.objects.get(pk=pk)
form = PublishForm(request.POST, instance=p)
if form.is_valid():
form.save()
res = {"code": 0, "result": "更新出版商成功", 'next_url': self.next_url}
else:
res = {"code": 1, "errmsg": form.errors, 'next_url': self.next_url}
return render(request, settings.JUMP_PAGE, res)
# return HttpResponseRedirect(reverse('books:publish_detail',args=[pk]))
def delete(self, request, *args, **kwargs):
pk = kwargs.get('pk')
        # Look up the books that belong to this publisher; it may only be deleted when no related books exist
try:
obj = self.model.objects.get(pk=pk)
if not obj.book_set.all():
self.model.objects.filter(pk=pk).delete()
res = {"code": 0, "result": "删除出版商成功"}
else:
res = {"code": 1, "errmsg": "该出版社有关联书籍,请联系管理员"}
except:
res = {"code": 1, "errmsg": "删除错误请联系管理员"}
return JsonResponse(res, safe=True)
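# Hypothetical URL wiring for these views (module path, patterns and names are
# illustrative only):
#
#   from django.conf.urls import url
#   from books.publish import PublishListView, PublishDetailView
#
#   urlpatterns = [
#       url(r'^publishlist/$', PublishListView.as_view(), name='publish_list'),
#       url(r'^publishdetail/(?P<pk>\d+)/$', PublishDetailView.as_view(), name='publish_detail'),
#   ]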
| apache-2.0 | -5,549,527,290,084,019,000 | 32.706522 | 80 | 0.613028 | false |
mattilyra/gensim | gensim/corpora/svmlightcorpus.py | 1 | 5903 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Corpus in SVMlight format."""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger(__name__)
class SvmLightCorpus(IndexedCorpus):
"""Corpus in SVMlight format.
Quoting http://svmlight.joachims.org/:
The input file contains the training examples. The first lines may contain comments and are ignored
if they start with #. Each of the following lines represents one training example
and is of the following format::
<line> .=. <target> <feature>:<value> <feature>:<value> ... <feature>:<value> # <info>
<target> .=. +1 | -1 | 0 | <float>
<feature> .=. <integer> | "qid"
<value> .=. <float>
<info> .=. <string>
The "qid" feature (used for SVMlight ranking), if present, is ignored.
Notes
-----
Although not mentioned in the specification above, SVMlight also expect its feature ids to be 1-based
(counting starts at 1). We convert features to 0-base internally by decrementing all ids when loading a SVMlight
input file, and increment them again when saving as SVMlight.
"""
def __init__(self, fname, store_labels=True):
"""
Parameters
----------
fname: str
Path to corpus.
store_labels : bool, optional
Whether to store labels (~SVM target class). They currently have no application but stored
in `self.labels` for convenience by default.
"""
IndexedCorpus.__init__(self, fname)
logger.info("loading corpus from %s", fname)
self.fname = fname # input file, see class doc for format
self.length = None
self.store_labels = store_labels
self.labels = []
def __iter__(self):
""" Iterate over the corpus, returning one sparse (BoW) vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
lineno = -1
self.labels = []
with utils.smart_open(self.fname) as fin:
for lineno, line in enumerate(fin):
doc = self.line2doc(line)
if doc is not None:
if self.store_labels:
self.labels.append(doc[1])
yield doc[0]
self.length = lineno + 1
@staticmethod
def save_corpus(fname, corpus, id2word=None, labels=False, metadata=False):
"""Save a corpus in the SVMlight format.
The SVMlight `<target>` class tag is taken from the `labels` array, or set to 0 for all documents
if `labels` is not supplied.
Parameters
----------
fname : str
Path to output file.
corpus : iterable of iterable of (int, float)
Corpus in BoW format.
id2word : dict of (str, str), optional
Mapping id -> word.
labels : list or False
An SVMlight `<target>` class tags or False if not present.
metadata : bool
ARGUMENT WILL BE IGNORED.
Returns
-------
list of int
Offsets for each line in file (in bytes).
"""
logger.info("converting corpus to SVMlight format: %s", fname)
offsets = []
with utils.smart_open(fname, 'wb') as fout:
for docno, doc in enumerate(corpus):
label = labels[docno] if labels else 0 # target class is 0 by default
offsets.append(fout.tell())
fout.write(utils.to_utf8(SvmLightCorpus.doc2line(doc, label)))
return offsets
def docbyoffset(self, offset):
"""Get the document stored at file position `offset`.
Parameters
----------
offset : int
Document's position.
Returns
-------
tuple of (int, float)
"""
with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())[0]
    # TODO: this breaks if line2doc returns None
def line2doc(self, line):
"""Get a document from a single line in SVMlight format.
This method inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.doc2line`.
Parameters
----------
line : str
Line in SVMLight format.
Returns
-------
(list of (int, float), str)
Document in BoW format and target class label.
"""
line = utils.to_unicode(line)
line = line[: line.find('#')].strip()
if not line:
return None # ignore comments and empty lines
parts = line.split()
if not parts:
raise ValueError('invalid line format in %s' % self.fname)
target, fields = parts[0], [part.rsplit(':', 1) for part in parts[1:]]
# ignore 'qid' features, convert 1-based feature ids to 0-based
doc = [(int(p1) - 1, float(p2)) for p1, p2 in fields if p1 != 'qid']
return doc, target
@staticmethod
def doc2line(doc, label=0):
"""Convert BoW representation of document in SVMlight format.
This method inverse of :meth:`~gensim.corpora.svmlightcorpus.SvmLightCorpus.line2doc`.
Parameters
----------
doc : list of (int, float)
Document in BoW format.
label : int, optional
Document label (if provided).
Returns
-------
str
`doc` in SVMlight format.
"""
pairs = ' '.join("%i:%s" % (termid + 1, termval) for termid, termval in doc) # +1 to convert 0-base to 1-base
return "%s %s\n" % (label, pairs)
| lgpl-2.1 | 1,579,964,182,420,635,000 | 30.736559 | 118 | 0.569372 | false |
mancoast/CPythonPyc_test | fail/335_test_socket.py | 2 | 186107 | import unittest
from test import support
from unittest.case import _ExpectedFailure
import errno
import io
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import fcntl
except ImportError:
fcntl = False
try:
import multiprocessing
except ImportError:
multiprocessing = False
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, socket.error, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except socket.error:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except _ExpectedFailure:
# We deliberately ignore expected failures
pass
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except socket.error:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
# Don't use "localhost" here - it may not have an IPv6 address
# assigned to it by default (e.g. in /etc/hosts), and if someone
# has assigned it an IPv4-mapped address, then it's unlikely to
# work with the full IPv6 API.
host = "::1"
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except socket.error as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
self.assertTrue(repr(s).startswith("<socket.socket object"))
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(socket.error, msg=msg % 'socket.error'):
raise socket.error
with self.assertRaises(socket.error, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(socket.error, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except socket.error as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(socket.error, socket.if_indextoname, 0)
self.assertRaises(socket.error, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(socket.error, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
socket.gethostbyname('испытание.python.org')
socket.gethostbyname_ex('испытание.python.org')
socket.getaddrinfo('испытание.python.org',0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(socket.error, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
        # and we should have received a congestion notification through select
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
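#
# As a rough illustration only (the concrete class names are merely
# indicative of the pattern used further down in this module), a usable
# test case is assembled by multiple inheritance, e.g.:
#
#     class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
#         pass
#
# where the right-hand base supplies real client/server sockets of a
# particular family and type, and the left-hand base supplies the generic
# test methods that run against them.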
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
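        # For instance, with defaults of ([], [], 0, self.serv_addr) (as set
        # by SendrecvmsgConnectionlessBase below), sendmsgToServer([MSG])
        # becomes self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).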
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
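        # Example: checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC)
        # requires the msg_flags_common_set and msg_flags_eor_indicator bits
        # to be set in "flags", and the MSG_CTRUNC, msg_flags_common_unset
        # and msg_flags_non_eor_indicator bits to be clear.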
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(socket.error) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(socket.error, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux2"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(socket.error) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(socket.error, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(socket.error, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(socket.error, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
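                # Truncate to a whole number of ints; any trailing partial
                # item is ignored when extracting FDs to close.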
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except socket.error as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except socket.error as e:
self.assertIsInstance(e.errno, int)
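# The kernel rejected the over-long first item; fall back to sending both items with the correct size.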
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
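# If a truncated item was returned at all, it should be the hop limit message carrying less than a full int of data.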
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
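# The kernel may deliver the TCLASS and HOPLIMIT items in either order, so track which types remain to be seen.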
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises socket.error with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(socket.error) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# socket.error with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(socket.error) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Pass an actual address here because Python's wrapper for
# sendto() doesn't allow a zero-length address; POSIX requires
# the address to be ignored anyway, since the socket is
# connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
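# The peer has closed its end, so the socket is readable and recv() returns EOF (b'').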
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(hasattr(socket, 'socketpair'),
'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
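# A value larger than UINT_MAX must not be truncated to zero; it should still count as true, so the socket becomes blocking again (timeout None).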
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
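# If conn wrongly inherited non-blocking behaviour from the listening socket, this recv() would raise instead of blocking until the client sends MSG.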
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(IOError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while True:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(socket.error, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
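# Each step is a callable that either returns the next chunk of bytes or raises an exception (such as EINTR) to simulate an interrupted call.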
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
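# Closing the file object should release its reference to the socket, so the socket's refcount drops by exactly one.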
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to make `read_file` non-blocking we must call
# `cli_conn.setblocking()`, and likewise `serv_conn.setblocking()`
# for `write_file` (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
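# With the connection in non-blocking mode, read() and readinto() return None while no data has arrived yet.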
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# write() returned None: the buffer is full and the call would have blocked, which is the condition we wanted to reach.
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
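# The server side only needs to accept the connection; the real assertions run in the client-side _test* methods below.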
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
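# 2 == socket.AF_INET; the connection is expected to be made over IPv4.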
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
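# The server side is identical for both tests; only the client-side timeout handling differs.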
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(socket.error, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(socket.error, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(socket.error, sock.sendall, b'foo')
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@unittest.skipUnless(fcntl, "module fcntl not available")
class CloexecConstantTest(unittest.TestCase):
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertTrue(fcntl.fcntl(s, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking socket
# since the internal python timout value isn't transfered.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
CloexecConstantTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -4,171,975,364,921,902,000 | 36.711998 | 117 | 0.618732 | false |
annoviko/pyclustering | pyclustering/utils/metric.py | 1 | 20881 | """!
@brief Module provides various distance metrics - abstraction of the notion of distance in a metric space.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import numpy
from enum import IntEnum
class type_metric(IntEnum):
"""!
@brief Enumeration of supported metrics in the module for distance calculation between two points.
"""
## Euclidean distance, for more information see function 'euclidean_distance'.
EUCLIDEAN = 0
## Square Euclidean distance, for more information see function 'euclidean_distance_square'.
EUCLIDEAN_SQUARE = 1
## Manhattan distance, for more information see function 'manhattan_distance'.
MANHATTAN = 2
## Chebyshev distance, for more information see function 'chebyshev_distance'.
CHEBYSHEV = 3
## Minkowski distance, for more information see function 'minkowski_distance'.
MINKOWSKI = 4
## Canberra distance, for more information see function 'canberra_distance'.
CANBERRA = 5
## Chi square distance, for more information see function 'chi_square_distance'.
CHI_SQUARE = 6
## Gower distance, for more information see function 'gower_distance'.
GOWER = 7
## User defined function for distance calculation between two points.
USER_DEFINED = 1000
class distance_metric:
"""!
@brief Distance metric performs distance calculation between two points in line with encapsulated function, for
example, euclidean distance or chebyshev distance, or even user-defined.
@details
Example of Euclidean distance metric:
@code
metric = distance_metric(type_metric.EUCLIDEAN)
distance = metric([1.0, 2.5], [-1.2, 3.4])
@endcode
Example of Chebyshev distance metric:
@code
metric = distance_metric(type_metric.CHEBYSHEV)
distance = metric([0.0, 0.0], [2.5, 6.0])
@endcode
In following example additional argument should be specified (generally, 'degree' is a optional argument that is
equal to '2' by default) that is specific for Minkowski distance:
@code
metric = distance_metric(type_metric.MINKOWSKI, degree=4)
distance = metric([4.0, 9.2, 1.0], [3.4, 2.5, 6.2])
@endcode
User may define its own function for distance calculation. In this case input is two points, for example, you
want to implement your own version of Manhattan distance:
@code
from pyclustering.utils.metric import distance_metric, type_metric
def my_manhattan(point1, point2):
dimension = len(point1)
result = 0.0
for i in range(dimension):
result += abs(point1[i] - point2[i]) * 0.1
return result
metric = distance_metric(type_metric.USER_DEFINED, func=my_manhattan)
distance = metric([2.0, 3.0], [1.0, 3.0])
@endcode
"""
def __init__(self, metric_type, **kwargs):
"""!
@brief Creates distance metric instance for calculation distance between two points.
@param[in] metric_type (type_metric):
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'numpy_usage' 'func' and corresponding additional argument for
for specific metric types).
<b>Keyword Args:</b><br>
- func (callable): Callable object with two arguments (point #1 and point #2) or (object #1 and object #2) in case of numpy usage.
This argument is used only if metric is 'type_metric.USER_DEFINED'.
- degree (numeric): Only for 'type_metric.MINKOWSKI' - degree of Minkowski equation.
- max_range (array_like): Only for 'type_metric.GOWER' - max range in each dimension. 'data' can be used
instead of this parameter.
- data (array_like): Only for 'type_metric.GOWER' - input data that used for 'max_range' calculation.
'max_range' can be used instead of this parameter.
- numpy_usage (bool): If True then numpy is used for calculation (by default is False).
"""
self.__type = metric_type
self.__args = kwargs
self.__func = self.__args.get('func', None)
self.__numpy = self.__args.get('numpy_usage', False)
self.__calculator = self.__create_distance_calculator()
def __call__(self, point1, point2):
"""!
@brief Calculates distance between two points.
@param[in] point1 (list): The first point.
@param[in] point2 (list): The second point.
@return (double) Distance between two points.
"""
return self.__calculator(point1, point2)
def get_type(self):
"""!
@brief Return type of distance metric that is used.
@return (type_metric) Type of distance metric.
"""
return self.__type
def get_arguments(self):
"""!
@brief Return additional arguments that are used by distance metric.
@return (dict) Additional arguments.
"""
return self.__args
def get_function(self):
"""!
@brief Return user-defined function for calculation distance metric.
@return (callable): User-defined distance metric function.
"""
return self.__func
def enable_numpy_usage(self):
"""!
@brief Start numpy for distance calculation.
@details Useful in case matrices to increase performance. No effect in case of type_metric.USER_DEFINED type.
"""
self.__numpy = True
if self.__type != type_metric.USER_DEFINED:
self.__calculator = self.__create_distance_calculator()
def disable_numpy_usage(self):
"""!
@brief Stop using numpy for distance calculation.
@details Useful in case of big amount of small data portion when numpy call is longer than calculation itself.
No effect in case of type_metric.USER_DEFINED type.
"""
self.__numpy = False
self.__calculator = self.__create_distance_calculator()
def __create_distance_calculator(self):
"""!
@brief Creates distance metric calculator.
@return (callable) Callable object of distance metric calculator.
"""
if self.__numpy is True:
return self.__create_distance_calculator_numpy()
return self.__create_distance_calculator_basic()
def __create_distance_calculator_basic(self):
"""!
@brief Creates distance metric calculator that does not use numpy.
@return (callable) Callable object of distance metric calculator.
"""
if self.__type == type_metric.EUCLIDEAN:
return euclidean_distance
elif self.__type == type_metric.EUCLIDEAN_SQUARE:
return euclidean_distance_square
elif self.__type == type_metric.MANHATTAN:
return manhattan_distance
elif self.__type == type_metric.CHEBYSHEV:
return chebyshev_distance
elif self.__type == type_metric.MINKOWSKI:
return lambda point1, point2: minkowski_distance(point1, point2, self.__args.get('degree', 2))
elif self.__type == type_metric.CANBERRA:
return canberra_distance
elif self.__type == type_metric.CHI_SQUARE:
return chi_square_distance
elif self.__type == type_metric.GOWER:
max_range = self.__get_gower_max_range()
return lambda point1, point2: gower_distance(point1, point2, max_range)
elif self.__type == type_metric.USER_DEFINED:
return self.__func
else:
raise ValueError("Unknown type of metric: '%d'", self.__type)
def __get_gower_max_range(self):
"""!
@brief Returns max range for Gower distance using input parameters ('max_range' or 'data').
@return (numpy.array) Max range for Gower distance.
"""
max_range = self.__args.get('max_range', None)
if max_range is None:
data = self.__args.get('data', None)
if data is None:
raise ValueError("Gower distance requires 'data' or 'max_range' argument to construct metric.")
max_range = numpy.max(data, axis=0) - numpy.min(data, axis=0)
self.__args['max_range'] = max_range
return max_range
def __create_distance_calculator_numpy(self):
"""!
@brief Creates distance metric calculator that uses numpy.
@return (callable) Callable object of distance metric calculator.
"""
if self.__type == type_metric.EUCLIDEAN:
return euclidean_distance_numpy
elif self.__type == type_metric.EUCLIDEAN_SQUARE:
return euclidean_distance_square_numpy
elif self.__type == type_metric.MANHATTAN:
return manhattan_distance_numpy
elif self.__type == type_metric.CHEBYSHEV:
return chebyshev_distance_numpy
elif self.__type == type_metric.MINKOWSKI:
return lambda object1, object2: minkowski_distance_numpy(object1, object2, self.__args.get('degree', 2))
elif self.__type == type_metric.CANBERRA:
return canberra_distance_numpy
elif self.__type == type_metric.CHI_SQUARE:
return chi_square_distance_numpy
elif self.__type == type_metric.GOWER:
max_range = self.__get_gower_max_range()
return lambda object1, object2: gower_distance_numpy(object1, object2, max_range)
elif self.__type == type_metric.USER_DEFINED:
return self.__func
else:
raise ValueError("Unknown type of metric: '%d'", self.__type)
def euclidean_distance(point1, point2):
"""!
@brief Calculate Euclidean distance between two vectors.
@details The Euclidean between vectors (points) a and b is calculated by following formula:
\f[
dist(a, b) = \sqrt{ \sum_{i=0}^{N}(a_{i} - b_{i})^{2} };
\f]
Where N is a length of each vector.
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Euclidean distance between two vectors.
@see euclidean_distance_square, manhattan_distance, chebyshev_distance
"""
distance = euclidean_distance_square(point1, point2)
return distance ** 0.5
def euclidean_distance_numpy(object1, object2):
"""!
@brief Calculate Euclidean distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Euclidean distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sqrt(numpy.sum(numpy.square(object1 - object2), axis=1))
else:
return numpy.sqrt(numpy.sum(numpy.square(object1 - object2)))
def euclidean_distance_square(point1, point2):
"""!
@brief Calculate square Euclidean distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}(a_{i} - b_{i})^{2};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Square Euclidean distance between two vectors.
@see euclidean_distance, manhattan_distance, chebyshev_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** 2.0
return distance
def euclidean_distance_square_numpy(object1, object2):
"""!
@brief Calculate square Euclidean distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Square Euclidean distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sum(numpy.square(object1 - object2), axis=1).T
else:
return numpy.sum(numpy.square(object1 - object2))
def manhattan_distance(point1, point2):
"""!
@brief Calculate Manhattan distance between between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\left | a_{i} - b_{i} \right |;
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Manhattan distance between two vectors.
@see euclidean_distance_square, euclidean_distance, chebyshev_distance
"""
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance += abs(point1[i] - point2[i])
return distance
def manhattan_distance_numpy(object1, object2):
"""!
@brief Calculate Manhattan distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Manhattan distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.sum(numpy.absolute(object1 - object2), axis=1).T
else:
return numpy.sum(numpy.absolute(object1 - object2))
def chebyshev_distance(point1, point2):
"""!
@brief Calculate Chebyshev distance (maximum metric) between between two vectors.
@details Chebyshev distance is a metric defined on a vector space where the distance between two vectors is the
greatest of their differences along any coordinate dimension.
\f[
dist(a, b) = \max_{}i\left (\left | a_{i} - b_{i} \right |\right );
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Chebyshev distance between two vectors.
@see euclidean_distance_square, euclidean_distance, minkowski_distance
"""
distance = 0.0
dimension = len(point1)
for i in range(dimension):
distance = max(distance, abs(point1[i] - point2[i]))
return distance
def chebyshev_distance_numpy(object1, object2):
"""!
@brief Calculate Chebyshev distance between two objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@return (double) Chebyshev distance between two objects.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.max(numpy.absolute(object1 - object2), axis=1).T
else:
return numpy.max(numpy.absolute(object1 - object2))
def minkowski_distance(point1, point2, degree=2):
"""!
@brief Calculate Minkowski distance between two vectors.
\f[
dist(a, b) = \sqrt[p]{ \sum_{i=0}^{N}\left(a_{i} - b_{i}\right)^{p} };
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] degree (numeric): Degree of that is used for Minkowski distance.
@return (double) Minkowski distance between two vectors.
@see euclidean_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** degree
return distance ** (1.0 / degree)
def minkowski_distance_numpy(object1, object2, degree=2):
"""!
@brief Calculate Minkowski distance between objects using numpy.
@param[in] object1 (array_like): The first array_like object.
@param[in] object2 (array_like): The second array_like object.
@param[in] degree (numeric): Degree of that is used for Minkowski distance.
@return (double) Minkowski distance between two object.
"""
if len(object1.shape) > 1 or len(object2.shape) > 1:
return numpy.power(numpy.sum(numpy.power(object1 - object2, degree), axis=1), 1/degree)
else:
return numpy.power(numpy.sum(numpy.power(object1 - object2, degree)), 1 / degree)
def canberra_distance(point1, point2):
"""!
@brief Calculate Canberra distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\frac{\left | a_{i} - b_{i} \right |}{\left | a_{i} \right | + \left | b_{i} \right |};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (float) Canberra distance between two objects.
"""
distance = 0.0
for i in range(len(point1)):
divider = abs(point1[i]) + abs(point2[i])
if divider == 0.0:
continue
distance += abs(point1[i] - point2[i]) / divider
return distance
def canberra_distance_numpy(object1, object2):
"""!
@brief Calculate Canberra distance between two objects using numpy.
@param[in] object1 (array_like): The first vector.
@param[in] object2 (array_like): The second vector.
@return (float) Canberra distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.abs(object1 - object2), numpy.abs(object1) + numpy.abs(object2))
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T
else:
return numpy.sum(numpy.nan_to_num(result))
def chi_square_distance(point1, point2):
"""!
@brief Calculate Chi square distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}\frac{\left ( a_{i} - b_{i} \right )^{2}}{\left | a_{i} \right | + \left | b_{i} \right |};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (float) Chi square distance between two objects.
"""
distance = 0.0
for i in range(len(point1)):
divider = abs(point1[i]) + abs(point2[i])
if divider != 0.0:
distance += ((point1[i] - point2[i]) ** 2.0) / divider
return distance
def chi_square_distance_numpy(object1, object2):
"""!
@brief Calculate Chi square distance between two vectors using numpy.
@param[in] object1 (array_like): The first vector.
@param[in] object2 (array_like): The second vector.
@return (float) Chi square distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.power(object1 - object2, 2), numpy.abs(object1) + numpy.abs(object2))
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T
else:
return numpy.sum(numpy.nan_to_num(result))
def gower_distance(point1, point2, max_range):
"""!
@brief Calculate Gower distance between two vectors.
@details Implementation is based on the paper @cite article::utils::metric::gower. Gower distance is calculate
using following formula:
\f[
dist\left ( a, b \right )=\frac{1}{p}\sum_{i=0}^{p}\frac{\left | a_{i} - b_{i} \right |}{R_{i}},
\f]
where \f$R_{i}\f$ is a max range for ith dimension. \f$R\f$ is defined in line following formula:
\f[
R=max\left ( X \right )-min\left ( X \right )
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] max_range (array_like): Max range in each data dimension.
@return (float) Gower distance between two objects.
"""
distance = 0.0
dimensions = len(point1)
for i in range(dimensions):
if max_range[i] != 0.0:
distance += abs(point1[i] - point2[i]) / max_range[i]
return distance / dimensions
def gower_distance_numpy(point1, point2, max_range):
"""!
@brief Calculate Gower distance between two vectors using numpy.
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@param[in] max_range (array_like): Max range in each data dimension.
@return (float) Gower distance between two objects.
"""
with numpy.errstate(divide='ignore', invalid='ignore'):
result = numpy.divide(numpy.abs(point1 - point2), max_range)
if len(result.shape) > 1:
return numpy.sum(numpy.nan_to_num(result), axis=1).T / len(result[0])
else:
return numpy.sum(numpy.nan_to_num(result)) / len(point1)
| gpl-3.0 | -7,497,159,329,073,956,000 | 30.883465 | 142 | 0.614338 | false |
madhusudancs/pytask | scripts/generate_tasks_from_csv.py | 1 | 2862 | #!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
"""Module to fill database with the tasks supplied in CSV.
This module takes the directory containing the csv files as
argument and creates task for the data in each CSV file in
this directory.
"""
__authors__ = [
'"Madhusudan.C.S" <[email protected]>'
]
import csv
import datetime
import os
import sys
from django.contrib.auth.models import User
from pytask.taskapp.models import Task
STATIC_DATA = {
'created_by': User.objects.get(pk=1),
'creation_datetime': datetime.datetime.now()
}
def get_textbooks_from_csv(directory, file_name):
"""Return the list of the titles of tasks.
Args:
file: name of the CSV file from which tasks must be fetched.
"""
file_absolute_name = os.path.join(directory, file_name)
csv_obj = csv.reader(open(file_absolute_name))
# Nifty trick to separate the file extension out and get the
# remaining part of the filename to use this as the tag for
# branches/departments
branch_name = os.extsep.join(file_name.split(os.extsep)[:-1])
textbooks = []
for line in csv_obj:
if len(line) == 2 and line[0]:
sep = ' by '
else:
sep = ''
textbooks.append({
'title': sep.join(line),
'desc': '(To be filled in by the Coordinator or the T/A.)',
'tags_field': ', '. join(['Textbook', branch_name, line[1]]),
'pynts': 10,
'status': 'Open',
})
return textbooks
def seed_db(data):
"""Seeds the database when the data is passed as the argument
Args:
data: A dictionary containing the data to be seeded into the
task model.
"""
for task in data:
task.update(STATIC_DATA)
task_obj = Task(**task)
task_obj.save()
def main():
"""Just a wrapper function to make call the functions that perform
the action.
"""
for dir in sys.argv[1:]:
args = list(os.walk(dir))
files = args[0][2]
for file_name in files:
tasks = get_textbooks_from_csv(args[0][0], file_name)
seed_db(tasks)
if __name__ == '__main__':
main()
| agpl-3.0 | 2,580,936,690,223,775,000 | 25.747664 | 71 | 0.643256 | false |
UKPLab/sentence-transformers | sentence_transformers/losses/CosineSimilarityLoss.py | 1 | 2213 | import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
class CosineSimilarityLoss(nn.Module):
"""
CosineSimilarityLoss expects, that the InputExamples consists of two texts and a float label.
It computes the vectors u = model(input_text[0]) and v = model(input_text[1]) and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ||input_label - cos_score_transformation(cosine_sim(u,v))||_2.
:param model: SentenceTranformer model
:param loss_fct: Which pytorch loss function should be used to compare the cosine_similartiy(u,v) with the input_label? By default, MSE: ||input_label - cosine_sim(u,v)||_2
:param cos_score_transformation: The cos_score_transformation function is applied on top of cosine_similarity. By default, the identify function is used (i.e. no change).
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, InputExample, losses
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, loss_fct = nn.MSELoss(), cos_score_transformation=nn.Identity()):
super(CosineSimilarityLoss, self).__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.view(-1))
| apache-2.0 | 990,618,158,397,479,200 | 51.690476 | 177 | 0.712155 | false |
damiendart/nfsnapi-python | nfsnapi.py | 1 | 5512 | """Stuff to make working with the NearlyFreeSpeech.NET API easier.
>>> import nfsnapi
>>> # Replace USERNAME, API_KEY, and so on with actual values.
>>> nfsnapi.run_request("USERNAME", "API_KEY",
... "/account/ACCOUNT_NUMBER/balance")
'10.56'
>>> nfsnapi.run_request("USERNAME", "API_KEY",
... "/dns/DOMAIN/listRRs", "type=A")
(A bunch of JSON not shown.)
>>> # And so on...
This file was written by Damien Dart, <[email protected]>. This is
free and unencumbered software released into the public domain. For more
information, please refer to the accompanying "UNLICENCE" file.
"""
__author__ = "Damien Dart, <[email protected]>"
__license__ = "Unlicense"
__title__ = "nfsnapi"
__version__ = "0.3.0"
import json
import random
import string
import time
from hashlib import sha1
try:
from http.client import HTTPException
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
basestring = str
except ImportError:
from httplib import HTTPException
from urllib2 import urlopen, Request, HTTPError, URLError
def auth_header(username, API_key, request_path, request_body = b""):
"""Return a NearlyFreeSpeeech.NET authentication HTTP header field.
Returns a dictionary containing an authentication HTTP header field
required for NearlyFreeSpeech.NET API requests. For more information,
see <https://members.nearlyfreespeech.net/wiki/API/Introduction>.
- "username" should be a string containing the member login name of
the user making the request.
- "API_key" should be a string containing the API key associated with
the member login name; an API key can be obtained by submitting a
secure support request to NearlyFreeSpeeech.NET.
- "request_path" should be a string containing the path portion of the
requested URL. For example, if the requested URL is
<https://api.nearlyfreespeech.net/site/example/addAlias>,
"request_path" would be "/site/example/addAlias". The first
forward-slash is optional.
- "request_body" may be a bytestring containing the HTTP request
message body for HTTP POST requests, or an empty bytestring for GET
requests or if no such data is required. The data should be in the
standard "application/x-www-form-urlencoded" format.
"""
if (request_path[0] != "/"):
request_path = "/%s" % request_path
salt = "".join(random.choice(string.ascii_letters) for i in range(16))
timestamp = str(int(time.time()))
return { "X-NFSN-Authentication" : ";".join([username, timestamp, salt,
sha1(str(";".join([username, timestamp, salt, API_key, request_path,
sha1(request_body).hexdigest()])).encode("utf-8")).hexdigest()]) }
def run_request(username, API_key, request_path, request_body = None):
"""Run a NearlyFreeSpeech.NET API request, return a string response.
NOTE: This function does not verify the API server's certificate.
The NearlyFreeSpeech.net API documentation is unclear on whether every
successful API call returns a valid JSON-encoded associative array,
hence why any response is returned as a string. This method raises
"NFSNAPIRequestError" on errors.
- "username" should be a string containing the member login name of
the user making the request.
- "API_key" should be a string containing the API key associated with
the member login name; an API key can be obtained by submitting a
secure support request to NearlyFreeSpeeech.NET.
- "request_path" should be a string containing the path portion of the
requested URL. For example, if the requested URL is
<https://api.nearlyfreespeech.net/site/example/addAlias>,
"request_path" would be "/site/example/addAlias". The first
forward-slash is optional.
- "request_body" may be a string containing the HTTP request message
body for HTTP POST requests or "None" for HTTP GET requests. Pass
an empty string for HTTP POST requests that do not require a message
body. The data should be in the standard
"application/x-www-form-urlencoded" format.
"""
try:
if (request_path[0] != "/"):
request_path = "/%s" % request_path
if isinstance(request_body, basestring):
request_body = request_body.encode("utf-8")
return urlopen(Request("https://api.nearlyfreespeech.net" + request_path,
request_body, dict(auth_header(username, API_key, request_path,
request_body or b""), **{"User-Agent": "nfsnapi/" + __version__ +
" +https://www.robotinaponcho.net/git/?p=nfsnapi-python.git"}))).read().decode()
except HTTPException as e:
raise NFSNAPIRequestError(str(e))
except HTTPError as e:
try:
error = json.loads(e.read().decode())
raise NFSNAPIRequestError("\n".join([error["error"], error["debug"]]))
except (KeyError, ValueError):
raise NFSNAPIRequestError(str(e.reason))
except URLError as e:
raise NFSNAPIRequestError(str(e.reason))
class NFSNAPIRequestError(Exception):
"""Raised when an NearlyFreeSpeech.NET API request fails.
Every instance will have a "reason" attribute, a string with the
reason for the error. If the offending request resulted in a 4XX or
5XX HTTP response, the attribute will contain the "human-readable" and
debug error messages returned by the NearlyFreeSpeech.NET API,
separated by a new-line (for more information, see
<https://members.nearlyfreespeech.net/wiki/API/Introduction>).
"""
def __init__(self, reason):
Exception.__init__(self, reason)
self.reason = reason
| unlicense | -8,174,024,267,578,874,000 | 40.134328 | 88 | 0.717888 | false |
jima80525/KidTasks | tasks/models.py | 1 | 3216 | """ Define the data models for the KidsTasks app """
import datetime
from django.db import models
class Kid(models.Model):
""" Defines the kids which have to do the tasks. """
name = models.CharField(max_length=256)
last_update_date = models.DateField(default=datetime.datetime.today)
days = [
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
]
def __str__(self):
return self.name
class Meta:
ordering = ['name', ]
def build_all_tasks(self):
""" Build a list of all tasks. From:
http://stackoverflow.com/questions/4720079/django-query-filter-with-\
variable-column """
tasks = []
for day in self.days:
qs = RepeatingTask.objects.filter(kid=self).filter(**{day: True})
tasks.append((day, [task for task in qs]))
return tasks
def populate_today(self):
""" Create new Tasks from Repeating tasks matching today's day of the
week."""
current_date = datetime.date.today()
day_name = datetime.datetime.now().strftime("%A").lower()
if current_date > self.last_update_date:
for rep_task in RepeatingTask.objects.filter(kid=self).filter(
**{day_name: True}):
date_task = Task(name=rep_task.name, date=current_date,
kid=self)
date_task.save()
self.last_update_date = current_date
self.save()
def update_with_new_repeating_task(self, new_task, cleaned_data):
""" Adds a new dated task to the list IF the newly created repeating
task is for today. Uses the cleaned data from the form to provide
a handy dict for day names."""
current_date = datetime.date.today()
day_name = datetime.datetime.now().strftime("%A").lower()
if cleaned_data[day_name]:
date_task = Task(name=new_task.name, date=current_date, kid=self)
date_task.save()
class Task(models.Model):
""" A Task is associated with a kid and a date. This is the actual thing
the kid has to do! """
name = models.CharField(max_length=256)
completed = models.BooleanField(default=False)
date = models.DateField(default=datetime.datetime.now)
kid = models.ForeignKey(Kid)
def __str__(self):
return "{0}:{1}-{2}".format(self.name, self.kid.name, str(self.date))
class Meta:
ordering = ['name', ]
class RepeatingTask(models.Model):
""" Defines a repeating task """
name = models.CharField(max_length=256)
kid = models.ForeignKey(Kid) # NOTE: RepeatingTasks are kid specific
monday = models.BooleanField(default=False)
tuesday = models.BooleanField(default=False)
wednesday = models.BooleanField(default=False)
thursday = models.BooleanField(default=False)
friday = models.BooleanField(default=False)
saturday = models.BooleanField(default=False)
sunday = models.BooleanField(default=False)
def __str__(self):
return "{0}:{1}".format(self.kid.name, self.name)
class Meta:
ordering = ['kid', 'name', ]
| mit | 8,298,939,762,771,274,000 | 33.956522 | 79 | 0.610386 | false |
listyque/TACTIC-Handler | thlib/side/python_minifier/rename/mapper.py | 1 | 5193 | """
For each node in an AST set the namespace to use for name binding and resolution
"""
import ast
from python_minifier.rename.util import is_namespace
def add_parent_to_arguments(arguments, func):
arguments.parent = func
arguments.namespace = func
for arg in arguments.args:
add_parent(arg, arguments, func)
if hasattr(arg, 'annotation') and arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
if hasattr(arguments, 'kwonlyargs'):
for arg in arguments.kwonlyargs:
add_parent(arg, arguments, func)
if arg.annotation is not None:
add_parent(arg.annotation, arguments, func.namespace)
for node in arguments.kw_defaults:
if node is not None:
add_parent(node, arguments, func.namespace)
for node in arguments.defaults:
add_parent(node, arguments, func.namespace)
if arguments.vararg:
if hasattr(arguments, 'varargannotation') and arguments.varargannotation is not None:
add_parent(arguments.varargannotation, arguments, func.namespace)
elif isinstance(arguments.vararg, str):
pass
else:
add_parent(arguments.vararg, arguments, func)
if arguments.kwarg:
if hasattr(arguments, 'kwargannotation') and arguments.kwargannotation is not None:
add_parent(arguments.kwargannotation, arguments, func.namespace)
elif isinstance(arguments.kwarg, str):
pass
else:
add_parent(arguments.kwarg, arguments, func)
def add_parent_to_functiondef(functiondef):
"""
Add correct parent and namespace attributes to functiondef nodes
"""
if functiondef.args is not None:
add_parent_to_arguments(functiondef.args, func=functiondef)
for node in functiondef.body:
add_parent(node, parent=functiondef, namespace=functiondef)
for node in functiondef.decorator_list:
add_parent(node, parent=functiondef, namespace=functiondef.namespace)
if hasattr(functiondef, 'returns') and functiondef.returns is not None:
add_parent(functiondef.returns, parent=functiondef, namespace=functiondef.namespace)
def add_parent_to_classdef(classdef):
"""
Add correct parent and namespace attributes to classdef nodes
"""
for node in classdef.bases:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'keywords'):
for node in classdef.keywords:
add_parent(node, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'starargs') and classdef.starargs is not None:
add_parent(classdef.starargs, parent=classdef, namespace=classdef.namespace)
if hasattr(classdef, 'kwargs') and classdef.kwargs is not None:
add_parent(classdef.kwargs, parent=classdef, namespace=classdef.namespace)
for node in classdef.body:
add_parent(node, parent=classdef, namespace=classdef)
for node in classdef.decorator_list:
add_parent(node, parent=classdef, namespace=classdef.namespace)
def add_parent(node, parent=None, namespace=None):
"""
Add a parent attribute to child nodes
Add a namespace attribute to child nodes
:param node: The tree to add parent and namespace properties to
:type node: :class:`ast.AST`
:param parent: The parent node of this node
:type parent: :class:`ast.AST`
:param namespace: The namespace Node that this node is in
:type namespace: ast.Lambda or ast.Module or ast.FunctionDef or ast.AsyncFunctionDef or ast.ClassDef or ast.DictComp or ast.SetComp or ast.ListComp or ast.Generator
"""
node.parent = parent if parent is not None else node
node.namespace = namespace if namespace is not None else node
if is_namespace(node):
node.bindings = []
node.global_names = set()
node.nonlocal_names = set()
if isinstance(node, ast.FunctionDef) or (
hasattr(ast, 'AsyncFunctionDef') and isinstance(node, ast.AsyncFunctionDef)
):
add_parent_to_functiondef(node)
elif isinstance(node, ast.Lambda):
add_parent_to_arguments(node.args, func=node)
add_parent(node.body, parent=node, namespace=node)
elif isinstance(node, ast.ClassDef):
add_parent_to_classdef(node)
else:
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=node)
return
if isinstance(node, ast.comprehension):
add_parent(node.target, parent=node, namespace=namespace)
add_parent(node.iter, parent=node, namespace=namespace)
for if_ in node.ifs:
add_parent(if_, parent=node, namespace=namespace)
return
if isinstance(node, ast.Global):
namespace.global_names.update(node.names)
if hasattr(ast, 'Nonlocal') and isinstance(node, ast.Nonlocal):
namespace.nonlocal_names.update(node.names)
for child in ast.iter_child_nodes(node):
add_parent(child, parent=node, namespace=namespace)
def add_namespace(module):
add_parent(module)
| epl-1.0 | -7,521,664,311,103,650,000 | 34.568493 | 168 | 0.677258 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-sql/azure/mgmt/sql/models/server_metric.py | 1 | 2274 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServerMetric(Model):
"""Represents server metrics.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: Name of the server usage metric.
:vartype name: str
:ivar resource_name: The name of the resource.
:vartype resource_name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar current_value: The current value of the metric.
:vartype current_value: float
:ivar limit: The current limit of the metric.
:vartype limit: float
:ivar unit: The units of the metric.
:vartype unit: str
:ivar next_reset_time: The next reset time for the metric (ISO8601
format).
:vartype next_reset_time: datetime
"""
_validation = {
'name': {'readonly': True},
'resource_name': {'readonly': True},
'display_name': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'unit': {'readonly': True},
'next_reset_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'float'},
'limit': {'key': 'limit', 'type': 'float'},
'unit': {'key': 'unit', 'type': 'str'},
'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
}
def __init__(self):
self.name = None
self.resource_name = None
self.display_name = None
self.current_value = None
self.limit = None
self.unit = None
self.next_reset_time = None
| mit | 8,039,844,268,228,363,000 | 33.984615 | 76 | 0.5708 | false |
elopio/snapcraft | tests/integration/general/test_clean_prime_step.py | 1 | 3431 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
Contains,
DirExists,
FileExists,
Not
)
from tests import integration
class CleanPrimeStepTestCase(integration.TestCase):
def setUp(self):
super().setUp()
self.copy_project_to_cwd('independent-parts')
self.run_snapcraft('prime')
def test_clean_prime_step(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
output = self.run_snapcraft(
['clean', '--step=prime'], debug=False)
self.assertThat(self.prime_dir, Not(DirExists()))
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Assert that the priming area was removed wholesale, not a part at a
# time (since we didn't specify any parts).
self.assertThat(output, Contains("Cleaning up priming area"))
self.expectThat(output, Not(Contains('part1')))
self.expectThat(output, Not(Contains('part2')))
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
def test_clean_prime_step_single_part(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.run_snapcraft(['clean', 'part1', '--step=prime'])
self.assertThat(os.path.join(bindir, 'file1'), Not(FileExists()))
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
def test_clean_with_deprecated_strip_step(self):
bindir = os.path.join(self.prime_dir, 'bin')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
self.run_snapcraft(['clean', '--step=strip'])
self.assertThat(self.prime_dir, Not(DirExists()))
self.assertThat(self.stage_dir, DirExists())
self.assertThat(self.parts_dir, DirExists())
# Now try to prime again
self.run_snapcraft('prime')
self.assertThat(os.path.join(bindir, 'file1'), FileExists())
self.assertThat(os.path.join(bindir, 'file2'), FileExists())
| gpl-3.0 | -3,465,672,595,023,628,300 | 37.988636 | 77 | 0.660449 | false |
subeax/grab | grab/transport/requests.py | 1 | 11753 | # Copyright: 2011, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
import email
import logging
#import urllib
#try:
# from StringIO import StringIO
#except ImportError:
# from io import StringIO
import threading
import random
import requests
from grab.error import GrabError, GrabMisuseError
from grab.base import UploadContent, UploadFile
from grab.response import Response
from grab.tools.http import urlencode, normalize_http_values, normalize_unicode
from grab.util.py3k_support import *
logger = logging.getLogger('grab.transport.requests')
class RequestsTransport(object):
def __init__(self):
self.session = requests.session()
def reset(self):
# TODO: WTF???
# Maybe move to super-class???
self.request_headers = ''
self.request_head = ''
self.request_body = ''
self.request_log = ''
#self.request_method = None
self.requests_config = None
#def head_processor(self, chunk):
#"""
#Process head of response.
#"""
#if grab.config['nohead']:
#return 0
#self.response_head_chunks.append(chunk)
## Returning None implies that all bytes were written
#return None
#def body_processor(self, chunk):
#"""
#Process body of response.
#"""
#if grab.config['nobody']:
#return 0
#self.response_body_chunks.append(chunk)
## Returning None implies that all bytes were written
#return None
#def debug_processor(self, _type, text):
#"""
#Parse request headers and save to ``self.request_headers``
#0: CURLINFO_TEXT
#1: CURLINFO_HEADER_IN
#2: CURLINFO_HEADER_OUT
#3: CURLINFO_DATA_IN
#4: CURLINFO_DATA_OUT
#5: CURLINFO_unrecognized_type
#"""
#if _type == pycurl.INFOTYPE_HEADER_OUT:
#self.request_head += text
#lines = text.splitlines()
#text = '\n'.join(lines[1:])
#self.request_headers = dict(email.message_from_string(text))
#if _type == pycurl.INFOTYPE_DATA_OUT:
#self.request_body += text
#if _type == pycurl.INFOTYPE_TEXT:
#if self.request_log is None:
#self.request_log = ''
#self.request_log += text
def process_config(self, grab):
"""
Setup curl instance with values from ``grab.config``.
"""
# Accumulate all request options into `self.requests_config`
self.requests_config = {
'headers': {},
'payload': None,
'cookies': None,
'proxy': None,
}
if isinstance(grab.config['url'], unicode):
grab.config['url'] = grab.config['url'].encode('utf-8')
self.requests_config['url'] = grab.config['url']
#self.curl.setopt(pycurl.URL, url)
#self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
#self.curl.setopt(pycurl.MAXREDIRS, 5)
#self.curl.setopt(pycurl.CONNECTTIMEOUT, grab.config['connect_timeout'])
#self.curl.setopt(pycurl.TIMEOUT, grab.config['timeout'])
#self.curl.setopt(pycurl.NOSIGNAL, 1)
#self.curl.setopt(pycurl.WRITEFUNCTION, self.body_processor)
#self.curl.setopt(pycurl.HEADERFUNCTION, self.head_processor)
# User-Agent
# TODO: move to base class
if grab.config['user_agent'] is None:
if grab.config['user_agent_file'] is not None:
lines = open(grab.config['user_agent_file']).read().splitlines()
grab.config['user_agent'] = random.choice(lines)
# If value is None then set empty string
# None is not acceptable because in such case
# pycurl will set its default user agent "PycURL/x.xx.x"
# For consistency we send empty User-Agent in case of None value
# in all other transports too
if not grab.config['user_agent']:
grab.config['user_agent'] = ''
self.requests_config['headers']['User-Agent'] = grab.config['user_agent']
#if grab.config['debug']:
#self.curl.setopt(pycurl.VERBOSE, 1)
#self.curl.setopt(pycurl.DEBUGFUNCTION, self.debug_processor)
## Ignore SSL errors
#self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)
#self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)
self.requests_config['method'] = grab.request_method.lower()
if grab.request_method == 'POST' or grab.request_method == 'PUT':
if grab.config['multipart_post']:
raise NotImplementedError
#if isinstance(grab.config['multipart_post'], basestring):
#raise GrabMisuseError('multipart_post option could not be a string')
#post_items = normalize_http_values(grab.config['multipart_post'],
#charset=grab.config['charset'])
#self.curl.setopt(pycurl.HTTPPOST, post_items)
elif grab.config['post']:
if isinstance(grab.config['post'], basestring):
# bytes-string should be posted as-is
# unicode should be converted into byte-string
if isinstance(grab.config['post'], unicode):
post_data = normalize_unicode(grab.config['post'])
else:
post_data = grab.config['post']
else:
# dict, tuple, list should be serialized into byte-string
post_data = urlencode(grab.config['post'])
self.requests_config['payload'] = post_data
#self.curl.setopt(pycurl.POSTFIELDS, post_data)
#elif grab.request_method == 'PUT':
#self.curl.setopt(pycurl.PUT, 1)
#self.curl.setopt(pycurl.READFUNCTION, StringIO(grab.config['post']).read)
elif grab.request_method == 'DELETE':
pass
#self.curl.setopt(pycurl.CUSTOMREQUEST, 'delete')
elif grab.request_method == 'HEAD':
pass
#self.curl.setopt(pycurl.NOBODY, 1)
else:
pass
#self.curl.setopt(pycurl.HTTPGET, 1)
headers = grab.config['common_headers']
if grab.config['headers']:
headers.update(grab.config['headers'])
#header_tuples = [str('%s: %s' % x) for x\
#in headers.iteritems()]
#self.curl.setopt(pycurl.HTTPHEADER, header_tuples)
self.requests_config['headers'].update(headers)
# `cookiefile` option shoul be processed before `cookies` option
# because `load_cookies` updates `cookies` option
if grab.config['cookiefile']:
grab.load_cookies(grab.config['cookiefile'])
if grab.config['cookies']:
items = normalize_http_values(grab.config['cookies'])
self.requests_config['cookies'] = dict(items)
#if not grab.config['reuse_cookies'] and not grab.config['cookies']:
#self.curl.setopt(pycurl.COOKIELIST, 'ALL')
#if grab.config['referer']:
#self.curl.setopt(pycurl.REFERER, str(grab.config['referer']))
#if grab.config['proxy']:
#self.curl.setopt(pycurl.PROXY, str(grab.config['proxy']))
#else:
#self.curl.setopt(pycurl.PROXY, '')
#if grab.config['proxy_userpwd']:
#self.curl.setopt(pycurl.PROXYUSERPWD, grab.config['proxy_userpwd'])
if grab.config['proxy']:
self.requests_config['proxy'] = grab.config['proxy']
if grab.config['proxy_userpwd']:
raise GrabMisuseError('requests transport does not support proxy authentication')
if grab.config['proxy_type']:
if grab.config['proxy_type'] != 'http':
raise GrabMisuseError('requests transport supports only proxies of http type')
#if grab.config['encoding']:
#self.curl.setopt(pycurl.ENCODING, grab.config['encoding'])
#if grab.config['userpwd']:
#self.curl.setopt(pycurl.USERPWD, grab.config['userpwd'])
#def _extract_cookies(self):
#"""
#Extract cookies.
#"""
## Example of line:
## www.google.com\tFALSE\t/accounts/\tFALSE\t0\tGoogleAccountsLocale_session\ten
#cookies = {}
#for line in self.curl.getinfo(pycurl.INFO_COOKIELIST):
#chunks = line.split('\t')
#cookies[chunks[-2]] = chunks[-1]
#return cookies
def request(self):
try:
cfg = self.requests_config
func = getattr(requests, cfg['method'])
kwargs = {}
if cfg['payload'] is not None:
kwargs['data'] = cfg['payload']
if cfg['cookies'] is not None:
kwargs['cookies'] = cfg['cookies']
if cfg['proxy'] is not None:
kwargs['proxies'] = {'http': cfg['proxy'],
'https': cfg['proxy']}
self._requests_response = func(
cfg['url'], headers=cfg['headers'], **kwargs)
except Exception as ex:
raise GrabError(0, unicode(ex))
def prepare_response(self, grab):
#self.response.head = ''.join(self.response_head_chunks)
#self.response.body = ''.join(self.response_body_chunks)
#self.response.parse()
#self.response.cookies = self._extract_cookies()
#self.response.code = self.curl.getinfo(pycurl.HTTP_CODE)
#self.response.time = self.curl.getinfo(pycurl.TOTAL_TIME)
#self.response.url = self.curl.getinfo(pycurl.EFFECTIVE_URL)
response = Response()
response.head = ''
#if grab.config['body_max_size'] is not None:
#chunks = []
#read_size = 0
#for chunk in self._requests_responsek
#else:
#response.body = self._requests_response.content
response.body = self._requests_response.content
response.code = self._requests_response.status_code
response.headers = self._requests_response.headers
response.cookies = self._requests_response.cookies or {}
response.url = grab.config['url']
if grab.config['charset'] is not None:
response.parse(charset=grab.config['charset'])
else:
response.parse()
return response
#def load_cookies(self, path):
#"""
#Load cookies from the file.
#The cookie data may be in Netscape / Mozilla cookie data format or just regular HTTP-style headers dumped to a file.
#"""
#self.curl.setopt(pycurl.COOKIEFILE, path)
#def dump_cookies(self, path):
#"""
#Dump all cookies to file.
#Each cookie is dumped in the format:
## www.google.com\tFALSE\t/accounts/\tFALSE\t0\tGoogleAccountsLocale_session\ten
#"""
#open(path, 'w').write('\n'.join(self.curl.getinfo(pycurl.INFO_COOKIELIST)))
#def clear_cookies(self):
#"""
#Clear all cookies.
#"""
## Write tests
#self.curl.setopt(pycurl.COOKIELIST, 'ALL')
#grab.config['cookies'] = {}
#self.response.cookies = None
#def reset_curl_instance(self):
#"""
#Completely recreate curl instance from scratch.
#I add this method because I am not sure that
#``clear_cookies`` method works fine and I should be sure
#I can reset all cokies.
#"""
#self.curl = pycurl.Curl()
#from grab.base import BaseGrab
#class GrabRequests(RequestsTransportExtension, BaseGrab):
#pass
| mit | 3,492,569,160,456,762,000 | 35.163077 | 125 | 0.577725 | false |
fbcotter/dataset_loading | dataset_loading/pascal.py | 1 | 1677 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas as pd
from dataset_loading import core
def img_sets():
"""
List all the image sets from Pascal VOC. Don't bother computing
this on the fly, just remember it. It's faster.
"""
return [
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train',
'tvmonitor']
def img_dict(base_dir):
d = {}
for i, cat in enumerate(img_sets()):
filename = os.path.join(base_dir, 'ImageSets', 'Main',
cat+'_trainval.txt')
df = pd.read_csv(filename, delim_whitespace=True, header=None,
names=['filename', 'true'])
df = df[df['true'] == 1]
files = df['filename'].values
for f in files:
if f in d.keys():
d[f].append(i)
else:
d[f] = [i]
return d
def load_pascal_data(data_dir, max_epochs=None, thread_count=3,
imsize=(128,128)):
"""Will use a filename queue and img_queue and load the data
"""
file_queue = core.FileQueue()
# d = img_dict(data_dir)
img_queue = core.ImageQueue(files_in_epoch=250, maxsize=1000)
threads = []
for i in range(thread_count):
thread = core.imLoader('Loader ' + str(i+1), file_queue, img_queue,
imsize, data_dir)
thread.start()
threads.append(thread)
return img_queue
| mit | -7,223,453,819,954,609,000 | 28.946429 | 75 | 0.543828 | false |
DarkPhoenix6/My_Libraries | Python/Utils/mars.py | 1 | 12802 | from __future__ import division
import sys
import math
class Point2D(object):
def __init__(self, x, y):
self.x = x
self.y = y
def square_distance(self, other_point):
""" Calculates the square distance between this Point2D and another Point2D
:param other_point: The other Point2D
:return: The Square Distance
:rtype: float
"""
return (self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2
def __eq__(self, other_point):
""" Override the equals operator to compare coordinates
:param other_point: The other Point2D
:return: True if points are equal else False
:type: bool
"""
return self.x == other_point.x and self.y == other_point.y
def to_dict(self):
""" Converts point to python dict
:return: dict of x,y coordinates
:rtype: dict
"""
return {"x": self.x, "y": self.y}
def pythagoras_find_c(self):
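        # length of the vector (x, y), i.e. the hypotenuse of the right triangle with legs x and y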
return math.sqrt(self.x ** 2 + self.y ** 2)
def slope(self, other_point):
""" Calculates the slope between this point and another Point2D
:param other_point: The other point to find the slope with
:return: Slope as a float
"""
# TODO Find a better way to handle this error
if self.x == other_point.x:
return None
# cast to float just in case there is an integer passed in
return (self.y - other_point.y) / float(self.x - other_point.x)
def angle_deg(self, other_point):
""" Calculates the angle in degrees between this point and another Point2D
:param other_point: The other Point2D
:return: The angle in Degrees
"""
if self.x != other_point.x:
slope = other_point.slope(self)
if slope is not None:
return 180 * math.atan(slope) / math.pi
else:
# vertical line
return None
return 90 if other_point.y > self.y else -90
def pos_angle_deg(self, other_point):
angle = self.angle_deg(other_point)
return angle if angle >= 0 else angle + 180.0
@staticmethod
def intersect(point1, point2, point3, point4):
"""
caluculating the intersecting point that will be the new node
:param point1:
:param point2:
:param point3:
:param point4:
:return:
"""
c = (point2.y - point1.y) * (point3.x - point4.x) - (point1.x - point2.x) * (point4.y - point3.y)
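        # c is the denominator of the two-line intersection formula (cross product of the
        # direction vectors); a value of zero means the lines are parallel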
if c != 0:
return Point2D(((point3.x - point4.x) * (point1.x * point2.y - point2.x * point1.y) - (point1.x - point2.x)
* (point3.x * point4.y - point4.x * point3.y)) / c,
(-(point4.y - point3.y) * (point1.x * point2.y - point2.x * point1.y) + (point2.y - point1.y)
* (point3.x * point4.y - point4.x * point3.y)) / c)
else:
return None
@staticmethod
def intersect_xy_mp(m, point1, point2, point3):
"""
caluculating the intersecting point that will be the new node
:param m: slope
:param point1:
:param point2:
:param point3:
:return:
"""
c = m * (point3.x - point2.x) + point2.y - point3.y
if abs(m) < 100:
if c != 0:
x_ = ((point3.x - point2.x) * (m * point1.x - point1.y + point2.y) + (point2.y - point3.y) * point2.x) \
/ c
return Point2D(x_, m * (x_ - point1.x) + point1.y)
elif point3.x != point2.x:
return Point2D(point1.x, (point1.y - point2.y) * (point3.y - point2.y) / (point3.x - point2.x) + point2.y)
return Point2D((point1.x + point2.x + point3.x) / 3, (point1.y + point2.y + point3.y) / 3)
def y_intercept(self, other):
slope = other.slope(self)
b = -1 * slope * self.x + self.y
return b
def __str__(self):
return "Point2D({},{})".format(self.x, self.y)
def __mul__(self, other):
if type(other) == type(self):
return Point2D(self.x * other.x, self.y * other.y)
else:
return Point2D(self.x * other, self.y * other)
def __rmul__(self, other):
return Point2D.__mul__(self, other)
def __add__(self, other):
if type(other) == type(self):
return Point2D(self.x + other.x, self.y + other.y)
else:
return Point2D(self.x + other, self.y + other)
def __radd__(self, other):
return Point2D.__add__(self, other)
def __sub__(self, other):
if type(other) == type(self):
return Point2D(self.x - other.x, self.y - other.y)
else:
return Point2D(self.x - other, self.y - other)
def __rsub__(self, other):
return Point2D.__sub__(other, self)
def __truediv__(self, other):
if type(other) == type(self):
return Point2D(self.x / other.x, self.y / other.y)
else:
return Point2D(self.x / other, self.y / other)
def __rtruediv__(self, other):
return Point2D.__truediv__(other, self)
@staticmethod
def find_distance(point1, point2):
""" finds the distance between points
:param point1:
:param point2:
:return:
"""
result = math.sqrt(point2.square_distance(point1))
return result
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
class Mars(object):
def __init__(self, x_arr: list, y_arr: list):
self.x_arr = x_arr
self.y_arr = y_arr
self.surface = []
self.flat_spots = []
self.gravity = 3.711
self.target = Point2D(0, 0)
for i in range(len(self.x_arr)):
self.surface.append(Point2D(self.x_arr[i], self.y_arr[i]))
if (i + 1) != len(self.x_arr):
temp = self.surface[-1]
temp2 = Point2D(self.x_arr[i + 1], self.y_arr[i + 1])
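                # a horizontal segment at least 1000 m wide qualifies as the flat landing zone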
if (temp2.x - temp.x) >= 1000 and temp2.y == temp.y:
self.flat_spots = [temp, temp2]
self.target = Point2D(temp2.x - temp.x, temp.y)
slope = temp.slope(temp2)
b = temp.y_intercept(temp2)
if slope is not None:
for j in range(1, self.x_arr[i + 1] - self.x_arr[i]):
self.surface.append(Point2D(j, ((j * slope) + b)))
else:
pass
class MarsLander(object):
def __init__(self, mars: Mars, x, y, h_velocity, v_velocity, fuel, rotation, power):
self.mars = mars
self.current_position = Point2D(x, y)
self.current_velocity = Point2D(h_velocity, v_velocity)
self.velocity_angle = math.atan2(self.current_velocity.y, self.current_velocity.x)
self.fuel = fuel
self.rotation = rotation
self.power = power
def calculate_trajectory(self, target: Point2D):
temp = self.current_position + (self.current_velocity * 3)
print("Debug messages... Calculating Trajectory", temp, target, file=sys.stderr)
if temp.x - target.x != 0:
trajectory = temp.angle_deg(target)
# TODO
if temp.y < target.y:
return int(trajectory) * -1
else:
return int(trajectory) * -1
elif self.current_position.x - target.x != 0:
trajectory = temp.angle_deg(target)
# TODO
if temp.y < target.y:
return int(trajectory) * -1
else:
return int(trajectory) * -1
else:
return 0
def angle_of_reach(self, distance):
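        # projectile "angle of reach": theta = 1/2 * asin(g * d / v^2), the launch angle
        # needed to cover horizontal distance d at speed v (drag ignored)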
return (1 / 2) * math.asin(self.mars.gravity * distance / (self.current_velocity.pythagoras_find_c() ** 2))
def distance_traveled(self):
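        # projectile range when launched from height y:
        # d = (v * cos(theta) / g) * (v * sin(theta) + sqrt((v * sin(theta))^2 + 2 * g * y))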
v = self.current_velocity.pythagoras_find_c()
theta = self.velocity_angle
g = self.mars.gravity
result1 = v * math.cos(theta) / g
result2 = (v * math.sin(theta)) + math.sqrt(((v * math.sin(theta)) ** 2) + 2 * g * self.current_position.y)
return result1 * result2
    def time_of_flight(self):
        v = self.current_velocity.pythagoras_find_c()
        theta = self.velocity_angle
        d = self.distance_traveled()
        # the horizontal velocity component covers the range d in t = d / (v * cos(theta))
        horizontal_speed = v * math.cos(theta)
        return d / horizontal_speed if horizontal_speed != 0 else 0
def landing_sequence(self):
print("Debug messages... Initiaing Landing Sequence", file=sys.stderr)
if (self.mars.flat_spots[0].x + 10) <= self.current_position.x <= (self.mars.flat_spots[1].x - 10):
if -20 < self.current_velocity.x < 20:
print("Debug messages... 1", file=sys.stderr)
if self.current_velocity.y <= -30:
inst = "0 4"
else:
inst = "0 2"
else:
print("Debug messages... 2", file=sys.stderr)
inst = self.cancel_x_velocity()
else:
if -20 < self.current_velocity.x < 20:
print("Debug messages... 3", file=sys.stderr)
if self.mars.target.y < self.current_position.y:
trajectory = int(self.calculate_trajectory(self.mars.target))
if self.current_velocity.y <= -30:
power2 = 4
else:
power2 = 3
inst = str(trajectory) + " " + str(power2)
else:
trajectory = int(self.calculate_trajectory(Point2D(self.mars.target.x, self.mars.target.y + 200)))
power2 = 4
inst = str(trajectory) + " " + str(power2)
else:
print("Debug messages... 4", file=sys.stderr)
inst = self.cancel_x_velocity()
return inst
def cancel_x_velocity(self):
if -15 > self.current_velocity.x:
if -33 > self.current_velocity.x:
trajectory = str(-62)
power2 = str(4)
elif -15 > self.current_velocity.x:
if self.current_velocity.y <= -30:
power2 = str(4)
trajectory = str(-30)
else:
power2 = str(4)
trajectory = str(-45)
else:
if self.current_velocity.y <= -30:
trajectory = str(-45)
else:
trajectory = str(-73)
power2 = str(4)
else:
if 33 < self.current_velocity.x:
trajectory = str(62)
power2 = str(4)
if self.current_velocity.y <= -30:
power2 = str(4)
trajectory = str(30)
else:
power2 = str(4)
trajectory = str(45)
else:
if self.current_velocity.y <= -30:
trajectory = str(45)
else:
trajectory = str(73)
power2 = str(4)
inst = trajectory + " " + power2
return inst
surface_n = int(input()) # the number of points used to draw the surface of Mars.
x = []
y = []
for k in range(surface_n):
# land_x: X coordinate of a surface point. (0 to 6999)
# land_y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
land_x, land_y = [int(j) for j in input().split()]
x.append(land_x)
y.append(land_y)
# game loop
mars = Mars(x, y)
while True:
# h_speed: the horizontal speed (in m/s), can be negative.
# v_speed: the vertical speed (in m/s), can be negative.
# fuel: the quantity of remaining fuel in liters.
# rotate: the rotation angle in degrees (-90 to 90).
# power: the thrust power (0 to 4).
x1, y1, h_speed, v_speed, fuel, rotate, power = [int(i) for i in input().split()]
lander = MarsLander(mars, x1, y1, h_speed, v_speed, fuel, rotate, power)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
if lander.mars.flat_spots[0].x > lander.mars.flat_spots[1].x:
lander.mars.flat_spots[0], lander.mars.flat_spots[1] = lander.mars.flat_spots[1], lander.mars.flat_spots[0]
if ((lander.mars.flat_spots[0].x - 1000) <= lander.current_position.x <= (
lander.mars.flat_spots[1].x + 1000)) and lander.current_position.y > lander.mars.target.y:
comm = lander.landing_sequence()
print(comm)
# rotate power. rotate is the desired rotation angle. power is the desired thrust power.
else:
print(str(lander.calculate_trajectory(lander.mars.target)) + " 4")
| gpl-3.0 | 4,510,197,727,863,930,400 | 36.542522 | 136 | 0.529605 | false |
SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/skeletons/wo_replay.py | 1 | 5238 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2016 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Script, utils
"""
The following dictionaries describe
the addon's tree architecture.
* Key: item id
* Value: item infos
- route (folder)/resolver (playable URL): Callback function to run once this item is selected
- thumb: Item thumb path relative to "media" folder
        - fanart: Item fanart path relative to "media" folder
"""
menu = {
'tv5mondeafrique': {
'route': '/resources/lib/channels/wo/tv5mondeafrique:list_categories',
'label': 'TV5Monde Afrique',
'thumb': 'channels/wo/tv5mondeafrique.png',
'fanart': 'channels/wo/tv5mondeafrique_fanart.jpg',
'enabled': True,
'order': 1
},
'arte': {
'route': '/resources/lib/channels/wo/arte:list_categories',
'label': 'Arte (' + utils.ensure_unicode(Script.setting['arte.language']) + ')',
'thumb': 'channels/wo/arte.png',
'fanart': 'channels/wo/arte_fanart.jpg',
'enabled': True,
'order': 3
},
'france24': {
'route': '/resources/lib/channels/wo/france24:root',
'label': 'France 24 (' + utils.ensure_unicode(Script.setting['france24.language']) + ')',
'thumb': 'channels/wo/france24.png',
'fanart': 'channels/wo/france24_fanart.jpg',
'enabled': True,
'order': 4
},
'nhkworld': {
'route': '/resources/lib/channels/wo/nhkworld:list_categories',
'label': 'NHK World (' + utils.ensure_unicode(Script.setting['nhkworld.language']) + ')',
'thumb': 'channels/wo/nhkworld.png',
'fanart': 'channels/wo/nhkworld_fanart.jpg',
'enabled': True,
'order': 5
},
'tv5monde': {
'route': '/resources/lib/channels/wo/tv5monde:list_categories',
'label': 'TV5Monde',
'thumb': 'channels/wo/tv5monde.png',
'fanart': 'channels/wo/tv5monde_fanart.jpg',
'enabled': True,
'order': 6
},
'tivi5monde': {
'route': '/resources/lib/channels/wo/tivi5monde:list_categories',
'label': 'Tivi 5Monde',
'thumb': 'channels/wo/tivi5monde.png',
'fanart': 'channels/wo/tivi5monde_fanart.jpg',
'enabled': True,
'order': 7
},
'bvn': {
'route': '/resources/lib/channels/wo/bvn:list_days',
'label': 'BVN',
'thumb': 'channels/wo/bvn.png',
'fanart': 'channels/wo/bvn_fanart.jpg',
'enabled': True,
'order': 8
},
'arirang': {
'route': '/resources/lib/channels/wo/arirang:list_categories',
'label': 'Arirang (아리랑)',
'thumb': 'channels/wo/arirang.png',
'fanart': 'channels/wo/arirang_fanart.jpg',
'enabled': True,
'order': 11
},
'beinsports': {
'route': '/resources/lib/channels/wo/beinsports:list_sites',
'label': 'Bein Sports',
'thumb': 'channels/wo/beinsports.png',
'fanart': 'channels/wo/beinsports_fanart.jpg',
'enabled': True,
'order': 13
},
'afriquemedia': {
'route': '/resources/lib/channels/wo/afriquemedia:list_categories',
'label': 'Afrique Media',
'thumb': 'channels/wo/afriquemedia.png',
'fanart': 'channels/wo/afriquemedia_fanart.jpg',
'enabled': True,
'order': 20
},
'channelnewsasia': {
'route': '/resources/lib/channels/wo/channelnewsasia:list_categories',
'label': 'Channel NewsAsia',
'thumb': 'channels/wo/channelnewsasia.png',
'fanart': 'channels/wo/channelnewsasia_fanart.jpg',
'enabled': True,
'order': 23
},
'rt': {
'route': '/resources/lib/channels/wo/rt:list_categories',
'label': 'RT (' + utils.ensure_unicode(Script.setting['rt.language']) + ')',
'thumb': 'channels/wo/rt.png',
'fanart': 'channels/wo/rt_fanart.jpg',
'available_languages': ['FR', 'EN'],
'enabled': True,
'order': 24
},
'africa24': {
'route': '/resources/lib/channels/wo/africa24:list_categories',
'label': 'Africa 24',
'thumb': 'channels/wo/africa24.png',
'fanart': 'channels/wo/africa24_fanart.jpg',
'enabled': True,
'order': 25
}
}
| gpl-2.0 | 5,558,631,183,589,618,000 | 35.333333 | 97 | 0.599197 | false |
josanvel/BazarPapeleriaLulita | CodigoBazarLulita/pyBotonesReportes.py | 1 | 1813 | '''
Created on 15/03/2015
@author: josanvel
'''
from PyQt4 import QtCore, QtGui
from BotonesReportes import Ui_BotonesReportes
from pyReporteGanancia import MyformReporteGanancias
from pyReporteProducto import MyformReporteProductos
class MyformBotonesReportes(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.uiBotonesReportes= Ui_BotonesReportes()
self.uiBotonesReportes.setupUi(self)
self.center()
self.connect(self.uiBotonesReportes.btnRegresarReportes, QtCore.SIGNAL("clicked()"), self.regresarReportes)
self.connect(self.uiBotonesReportes.btnReporteGanancias, QtCore.SIGNAL("clicked()"), self.entrarReporteGanancias)
self.connect(self.uiBotonesReportes.btnReporteProductos, QtCore.SIGNAL("clicked()"), self.entrarReporteProductos)
def entrarReporteGanancias(self):
self.hide()
self.reporteGanancias = MyformReporteGanancias()
self.reporteGanancias.regresarVentanaR(self)
self.reporteGanancias.show()
def entrarReporteProductos(self):
self.hide()
self.reporteProductos = MyformReporteProductos()
self.reporteProductos.regresarVentanaR(self)
self.reporteProductos.show()
def regresarVentanaR(self,ventanaAtras):
self.ventana = ventanaAtras
def regresarReportes(self):
self.hide()
self.ventana.show()
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft()) | gpl-2.0 | 846,794,267,647,496,200 | 35.28 | 129 | 0.63155 | false |
jpacg/su-binary | jni/selinux/python/sepolicy/sepolicy/manpage.py | 1 | 41002 | #! /usr/bin/python -Es
# Copyright (C) 2012-2013 Red Hat
# AUTHOR: Dan Walsh <[email protected]>
# AUTHOR: Miroslav Grepl <[email protected]>
# see file 'COPYING' for use and warranty information
#
# semanage is a tool for managing SELinux configuration files
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
__all__ = ['ManPage', 'HTMLManPages', 'manpage_domains', 'manpage_roles', 'gen_domains']
import string
import selinux
import sepolicy
import os
import time
typealias_types = {
"antivirus_t":("amavis_t", "clamd_t", "clamscan_t", "freshclam_t"),
"cluster_t":("rgmanager_t", "corosync_t", "aisexec_t", "pacemaker_t"),
"svirt_t":("qemu_t"),
"httpd_t":("phpfpm_t"),
}
equiv_dict = {"smbd": ["samba"], "httpd": ["apache"], "virtd": ["virt", "libvirt"], "named": ["bind"], "fsdaemon": ["smartmon"], "mdadm": ["raid"]}
equiv_dirs = ["/var"]
modules_dict = None
def gen_modules_dict(path="/usr/share/selinux/devel/policy.xml"):
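    # Parse the SELinux policy.xml once and cache a module name -> summary mapping
    # that is reused for the man page descriptions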
global modules_dict
if modules_dict:
return modules_dict
import xml.etree.ElementTree
modules_dict = {}
try:
tree = xml.etree.ElementTree.fromstring(sepolicy.policy_xml(path))
for l in tree.findall("layer"):
for m in l.findall("module"):
name = m.get("name")
if name == "user" or name == "unconfined":
continue
if name == "unprivuser":
name = "user"
if name == "unconfineduser":
name = "unconfined"
for b in m.findall("summary"):
modules_dict[name] = b.text
except IOError:
pass
return modules_dict
users = None
users_range = None
def get_all_users_info():
global users
global users_range
if users and users_range:
return users, users_range
users = []
users_range = {}
allusers = []
allusers_info = sepolicy.info(sepolicy.USER)
for d in allusers_info:
allusers.append(d['name'])
users_range[d['name'].split("_")[0]] = d['range']
for u in allusers:
if u not in ["system_u", "root", "unconfined_u"]:
users.append(u.replace("_u", ""))
users.sort()
return users, users_range
all_entrypoints = None
def get_entrypoints():
global all_entrypoints
if not all_entrypoints:
all_entrypoints = next(sepolicy.info(sepolicy.ATTRIBUTE, "entry_type"))["types"]
return all_entrypoints
domains = None
def gen_domains():
global domains
if domains:
return domains
domains = []
for d in sepolicy.get_all_domains():
found = False
domain = d[:-2]
# if domain + "_exec_t" not in get_entrypoints():
# continue
if domain in domains:
continue
domains.append(domain)
for role in sepolicy.get_all_roles():
if role[:-2] in domains or role == "system_r":
continue
domains.append(role[:-2])
domains.sort()
return domains
types = None
def _gen_types():
global types
if types:
return types
all_types = sepolicy.info(sepolicy.TYPE)
types = {}
for rec in all_types:
try:
types[rec["name"]] = rec["attributes"]
except:
types[rec["name"]] = []
return types
def prettyprint(f, trim):
return " ".join(f[:-len(trim)].split("_"))
# for HTML man pages
manpage_domains = []
manpage_roles = []
fedora_releases = ["Fedora17", "Fedora18"]
rhel_releases = ["RHEL6", "RHEL7"]
def get_alphabet_manpages(manpage_list):
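    # Group the man page file names by their first letter for the alphabetical HTML index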
alphabet_manpages = dict.fromkeys(string.ascii_letters, [])
for i in string.ascii_letters:
temp = []
for j in manpage_list:
if j.split("/")[-1][0] == i:
temp.append(j.split("/")[-1])
alphabet_manpages[i] = temp
return alphabet_manpages
def convert_manpage_to_html(html_manpage, manpage):
try:
from commands import getstatusoutput
except ImportError:
from subprocess import getstatusoutput
rc, output = getstatusoutput("/usr/bin/groff -man -Thtml %s 2>/dev/null" % manpage)
if rc == 0:
print(html_manpage, "has been created")
fd = open(html_manpage, 'w')
fd.write(output)
fd.close()
class HTMLManPages:
"""
    Generate HTML man pages for the given SELinux domains
"""
def __init__(self, manpage_roles, manpage_domains, path, os_version):
self.manpage_roles = get_alphabet_manpages(manpage_roles)
self.manpage_domains = get_alphabet_manpages(manpage_domains)
self.os_version = os_version
self.old_path = path + "/"
self.new_path = self.old_path + self.os_version + "/"
        if self.os_version in fedora_releases or self.os_version in rhel_releases:
self.__gen_html_manpages()
else:
print("SELinux HTML man pages can not be generated for this %s" % os_version)
exit(1)
def __gen_html_manpages(self):
self._write_html_manpage()
self._gen_index()
self._gen_body()
self._gen_css()
def _write_html_manpage(self):
if not os.path.isdir(self.new_path):
os.mkdir(self.new_path)
for domain in self.manpage_domains.values():
if len(domain):
for d in domain:
convert_manpage_to_html((self.new_path + d.rsplit("_selinux", 1)[0] + ".html"), self.old_path + d)
for role in self.manpage_roles.values():
if len(role):
for r in role:
convert_manpage_to_html((self.new_path + r.rsplit("_selinux", 1)[0] + ".html"), self.old_path + r)
def _gen_index(self):
index = self.old_path + "index.html"
fd = open(index, 'w')
fd.write("""
<html>
<head>
<link rel=stylesheet type="text/css" href="style.css" title="style">
<title>SELinux man pages online</title>
</head>
<body>
<h1>SELinux man pages</h1>
<br></br>
<h2>Fedora or Red Hat Enterprise Linux Man Pages.</h2>
<br></br>
<hr>
<h3>Fedora</h3>
<table><tr>
<td valign="middle">
</td>
</tr></table>
<pre>
""")
for f in fedora_releases:
fd.write("""
<a href=%s/%s.html>%s</a> - SELinux man pages for %s """ % (f, f, f, f))
fd.write("""
</pre>
<hr>
<h3>RHEL</h3>
<table><tr>
<td valign="middle">
</td>
</tr></table>
<pre>
""")
for r in rhel_releases:
fd.write("""
<a href=%s/%s.html>%s</a> - SELinux man pages for %s """ % (r, r, r, r))
fd.write("""
</pre>
""")
fd.close()
print("%s has been created") % index
def _gen_body(self):
html = self.new_path + self.os_version + ".html"
fd = open(html, 'w')
fd.write("""
<html>
<head>
<link rel=stylesheet type="text/css" href="../style.css" title="style">
    <title>Linux man-pages online for %(os)s</title>
</head>
<body>
<h1>SELinux man pages for %(os)s</h1>
<hr>
<table><tr>
<td valign="middle">
<h3>SELinux roles</h3>
""")
for letter in self.manpage_roles:
if len(self.manpage_roles[letter]):
fd.write("""
<a href=#%s_role>%s</a>"""
% (letter, letter))
fd.write("""
</td>
</tr></table>
<pre>
""")
rolename_body = ""
for letter in self.manpage_roles:
if len(self.manpage_roles[letter]):
rolename_body += "<p>"
for r in self.manpage_roles[letter]:
rolename = r.rsplit("_selinux", 1)[0]
rolename_body += "<a name=%s_role></a><a href=%s.html>%s_selinux(8)</a> - Security Enhanced Linux Policy for the %s SELinux user\n" % (letter, rolename, rolename, rolename)
fd.write("""%s
</pre>
<hr>
<table><tr>
<td valign="middle">
<h3>SELinux domains</h3>"""
% rolename_body)
for letter in self.manpage_domains:
if len(self.manpage_domains[letter]):
fd.write("""
<a href=#%s_domain>%s</a>
""" % (letter, letter))
fd.write("""
</td>
</tr></table>
<pre>
""")
domainname_body = ""
for letter in self.manpage_domains:
if len(self.manpage_domains[letter]):
domainname_body += "<p>"
for r in self.manpage_domains[letter]:
domainname = r.rsplit("_selinux", 1)[0]
domainname_body += "<a name=%s_domain></a><a href=%s.html>%s_selinux(8)</a> - Security Enhanced Linux Policy for the %s SELinux processes\n" % (letter, domainname, domainname, domainname)
fd.write("""%s
</pre>
</body>
</html>
""" % domainname_body)
fd.close()
print("%s has been created") % html
def _gen_css(self):
style_css = self.old_path + "style.css"
fd = open(style_css, 'w')
fd.write("""
html, body {
background-color: #fcfcfc;
font-family: arial, sans-serif;
font-size: 110%;
color: #333;
}
h1, h2, h3, h4, h5, h6 {
color: #2d7c0b;
font-family: arial, sans-serif;
margin-top: 25px;
}
a {
color: #336699;
text-decoration: none;
}
a:visited {
color: #4488bb;
}
a:hover, a:focus, a:active {
color: #07488A;
text-decoration: none;
}
a.func {
color: red;
text-decoration: none;
}
a.file {
color: red;
text-decoration: none;
}
pre.code {
background-color: #f4f0f4;
// font-family: monospace, courier;
font-size: 110%;
margin-left: 0px;
margin-right: 60px;
padding-top: 5px;
padding-bottom: 5px;
padding-left: 8px;
padding-right: 8px;
border: 1px solid #AADDAA;
}
.url {
font-family: serif;
font-style: italic;
color: #440064;
}
""")
fd.close()
print("%s has been created") % style_css
class ManPage:
"""
Generate a Manpage on an SELinux domain in the specified path
"""
modules_dict = None
enabled_str = ["Disabled", "Enabled"]
def __init__(self, domainname, path="/tmp", root="/", source_files=False, html=False):
self.html = html
self.source_files = source_files
self.root = root
self.portrecs = sepolicy.gen_port_dict()[0]
self.domains = gen_domains()
self.all_domains = sepolicy.get_all_domains()
self.all_attributes = sepolicy.get_all_attributes()
self.all_bools = sepolicy.get_all_bools()
self.all_port_types = sepolicy.get_all_port_types()
self.all_roles = sepolicy.get_all_roles()
self.all_users = get_all_users_info()[0]
self.all_users_range = get_all_users_info()[1]
self.all_file_types = sepolicy.get_all_file_types()
self.role_allows = sepolicy.get_all_role_allows()
self.types = _gen_types()
if self.source_files:
self.fcpath = self.root + "file_contexts"
else:
self.fcpath = self.root + selinux.selinux_file_context_path()
self.fcdict = sepolicy.get_fcdict(self.fcpath)
if not os.path.exists(path):
os.makedirs(path)
self.path = path
if self.source_files:
self.xmlpath = self.root + "policy.xml"
else:
self.xmlpath = self.root + "/usr/share/selinux/devel/policy.xml"
self.booleans_dict = sepolicy.gen_bool_dict(self.xmlpath)
self.domainname, self.short_name = sepolicy.gen_short_name(domainname)
self.type = self.domainname + "_t"
self._gen_bools()
self.man_page_path = "%s/%s_selinux.8" % (path, self.domainname)
self.fd = open(self.man_page_path, 'w')
if self.domainname + "_r" in self.all_roles:
self.__gen_user_man_page()
if self.html:
manpage_roles.append(self.man_page_path)
else:
if self.html:
manpage_domains.append(self.man_page_path)
self.__gen_man_page()
self.fd.close()
for k in equiv_dict.keys():
if k == self.domainname:
for alias in equiv_dict[k]:
self.__gen_man_page_link(alias)
def _gen_bools(self):
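        # collect the booleans defined for this domain type and for every equivalent
        # domain type listed in equiv_dict (e.g. httpd -> apache)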
self.bools = []
self.domainbools = []
types = [self.type]
if self.domainname in equiv_dict:
for t in equiv_dict[self.domainname]:
if t + "_t" in self.all_domains:
types.append(t + "_t")
for t in types:
domainbools, bools = sepolicy.get_bools(t)
self.bools += bools
self.domainbools += domainbools
self.bools.sort()
self.domainbools.sort()
def get_man_page_path(self):
return self.man_page_path
def __gen_user_man_page(self):
self.role = self.domainname + "_r"
if not self.modules_dict:
self.modules_dict = gen_modules_dict(self.xmlpath)
try:
self.desc = self.modules_dict[self.domainname]
except:
self.desc = "%s user role" % self.domainname
if self.domainname in self.all_users:
self.attributes = next(sepolicy.info(sepolicy.TYPE, (self.type)))["attributes"]
self._user_header()
self._user_attribute()
self._can_sudo()
self._xwindows_login()
# until a new policy build with login_userdomain attribute
#self.terminal_login()
self._network()
self._booleans()
self._home_exec()
self._transitions()
else:
self._role_header()
self._booleans()
self._port_types()
self._mcs_types()
self._writes()
self._footer()
def __gen_man_page_link(self, alias):
path = "%s/%s_selinux.8" % (self.path, alias)
self.fd = open("%s/%s_selinux.8" % (self.path, alias), 'w')
self.fd.write(".so man8/%s_selinux.8" % self.domainname)
self.fd.close()
print(path)
def __gen_man_page(self):
self.anon_list = []
self.attributes = {}
self.ptypes = []
self._get_ptypes()
for domain_type in self.ptypes:
try:
if typealias_types[domain_type]:
fd = self.fd
man_page_path = self.man_page_path
for t in typealias_types[domain_type]:
self._typealias_gen_man(t)
self.fd = fd
self.man_page_path = man_page_path
except KeyError:
                continue
self.attributes[domain_type] = next(sepolicy.info(sepolicy.TYPE, ("%s") % domain_type))["attributes"]
self._header()
self._entrypoints()
self._process_types()
self._mcs_types()
self._booleans()
self._nsswitch_domain()
self._port_types()
self._writes()
self._file_context()
self._public_content()
self._footer()
def _get_ptypes(self):
for f in self.all_domains:
if f.startswith(self.short_name) or f.startswith(self.domainname):
self.ptypes.append(f)
def _typealias_gen_man(self, t):
self.man_page_path = "%s/%s_selinux.8" % (self.path, t[:-2])
self.ports = []
self.booltext = ""
self.fd = open(self.man_page_path, 'w')
self._typealias(t[:-2])
self._footer()
self.fd.close()
def _typealias(self,typealias):
self.fd.write('.TH "%(typealias)s_selinux" "8" "%(date)s" "%(typealias)s" "SELinux Policy %(typealias)s"'
% {'typealias':typealias, 'date': time.strftime("%y-%m-%d")})
self.fd.write(r"""
.SH "NAME"
%(typealias)s_selinux \- Security Enhanced Linux Policy for the %(typealias)s processes
.SH "DESCRIPTION"
%(typealias)s_t SELinux domain type is now associated with %(domainname)s domain type (%(domainname)s_t).
""" % {'typealias':typealias, 'domainname':self.domainname})
self.fd.write(r"""
Please see
.B %(domainname)s_selinux
man page for more details.
""" % {'domainname':self.domainname})
def _header(self):
self.fd.write('.TH "%(domainname)s_selinux" "8" "%(date)s" "%(domainname)s" "SELinux Policy %(domainname)s"'
% {'domainname': self.domainname, 'date': time.strftime("%y-%m-%d")})
self.fd.write(r"""
.SH "NAME"
%(domainname)s_selinux \- Security Enhanced Linux Policy for the %(domainname)s processes
.SH "DESCRIPTION"
Security-Enhanced Linux secures the %(domainname)s processes via flexible mandatory access control.
The %(domainname)s processes execute with the %(domainname)s_t SELinux type. You can check if you have these processes running by executing the \fBps\fP command with the \fB\-Z\fP qualifier.
For example:
.B ps -eZ | grep %(domainname)s_t
""" % {'domainname': self.domainname})
def _format_boolean_desc(self, b):
desc = self.booleans_dict[b][2][0].lower() + self.booleans_dict[b][2][1:]
if desc[-1] == ".":
desc = desc[:-1]
return desc
def _gen_bool_text(self):
booltext = ""
for b, enabled in self.domainbools + self.bools:
if b.endswith("anon_write") and b not in self.anon_list:
self.anon_list.append(b)
else:
if b not in self.booleans_dict:
continue
booltext += """
.PP
If you want to %s, you must turn on the %s boolean. %s by default.
.EX
.B setsebool -P %s 1
.EE
""" % (self._format_boolean_desc(b), b, self.enabled_str[enabled], b)
return booltext
def _booleans(self):
self.booltext = self._gen_bool_text()
if self.booltext != "":
self.fd.write("""
.SH BOOLEANS
SELinux policy is customizable based on least access required. %s policy is extremely flexible and has several booleans that allow you to manipulate the policy and run %s with the tightest access possible.
""" % (self.domainname, self.domainname))
self.fd.write(self.booltext)
def _nsswitch_domain(self):
nsswitch_types = []
nsswitch_booleans = ['authlogin_nsswitch_use_ldap', 'kerberos_enabled']
nsswitchbooltext = ""
for k in self.attributes.keys():
if "nsswitch_domain" in self.attributes[k]:
nsswitch_types.append(k)
if len(nsswitch_types):
self.fd.write("""
.SH NSSWITCH DOMAIN
""")
for b in nsswitch_booleans:
nsswitchbooltext += """
.PP
If you want to %s for the %s, you must turn on the %s boolean.
.EX
.B setsebool -P %s 1
.EE
""" % (self._format_boolean_desc(b), (", ".join(nsswitch_types)), b, b)
self.fd.write(nsswitchbooltext)
def _process_types(self):
if len(self.ptypes) == 0:
return
self.fd.write(r"""
.SH PROCESS TYPES
SELinux defines process types (domains) for each process running on the system
.PP
You can see the context of a process using the \fB\-Z\fP option to \fBps\fP
.PP
Policy governs the access confined processes have to files.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
The following process types are defined for %(domainname)s:
""" % {'domainname': self.domainname})
self.fd.write("""
.EX
.B %s
.EE""" % ", ".join(self.ptypes))
self.fd.write("""
.PP
Note:
.B semanage permissive -a %(domainname)s_t
can be used to make the process type %(domainname)s_t permissive. SELinux does not deny access to permissive process types, but the AVC (SELinux denials) messages are still generated.
""" % {'domainname': self.domainname})
def _port_types(self):
self.ports = []
for f in self.all_port_types:
if f.startswith(self.short_name) or f.startswith(self.domainname):
self.ports.append(f)
if len(self.ports) == 0:
return
self.fd.write("""
.SH PORT TYPES
SELinux defines port types to represent TCP and UDP ports.
.PP
You can see the types associated with a port by using the following command:
.B semanage port -l
.PP
Policy governs the access confined processes have to these ports.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
The following port types are defined for %(domainname)s:""" % {'domainname': self.domainname})
for p in self.ports:
self.fd.write("""
.EX
.TP 5
.B %s
.TP 10
.EE
""" % p)
once = True
for prot in ("tcp", "udp"):
if (p, prot) in self.portrecs:
if once:
self.fd.write("""
Default Defined Ports:""")
once = False
self.fd.write(r"""
%s %s
.EE""" % (prot, ",".join(self.portrecs[(p, prot)])))
def _file_context(self):
flist = []
mpaths = []
for f in self.all_file_types:
if f.startswith(self.domainname):
flist.append(f)
if f in self.fcdict:
mpaths = mpaths + self.fcdict[f]["regex"]
if len(mpaths) == 0:
return
mpaths.sort()
mdirs = {}
for mp in mpaths:
found = False
for md in mdirs:
if mp.startswith(md):
mdirs[md].append(mp)
found = True
break
if not found:
for e in equiv_dirs:
if mp.startswith(e) and mp.endswith('(/.*)?'):
mdirs[mp[:-6]] = []
break
equiv = []
for m in mdirs:
if len(mdirs[m]) > 0:
equiv.append(m)
self.fd.write(r"""
.SH FILE CONTEXTS
SELinux requires files to have an extended attribute to define the file type.
.PP
You can see the context of a file using the \fB\-Z\fP option to \fBls\fP
.PP
Policy governs the access confined processes have to these files.
SELinux %(domainname)s policy is very flexible allowing users to setup their %(domainname)s processes in as secure a method as possible.
.PP
""" % {'domainname': self.domainname})
if len(equiv) > 0:
self.fd.write(r"""
.PP
.B EQUIVALENCE DIRECTORIES
""")
for e in equiv:
self.fd.write(r"""
.PP
%(domainname)s policy stores data with multiple different file context types under the %(equiv)s directory. If you would like to store the data in a different directory you can use the semanage command to create an equivalence mapping. If you wanted to store this data under the /srv directory you would execute the following command:
.PP
.B semanage fcontext -a -e %(equiv)s /srv/%(alt)s
.br
.B restorecon -R -v /srv/%(alt)s
.PP
""" % {'domainname': self.domainname, 'equiv': e, 'alt': e.split('/')[-1]})
self.fd.write(r"""
.PP
.B STANDARD FILE CONTEXT
SELinux defines the file context types for the %(domainname)s, if you wanted to
store files with these types in a different path, you need to execute the semanage command to specify alternate labeling and then use restorecon to put the labels on disk.
.B semanage fcontext -a -t %(type)s '/srv/%(domainname)s/content(/.*)?'
.br
.B restorecon -R -v /srv/my%(domainname)s_content
Note: SELinux often uses regular expressions to specify labels that match multiple files.
""" % {'domainname': self.domainname, "type": flist[0]})
self.fd.write(r"""
.I The following file types are defined for %(domainname)s:
""" % {'domainname': self.domainname})
for f in flist:
self.fd.write("""
.EX
.PP
.B %s
.EE
- %s
""" % (f, sepolicy.get_description(f)))
if f in self.fcdict:
plural = ""
if len(self.fcdict[f]["regex"]) > 1:
plural = "s"
self.fd.write("""
.br
.TP 5
Path%s:
%s""" % (plural, self.fcdict[f]["regex"][0]))
for x in self.fcdict[f]["regex"][1:]:
self.fd.write(", %s" % x)
self.fd.write("""
.PP
Note: File context can be temporarily modified with the chcon command. If you want to permanently change the file context you need to use the
.B semanage fcontext
command. This will modify the SELinux labeling database. You will need to use
.B restorecon
to apply the labels.
""")
def _see_also(self):
ret = ""
for d in self.domains:
if d == self.domainname:
continue
if d.startswith(self.short_name):
ret += ", %s_selinux(8)" % d
if d.startswith(self.domainname + "_"):
ret += ", %s_selinux(8)" % d
self.fd.write(ret)
def _public_content(self):
if len(self.anon_list) > 0:
self.fd.write("""
.SH SHARING FILES
If you want to share files with multiple domains (Apache, FTP, rsync, Samba), you can set a file context of public_content_t and public_content_rw_t. These contexts allow any of the above domains to read the content. If you want a particular domain to write to the public_content_rw_t domain, you must set the appropriate boolean.
.TP
Allow %(domainname)s servers to read the /var/%(domainname)s directory by adding the public_content_t file type to the directory and by restoring the file type.
.PP
.B
semanage fcontext -a -t public_content_t "/var/%(domainname)s(/.*)?"
.br
.B restorecon -F -R -v /var/%(domainname)s
.pp
.TP
Allow %(domainname)s servers to read and write /var/%(domainname)s/incoming by adding the public_content_rw_t type to the directory and by restoring the file type. You also need to turn on the %(domainname)s_anon_write boolean.
.PP
.B
semanage fcontext -a -t public_content_rw_t "/var/%(domainname)s/incoming(/.*)?"
.br
.B restorecon -F -R -v /var/%(domainname)s/incoming
.br
.B setsebool -P %(domainname)s_anon_write 1
""" % {'domainname': self.domainname})
for b in self.anon_list:
desc = self.booleans_dict[b][2][0].lower() + self.booleans_dict[b][2][1:]
self.fd.write("""
.PP
If you want to %s, you must turn on the %s boolean.
.EX
.B setsebool -P %s 1
.EE
""" % (desc, b, b))
def _footer(self):
self.fd.write("""
.SH "COMMANDS"
.B semanage fcontext
can also be used to manipulate default file context mappings.
.PP
.B semanage permissive
can also be used to manipulate whether or not a process type is permissive.
.PP
.B semanage module
can also be used to enable/disable/install/remove policy modules.
""")
if len(self.ports) > 0:
self.fd.write("""
.B semanage port
can also be used to manipulate the port definitions
""")
if self.booltext != "":
self.fd.write("""
.B semanage boolean
can also be used to manipulate the booleans
""")
self.fd.write("""
.PP
.B system-config-selinux
is a GUI tool available to customize SELinux policy settings.
.SH AUTHOR
This manual page was auto-generated using
.B "sepolicy manpage".
.SH "SEE ALSO"
selinux(8), %s(8), semanage(8), restorecon(8), chcon(1), sepolicy(8)
""" % (self.domainname))
if self.booltext != "":
self.fd.write(", setsebool(8)")
self._see_also()
def _valid_write(self, check, attributes):
if check in [self.type, "domain"]:
return False
if check.endswith("_t"):
for a in attributes:
if a in self.types[check]:
return False
return True
def _entrypoints(self):
entrypoints = [x['target'] for x in sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'permlist': ['entrypoint'], 'class': 'file'})]
if len(entrypoints) == 0:
return
self.fd.write("""
.SH "ENTRYPOINTS"
""")
if len(entrypoints) > 1:
entrypoints_str = "\\fB%s\\fP file types" % ", ".join(entrypoints)
else:
entrypoints_str = "\\fB%s\\fP file type" % entrypoints[0]
self.fd.write("""
The %s_t SELinux type can be entered via the %s.
The default entrypoint paths for the %s_t domain are the following:
""" % (self.domainname, entrypoints_str, self.domainname))
if "bin_t" in entrypoints:
entrypoints.remove("bin_t")
self.fd.write("""
All executables with the default executable label, usually stored in /usr/bin and /usr/sbin.""")
paths = []
for entrypoint in entrypoints:
if entrypoint in self.fcdict:
paths += self.fcdict[entrypoint]["regex"]
self.fd.write("""
%s""" % ", ".join(paths))
def _mcs_types(self):
mcs_constrained_type = next(sepolicy.info(sepolicy.ATTRIBUTE, "mcs_constrained_type"))
if self.type not in mcs_constrained_type['types']:
return
self.fd.write ("""
.SH "MCS Constrained"
The SELinux process type %(type)s_t is an MCS (Multi Category Security) constrained type. Sometimes this separation is referred to as sVirt. These types are usually used for securing multi-tenant environments, such as virtualization, containers or separation of users. The tools used to launch MCS types pick out a different MCS label for each process group.
For example one process might be launched with %(type)s_t:s0:c1,c2, and another process launched with %(type)s_t:s0:c3,c4. The SELinux kernel only allows these processes to write to content with a matching MCS label, or an MCS label of s0. A process running with the MCS level of s0:c1,c2 is not allowed to write to content with the MCS label of s0:c3,c4.
""" % {'type': self.domainname})
def _writes(self):
permlist = sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'permlist': ['open', 'write'], 'class': 'file'})
if permlist is None or len(permlist) == 0:
return
all_writes = []
attributes = ["proc_type", "sysctl_type"]
for i in permlist:
if not i['target'].endswith("_t"):
attributes.append(i['target'])
for i in permlist:
if self._valid_write(i['target'], attributes):
if i['target'] not in all_writes:
all_writes.append(i['target'])
if len(all_writes) == 0:
return
self.fd.write("""
.SH "MANAGED FILES"
""")
self.fd.write("""
The SELinux process type %s_t can manage files labeled with the following file types. The paths listed are the default paths for these file types. Note the processes UID still need to have DAC permissions.
""" % self.domainname)
all_writes.sort()
if "file_type" in all_writes:
all_writes = ["file_type"]
for f in all_writes:
self.fd.write("""
.br
.B %s
""" % f)
if f in self.fcdict:
for path in self.fcdict[f]["regex"]:
self.fd.write("""\t%s
.br
""" % path)
def _get_users_range(self):
if self.domainname in self.all_users_range:
return self.all_users_range[self.domainname]
return "s0"
def _user_header(self):
self.fd.write('.TH "%(type)s_selinux" "8" "%(type)s" "[email protected]" "%(type)s SELinux Policy documentation"'
% {'type': self.domainname})
self.fd.write(r"""
.SH "NAME"
%(user)s_u \- \fB%(desc)s\fP - Security Enhanced Linux Policy
.SH DESCRIPTION
\fB%(user)s_u\fP is an SELinux User defined in the SELinux
policy. SELinux users have default roles, \fB%(user)s_r\fP. The
default role has a default type, \fB%(user)s_t\fP, associated with it.
The SELinux user will usually login to a system with a context that looks like:
.B %(user)s_u:%(user)s_r:%(user)s_t:%(range)s
Linux users are automatically assigned an SELinux users at login.
Login programs use the SELinux User to assign initial context to the user's shell.
SELinux policy uses the context to control the user's access.
By default all users are assigned to the SELinux user via the \fB__default__\fP flag
On Targeted policy systems the \fB__default__\fP user is assigned to the \fBunconfined_u\fP SELinux user.
You can list all Linux User to SELinux user mapping using:
.B semanage login -l
If you wanted to change the default user mapping to use the %(user)s_u user, you would execute:
.B semanage login -m -s %(user)s_u __default__
""" % {'desc': self.desc, 'type': self.type, 'user': self.domainname, 'range': self._get_users_range()})
if "login_userdomain" in self.attributes and "login_userdomain" in self.all_attributes:
self.fd.write("""
If you want to map the one Linux user (joe) to the SELinux user %(user)s, you would execute:
.B $ semanage login -a -s %(user)s_u joe
""" % {'user': self.domainname})
def _can_sudo(self):
sudotype = "%s_sudo_t" % self.domainname
self.fd.write("""
.SH SUDO
""")
if sudotype in self.types:
role = self.domainname + "_r"
self.fd.write("""
The SELinux user %(user)s can execute sudo.
You can set up sudo to allow %(user)s to transition to an administrative domain:
Add one or more of the following records to sudoers using visudo.
""" % {'user': self.domainname})
for adminrole in self.role_allows[role]:
self.fd.write("""
USERNAME ALL=(ALL) ROLE=%(admin)s_r TYPE=%(admin)s_t COMMAND
.br
sudo will run COMMAND as %(user)s_u:%(admin)s_r:%(admin)s_t:LEVEL
""" % {'admin': adminrole[:-2], 'user': self.domainname})
self.fd.write("""
You might also need to add one or more of these new roles to your SELinux user record.
List the SELinux roles your SELinux user can reach by executing:
.B $ semanage user -l |grep selinux_name
Modify the roles list and add %(user)s_r to this list.
.B $ semanage user -m -R '%(roles)s' %(user)s_u
For more details you can see semanage man page.
""" % {'user': self.domainname, "roles": " ".join([role] + self.role_allows[role])})
else:
self.fd.write("""
The SELinux type %s_t is not allowed to execute sudo.
""" % self.domainname)
def _user_attribute(self):
self.fd.write("""
.SH USER DESCRIPTION
""")
if "unconfined_usertype" in self.attributes:
self.fd.write("""
The SELinux user %s_u is an unconfined user. It means that a Linux user mapped to this SELinux user is allowed to perform all actions.
""" % self.domainname)
if "unpriv_userdomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is defined in policy as an unprivileged user. SELinux prevents unprivileged users from doing administration tasks without transitioning to a different role.
""" % self.domainname)
if "admindomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is an admin user. It means that a Linux user mapped to this SELinux user is intended for administrative actions. Usually this is assigned to a root Linux user.
""" % self.domainname)
def _xwindows_login(self):
if "x_domain" in self.all_attributes:
self.fd.write("""
.SH X WINDOWS LOGIN
""")
if "x_domain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is able to X Windows login.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to X Windows login.
""" % self.domainname)
def _terminal_login(self):
if "login_userdomain" in self.all_attributes:
self.fd.write("""
.SH TERMINAL LOGIN
""")
if "login_userdomain" in self.attributes:
self.fd.write("""
The SELinux user %s_u is able to terminal login.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to terminal login.
""" % self.domainname)
def _network(self):
from sepolicy import network
self.fd.write("""
.SH NETWORK
""")
for net in ("tcp", "udp"):
portdict = network.get_network_connect(self.type, net, "name_bind")
if len(portdict) > 0:
self.fd.write("""
.TP
The SELinux user %s_u is able to listen on the following %s ports.
""" % (self.domainname, net))
for p in portdict:
for t, ports in portdict[p]:
self.fd.write("""
.B %s
""" % ",".join(ports))
portdict = network.get_network_connect(self.type, "tcp", "name_connect")
if len(portdict) > 0:
self.fd.write("""
.TP
The SELinux user %s_u is able to connect to the following tcp ports.
""" % (self.domainname))
for p in portdict:
for t, ports in portdict[p]:
self.fd.write("""
.B %s
""" % ",".join(ports))
def _home_exec(self):
permlist = sepolicy.search([sepolicy.ALLOW], {'source': self.type, 'target': 'user_home_type', 'class': 'file', 'permlist': ['ioctl', 'read', 'getattr', 'execute', 'execute_no_trans', 'open']})
self.fd.write("""
.SH HOME_EXEC
""")
if permlist is not None:
self.fd.write("""
The SELinux user %s_u is able to execute home content files.
""" % self.domainname)
else:
self.fd.write("""
The SELinux user %s_u is not able to execute home content files.
""" % self.domainname)
def _transitions(self):
self.fd.write(r"""
.SH TRANSITIONS
Three things can happen when %(type)s attempts to execute a program.
\fB1.\fP SELinux Policy can deny %(type)s from executing the program.
.TP
\fB2.\fP SELinux Policy can allow %(type)s to execute the program in the current user type.
Execute the following to see the types that the SELinux user %(type)s can execute without transitioning:
.B sesearch -A -s %(type)s -c file -p execute_no_trans
.TP
\fB3.\fP SELinux can allow %(type)s to execute the program and transition to a new type.
Execute the following to see the types that the SELinux user %(type)s can execute and transition:
.B $ sesearch -A -s %(type)s -c process -p transition
""" % {'user': self.domainname, 'type': self.type})
def _role_header(self):
self.fd.write('.TH "%(user)s_selinux" "8" "%(user)s" "[email protected]" "%(user)s SELinux Policy documentation"'
% {'user': self.domainname})
self.fd.write(r"""
.SH "NAME"
%(user)s_r \- \fB%(desc)s\fP - Security Enhanced Linux Policy
.SH DESCRIPTION
SELinux supports Roles Based Access Control (RBAC), some Linux roles are login roles, while other roles need to be transition into.
.I Note:
Examples in this man page will use the
.B staff_u
SELinux user.
Non login roles are usually used for administrative tasks. For example, tasks that require root privileges. Roles control which types a user can run processes with. Roles often have default types assigned to them.
The default type for the %(user)s_r role is %(user)s_t.
The
.B newrole
program can be used to transition directly to this role.
.B newrole -r %(user)s_r -t %(user)s_t
.B sudo
is the preferred method to transition from one role to another. You set up sudo to transition to %(user)s_r by adding a similar line to the /etc/sudoers file.
USERNAME ALL=(ALL) ROLE=%(user)s_r TYPE=%(user)s_t COMMAND
.br
sudo will run COMMAND as staff_u:%(user)s_r:%(user)s_t:LEVEL
When using a non login role, you need to set up SELinux so that your SELinux user can reach the %(user)s_r role.
Execute the following to see all of the assigned SELinux roles:
.B semanage user -l
You need to add %(user)s_r to the staff_u user. You could setup the staff_u user to be able to use the %(user)s_r role with a command like:
.B $ semanage user -m -R 'staff_r system_r %(user)s_r' staff_u
""" % {'desc': self.desc, 'user': self.domainname})
troles = []
for i in self.role_allows:
if self.domainname + "_r" in self.role_allows[i]:
troles.append(i)
if len(troles) > 0:
plural = ""
if len(troles) > 1:
plural = "s"
self.fd.write("""
SELinux policy also controls which roles can transition to a different role.
You can list these rules using the following command.
.B search --role_allow
SELinux policy allows the %s role%s can transition to the %s_r role.
""" % (", ".join(troles), plural, self.domainname))
| gpl-2.0 | -2,246,087,477,278,939,100 | 30.686244 | 361 | 0.598971 | false |
gonicus/gosa | backend/src/gosa/backend/objects/index.py | 1 | 70746 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
"""
Object Index
============
The Object Index is the search engine in GOsa. It keeps track of
all defined object types and can find references to them inside of its
local index database.
----
"""
import logging
import multiprocessing
import sys
import re
import traceback
from multiprocessing.pool import Pool
from urllib.parse import urlparse
import ldap
import sqlalchemy
from multiprocessing import RLock
from passlib.hash import bcrypt
from requests import HTTPError
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql.ddl import DropTable
from sqlalchemy_searchable import make_searchable, search
from sqlalchemy_utils import TSVectorType
import gosa
from gosa.backend.components.httpd import get_server_url, get_internal_server_url
from gosa.backend.objects.backend.back_foreman import ForemanBackendException
from gosa.backend.utils import BackendTypes
from gosa.common.env import declarative_base, make_session
from gosa.common.event import EventMaker
from lxml import etree
from lxml import objectify
import zope.event
import datetime
import hashlib
import time
import itertools
from gosa.backend.routes.sse.main import SseHandler
from zope.interface import implementer
from gosa.common import Environment
from gosa.common.mqtt_connection_state import BusClientAvailability
from gosa.common.utils import N_
from gosa.common.handler import IInterfaceHandler
from gosa.common.components import Command, Plugin, PluginRegistry, JSONServiceProxy
from gosa.common.error import GosaErrorHandler as C, GosaException
from gosa.backend.objects import ObjectFactory, ObjectProxy, ObjectChanged
from gosa.backend.exceptions import FilterException, IndexException, ProxyException, ObjectException
from gosa.backend.lock import GlobalLock
from sqlalchemy.orm import relationship, subqueryload
from sqlalchemy import Column, String, Integer, Boolean, Sequence, DateTime, ForeignKey, or_, and_, not_, func, orm, \
JSON, Enum
from gosa.backend.routes.system import State
Base = declarative_base()
make_searchable(Base.metadata)
# Register the errors handled by us
C.register_codes(dict(
OBJECT_EXISTS=N_("Object with UUID %(uuid)s already exists"),
OBJECT_NOT_FOUND=N_("Cannot find object %(id)s"),
INDEXING=N_("Index rebuild in progress - try again later"),
NOT_SUPPORTED=N_("Requested search operator %(operator)s is not supported"),
NO_MASTER_BACKEND_FOUND=N_("No master backend found"),
NO_MASTER_BACKEND_CONNECTION=N_("connection to GOsa backend failed"),
NO_BACKEND_CREDENTIALS=N_("Please add valid backend credentials to you configuration (core.backend-user, core.backend-key)"),
DELAYED_UPDATE_FOR_NON_DIRTY_OBJECT=N_("Trying to add a delayed update to a non-dirty object (%(topic)s)")
))
class Schema(Base):
__tablename__ = 'schema'
type = Column(String, primary_key=True)
hash = Column(String(32))
def __repr__(self): # pragma: nocover
return "<Schema(type='%s', hash='%s')>" % (self.type, self.hash)
class SearchObjectIndex(Base):
__tablename__ = "so_index"
so_uuid = Column(String(36), ForeignKey('obj-index.uuid'), primary_key=True)
reverse_parent_dn = Column(String, index=True)
title = Column(String)
description = Column(String)
search = Column(String)
types = Column(String)
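    # weighted full-text search vector: the title ranks highest (A), followed by the
    # collected search terms (B), the description (C) and the type names (D); the
    # 'simple' regconfig disables language-specific stemming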
search_vector = Column(TSVectorType('title', 'description', 'search', 'types',
weights={'title': 'A', 'types': 'D', 'description': 'C', 'search': 'B'},
regconfig='pg_catalog.simple'
))
object = relationship("ObjectInfoIndex", uselist=False, back_populates="search_object")
def __repr__(self): # pragma: nocover
return "<SearchObjectIndex(so_uuid='%s', reverse_parent_dn='%s', title='%s', description='%s')>" % \
               (self.so_uuid, self.reverse_parent_dn, self.title, self.description)
class KeyValueIndex(Base):
__tablename__ = 'kv-index'
key_id = Column(Integer, Sequence('kv_id_seq'), primary_key=True, nullable=False)
uuid = Column(String(36), ForeignKey('obj-index.uuid'))
key = Column(String(64), index=True)
value = Column(String)
def __repr__(self): # pragma: nocover
return "<KeyValueIndex(uuid='%s', key='%s', value='%s')>" % (self.uuid, self.key, self.value)
class ExtensionIndex(Base):
__tablename__ = 'ext-index'
ext_id = Column(Integer, Sequence('ei_id_seq'), primary_key=True, nullable=False)
uuid = Column(String(36), ForeignKey('obj-index.uuid'))
extension = Column(String(64))
def __repr__(self): # pragma: nocover
return "<ExtensionIndex(uuid='%s', extension='%s')>" % (
self.uuid, self.extension)
class ObjectInfoIndex(Base):
__tablename__ = 'obj-index'
uuid = Column(String(36), primary_key=True)
dn = Column(String, index=True)
_parent_dn = Column(String, index=True)
_adjusted_parent_dn = Column(String, index=True)
_type = Column(String(64), index=True)
_last_modified = Column(DateTime)
_invisible = Column(Boolean)
_master_backend = Column(String)
properties = relationship("KeyValueIndex", order_by=KeyValueIndex.key)
extensions = relationship("ExtensionIndex", order_by=ExtensionIndex.extension)
search_object = relationship("SearchObjectIndex", back_populates="object")
def __repr__(self): # pragma: nocover
return "<ObjectInfoIndex(uuid='%s', dn='%s', _parent_dn='%s', _adjusted_parent_dn='%s', _type='%s', _last_modified='%s', _invisible='%s', _master_backend='%s')>" % (
self.uuid, self.dn, self._parent_dn, self._adjusted_parent_dn, self._type, self._last_modified, self._invisible, self._master_backend)
class RegisteredBackend(Base):
__tablename__ = "registered-backends"
uuid = Column(String(36), primary_key=True, nullable=False)
password = Column(String(300), nullable=False)
url = Column(String)
type = Column(Enum(BackendTypes))
def __init__(self, uuid, password, url="", type=BackendTypes.unknown):
self.uuid = uuid
self.password = bcrypt.encrypt(password)
self.url = url
self.type = type
def validate_password(self, password):
return bcrypt.verify(password, self.password)
def __repr__(self): # pragma: nocover
return "<RegisteredBackend(uuid='%s', password='%s', url='%s', type='%s')>" % \
(self.uuid, self.password, self.url, self.type)
class OpenObject(Base):
__tablename__ = "open-objects"
ref = Column(String(36), primary_key=True, nullable=False)
uuid = Column(String(36), nullable=True)
oid = Column(String)
data = Column(JSON)
backend_uuid = Column(String, ForeignKey('registered-backends.uuid'))
backend = relationship("RegisteredBackend")
created = Column(DateTime)
last_interaction = Column(DateTime)
user = Column(String)
session_id = Column(String)
def __repr__(self): # pragma: nocover
return "<OpenObject(ref='%s', uuid='%s', oid='%s', data='%s', backend='%s', created='%s', last_interaction='%s', user='%s', session_id='%s')>" % \
(self.ref, self.uuid, self.oid, self.data, self.backend, self.created, self.last_interaction, self.user, self.session_id)
class UserSession(Base):
__tablename__ = "user-sessions"
sid = Column(String(36), primary_key=True, nullable=False)
user = Column(String)
dn = Column(String)
last_used = Column(DateTime)
auth_state = Column(Integer)
def __repr__(self):
return "<UserSession(sid='%s', user='%s', dn='%s', auth_state='%s', last_used='%s')>" % \
(self.sid, self.user, self.dn, self.auth_state, self.last_used)
class Cache(Base):
__tablename__ = "cache"
key = Column(String, primary_key=True)
data = Column(JSON)
time = Column(DateTime)
def __repr__(self):
return "<Cache(key='%s',data='%s',time='%s')" % (self.key, self.data, self.time)
@compiles(DropTable, "postgresql")
def _compile_drop_table(element, compiler, **kwargs):
return compiler.visit_drop_table(element) + " CASCADE"
class IndexScanFinished(): # pragma: nocover
pass
class IndexSyncFinished(): # pragma: nocover
pass
@implementer(IInterfaceHandler)
class ObjectIndex(Plugin):
"""
The *ObjectIndex* keeps track of objects and their indexed attributes. It
is the search engine that allows quick queries on the data set with
paged results and wildcards.
"""
fuzzy = False
db = None
base = None
_priority_ = 20
_target_ = 'core'
_indexed = False
_post_process_job = None
importing = False
to_be_updated = []
    # objects that are currently being created (stored in the backend but not in the database yet)
currently_in_creation = []
    # objects that have been changed (changes not yet in the database)
__dirty = {}
currently_moving = {}
__search_aid = {}
last_notification = None
# notification period in seconds during indexing
notify_every = 1
__value_extender = None
_acl_resolver = None
procs = multiprocessing.cpu_count()
def __init__(self):
self.env = Environment.getInstance()
# Remove old lock if exists
if GlobalLock.exists("scan_index"):
GlobalLock.release("scan_index")
self.log = logging.getLogger(__name__)
self.log.info("initializing object index handler")
self.factory = ObjectFactory.getInstance()
# Listen for object events
zope.event.subscribers.append(self.__handle_events)
self.lock = RLock()
def serve(self):
# Configure database for the index
orm.configure_mappers()
engine = self.env.getDatabaseEngine("backend-database")
Base.metadata.bind = engine
Base.metadata.create_all()
self.__value_extender = gosa.backend.objects.renderer.get_renderers()
self._acl_resolver = PluginRegistry.getInstance("ACLResolver")
if self.env.mode == "backend":
with make_session() as session:
# create view
try:
# check if extension exists
if session.execute("SELECT * FROM \"pg_extension\" WHERE extname = 'pg_trgm';").rowcount == 0:
session.execute("CREATE EXTENSION pg_trgm;")
if session.execute("SELECT * FROM \"pg_extension\" WHERE extname = 'fuzzystrmatch';").rowcount == 0:
session.execute("CREATE EXTENSION fuzzystrmatch;")
view_name = "unique_lexeme"
# check if view exists
res = session.execute("SELECT count(*) > 0 as \"exists\" FROM pg_catalog.pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind = 'm' AND n.nspname = 'public' AND c.relname = '%s';" % view_name).first()
if res[0] is False:
session.execute("CREATE MATERIALIZED VIEW %s AS SELECT word FROM ts_stat('SELECT so_index.search_vector FROM so_index');" % view_name)
session.execute("CREATE INDEX words_idx ON %s USING gin(word gin_trgm_ops);" % view_name)
self.fuzzy = True
except Exception as e:
self.log.error("Error creating view for unique word index: %s" % str(e))
session.rollback()
try:
current_db_hash = session.query(Schema).filter(Schema.type == 'database').one_or_none()
except:
current_db_hash = None
# check DB schema
tables = [Schema.__table__, KeyValueIndex.__table__, ExtensionIndex.__table__,
SearchObjectIndex.__table__, ObjectInfoIndex.__table__, RegisteredBackend.__table__]
sql = ""
for table in tables:
statement = CreateTable(table)
sql += str(statement.compile(dialect=postgresql.dialect()))
md5s = hashlib.md5()
md5s.update(sql.encode('utf-8'))
md5sum = md5s.hexdigest()
db_recreated = False
schema = self.factory.getXMLObjectSchema(True)
if current_db_hash is None or current_db_hash.hash != md5sum:
# Database schema has changed -> re-create
self.log.info("database schema has changed, dropping object tables")
session.commit()
Base.metadata.drop_all()
Base.metadata.create_all()
self.log.info("created new database tables")
db_schema = Schema(type='database', hash=md5sum)
session.add(db_schema)
session.commit()
# enable indexing
self.env.backend_index = True
db_recreated = True
else:
# If there is already a collection, check if there is a newer schema available
if self.isSchemaUpdated(schema):
session.query(Schema).filter(Schema.type != 'database').delete()
session.query(KeyValueIndex).delete()
session.query(ExtensionIndex).delete()
session.query(SearchObjectIndex).delete()
session.query(ObjectInfoIndex).delete()
session.query(OpenObject).delete() # delete references to backends
self.log.info('object definitions changed, dropped old object index')
# enable indexing
self.env.backend_index = True
# delete the old active master (not the proxies)
session.query(RegisteredBackend).filter(RegisteredBackend.type == 'active_master').delete()
# Create the initial schema information if required
if not session.query(Schema).filter(Schema.type == 'objects').one_or_none():
self.log.info('created schema')
md5s = hashlib.md5()
md5s.update(schema)
md5sum = md5s.hexdigest()
schema = Schema(type='objects', hash=md5sum)
session.add(schema)
session.commit()
# Extract search aid
attrs = {}
mapping = {}
resolve = {}
aliases = {}
for otype in self.factory.getObjectTypes():
# Assemble search aid
item = self.factory.getObjectSearchAid(otype)
if not item:
continue
typ = item['type']
aliases[typ] = [typ]
if not typ in attrs:
attrs[typ] = []
if not typ in resolve:
resolve[typ] = []
if not typ in mapping:
mapping[typ] = dict(dn="dn", title="title", description="description", icon=None)
attrs[typ] += item['search']
if 'keyword' in item:
aliases[typ] += item['keyword']
if 'map' in item:
mapping[typ].update(item['map'])
if 'resolve' in item:
resolve[typ] += item['resolve']
# Add index for attribute used for filtering and memorize
# attributes potentially needed for queries.
tmp = [x for x in attrs.values()]
used_attrs = list(itertools.chain.from_iterable(tmp))
used_attrs += list(itertools.chain.from_iterable([x.values() for x in mapping.values()]))
used_attrs += list(set(itertools.chain.from_iterable([[x[0]['filter'], x[0]['attribute']] for x in resolve.values() if len(x)])))
used_attrs = list(set(used_attrs))
        # Remove potentially unassigned values
used_attrs = [u for u in used_attrs if u]
# Memorize search information for later use
self.__search_aid = dict(attrs=attrs,
used_attrs=used_attrs,
mapping=mapping,
resolve=resolve,
aliases=aliases)
# store core_uuid/core_key into DB
if hasattr(self.env, "core_uuid"):
if self.env.mode == "backend":
with make_session() as session:
if db_recreated is False:
tables_to_recreate = [UserSession.__table__, OpenObject.__table__]
for table in tables_to_recreate:
table.drop(engine)
Base.metadata.create_all(tables=tables_to_recreate)
rb = RegisteredBackend(
uuid=self.env.core_uuid,
password=self.env.core_key,
url=get_server_url(),
type=BackendTypes.active_master
)
session.add(rb)
session.commit()
else:
self.registerProxy()
# Schedule index sync
if self.env.backend_index is True and self.env.mode == 'backend':
if not hasattr(sys, '_called_from_test'):
sobj = PluginRegistry.getInstance("SchedulerService")
sobj.getScheduler().add_date_job(self.syncIndex,
datetime.datetime.now() + datetime.timedelta(seconds=1),
tag='_internal', jobstore='ram')
else:
def finish():
zope.event.notify(IndexScanFinished())
zope.event.notify(IndexSyncFinished())
State.system_state = "ready"
sobj = PluginRegistry.getInstance("SchedulerService")
sobj.getScheduler().add_date_job(finish,
datetime.datetime.now() + datetime.timedelta(seconds=10),
tag='_internal', jobstore='ram')
def registerProxy(self, backend_uuid=None):
if self.env.mode == "proxy":
# register on the current master
with make_session() as session:
# get any other registered backend
if backend_uuid is None:
master_backend = session.query(RegisteredBackend) \
.filter(RegisteredBackend.uuid != self.env.core_uuid,
RegisteredBackend.type == BackendTypes.active_master).first()
else:
master_backend = session.query(RegisteredBackend) \
.filter(RegisteredBackend.uuid == backend_uuid,
RegisteredBackend.type == BackendTypes.active_master).first()
if master_backend is None:
raise GosaException(C.make_error("NO_MASTER_BACKEND_FOUND"))
# Try to log in with provided credentials
url = urlparse("%s/rpc" % master_backend.url)
connection = '%s://%s%s' % (url.scheme, url.netloc, url.path)
proxy = JSONServiceProxy(connection)
if self.env.config.get("core.backend-user") is None or self.env.config.get("core.backend-key") is None:
raise GosaException(C.make_error("NO_BACKEND_CREDENTIALS"))
# Try to log in
try:
if not proxy.login(self.env.config.get("core.backend-user"), self.env.config.get("core.backend-key")):
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
else:
proxy.registerBackend(self.env.core_uuid,
self.env.core_key, get_internal_server_url(),
BackendTypes.proxy)
except HTTPError as e:
if e.code == 401:
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
else:
self.log.error("Error: %s " % str(e))
raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
# except Exception as e:
# self.log.error("Error: %s " % str(e))
# raise GosaException(C.make_error("NO_MASTER_BACKEND_CONNECTION"))
def stop(self):
if self.__handle_events in zope.event.subscribers:
zope.event.subscribers.remove(self.__handle_events)
def mark_as_dirty(self, obj):
"""
Marks an object as "dirty". Dirty objects are currently being persisted to their backends (aka committed).
:param obj:
:type obj: gosa.backend.proxy.ObjectProxy
:return:
"""
if not self.is_dirty(obj.uuid):
self.__dirty[obj.uuid] = {"obj": obj, "updates": []}
self.log.info("marked %s (%s) as dirty (%s)" % (obj.uuid, obj.dn, self.__dirty))
def is_dirty(self, uuid):
"""
Check if an object identified by UUID is marked as "dirty".
        :param uuid: UUID of the object to check
:type uuid: str
:return: True if "dirty"
"""
return uuid in self.__dirty
def get_dirty_objects(self):
return self.__dirty
def add_delayed_update(self, obj, update, inject=False, skip_backend_writes=[]):
"""
Add a delayed update for an object that is currently being committed (marked "dirty").
This update will be processed after the ongoing commit has been completed.
:param obj: The object to apply the update to
:type obj: gosa.backend.proxy.ObjectProxy
:param update: updated data that can be processed by :meth:`gosa.backend.proxy.ObjectProxy.apply_update`
:type update: dict
"""
if not self.is_dirty(obj.uuid):
self.log.warning("Trying to add a delayed update to a non-dirty object '%s'" % obj.uuid)
obj.apply_update(update)
obj.commit(skip_backend_writes=skip_backend_writes)
return
self.log.info("adding delayed update to %s (%s)" % (obj.uuid, obj.dn))
self.__dirty[obj.uuid]["updates"].append({
"inject": inject,
"data": update,
"skip_backend_writes": skip_backend_writes
})
def unmark_as_dirty(self, id):
"""
removes the "dirty" mark for the object and processes the delayed updates
:param id: UUID of the Object to unmark or ObjectProxy instance
:type id: str|ObjectProxy
"""
if isinstance(id, ObjectProxy):
uuid = id.uuid
else:
uuid = id
if self.is_dirty(uuid):
obj = self.__dirty[uuid]['obj']
if len(self.__dirty[uuid]['updates']) > 0:
# freshly open the object
entry = self.__dirty[uuid]
new_obj = ObjectProxy(entry["obj"].dn)
for update in entry["updates"]:
if update["inject"] is True:
self.log.info("injecting %s to %s" % (update["data"], obj.uuid))
new_obj.inject_backend_data(update["data"], force_update=True)
else:
self.log.info("applying %s to %s" % (update["data"], obj.uuid))
new_obj.apply_update(update["data"])
del self.__dirty[uuid]
new_obj.commit(skip_backend_writes=entry["skip_backend_writes"])
else:
del self.__dirty[uuid]
self.log.info("unmarked %s (%s) as dirty (%s)" % (obj.uuid, obj.dn, self.__dirty))
def is_currently_moving(self, dn, move_target=False):
if move_target:
# check for value (the new dn after movement)
return dn in self.currently_moving.values()
else:
# check for key (the old dn before the movement)
return dn in self.currently_moving.keys()
def __backend_change_processor(self, data):
"""
This method gets called if an external backend reports
a modification of an entry under its hood.
We use it to update / create / delete existing index
entries.
"""
data = data.BackendChange
dn = data.DN.text if hasattr(data, 'DN') else None
new_dn = data.NewDN.text if hasattr(data, 'NewDN') else None
change_type = data.ChangeType.text
_uuid = data.UUID.text if hasattr(data, 'UUID') else None
_last_changed = datetime.datetime.strptime(data.ModificationTime.text, "%Y%m%d%H%M%SZ")
obj = None
if not _uuid and not dn:
return
# Set importing flag to true in order to be able to post process incoming
# objects.
ObjectIndex.importing = True
# Setup or refresh timer job to run the post processing
sched = PluginRegistry.getInstance("SchedulerService").getScheduler()
next_run = datetime.datetime.now() + datetime.timedelta(0, 5)
if not hasattr(sys, '_called_from_test'):
if self._post_process_job:
sched.reschedule_date_job(self._post_process_job, next_run)
else:
self._post_process_job = sched.add_date_job(self._post_process_by_timer, next_run, tag='_internal', jobstore="ram", )
# Resolve dn from uuid if needed
with make_session() as session:
if not dn:
dn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.uuid == _uuid).one_or_none()
# Modification
if change_type == "modify":
# Get object
obj = self._get_object(dn)
if not obj:
return
                # Check if the entry exists - if not, maybe create it
entry = session.query(ObjectInfoIndex.dn).filter(
or_(
ObjectInfoIndex.uuid == _uuid,
func.lower(ObjectInfoIndex.dn) == func.lower(dn)
)).one_or_none()
if entry:
self.update(obj, session=session)
else:
self.insert(obj, session=session)
# Add
if change_type == "add":
# Get object
obj = self._get_object(dn)
if not obj:
return
self.insert(obj, session=session)
# Delete
if change_type == "delete":
self.log.info("object has changed in backend: indexing %s" % dn)
self.log.warning("external delete might not take care about references")
if _uuid is not None:
self.remove_by_uuid(_uuid, session=session)
else:
obj = self._get_object(dn)
if obj is None:
# lets see if we can find a UUID for the deleted DN
uuid = session.query(ObjectInfoIndex.uuid).filter(func.lower(ObjectInfoIndex.dn) == func.lower(dn)).one_or_none()
if uuid is not None:
self.remove_by_uuid(uuid)
else:
self.remove(obj)
# Move
if change_type in ['modrdn', 'moddn']:
                # Check if the entry exists - if not, maybe create it
entry = session.query(ObjectInfoIndex).filter(
or_(
ObjectInfoIndex.uuid == _uuid,
func.lower(ObjectInfoIndex.dn) == func.lower(dn)
)).one_or_none()
if new_dn is not None and new_dn[-1:] == ",":
# only new RDN received, get parent from db
if entry is not None:
new_dn = new_dn + entry._parent_dn
else:
self.log.error('DN modification event received: could not get parent DN from existing object to complete the new DN')
# Get object
obj = self._get_object(new_dn)
if not obj:
return
if entry:
self.update(obj)
else:
self.insert(obj)
# send the event to the clients
event_change_type = "update"
if change_type == "add":
event_change_type = "create"
elif change_type == "delete":
event_change_type = "remove"
e = EventMaker()
if obj:
ev = e.Event(e.ObjectChanged(
e.UUID(obj.uuid),
e.DN(obj.dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
elif _uuid is not None:
ev = e.Event(e.ObjectChanged(
e.UUID(_uuid),
e.DN(dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
else:
ev = e.Event(e.ObjectChanged(
e.DN(dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(event_change_type)
))
event = "<?xml version='1.0'?>\n%s" % etree.tostring(ev, pretty_print=True).decode('utf-8')
# Validate event
xml = objectify.fromstring(event, PluginRegistry.getEventParser())
SseHandler.notify(xml, channel="broadcast")
if hasattr(sys, '_called_from_test'):
self.post_process()
def get_last_modification(self, backend='LDAP'):
with make_session() as session:
res = session.query(ObjectInfoIndex._last_modified)\
.filter(ObjectInfoIndex._master_backend == backend)\
.order_by(ObjectInfoIndex._last_modified.desc())\
.limit(1)\
.one_or_none()
if res is not None:
return res[0]
return None
def _post_process_by_timer(self):
self._post_process_job = None
self.post_process()
def _get_object(self, dn):
try:
obj = ObjectProxy(dn)
except (ProxyException, ldap.NO_SUCH_OBJECT) as e:
self.log.warning("not found %s: %s" % (dn, str(e)))
obj = None
except ObjectException as e:
self.log.warning("not indexing %s: %s" % (dn, str(e)))
obj = None
return obj
def get_search_aid(self):
return self.__search_aid
def isSchemaUpdated(self, schema):
# Calculate md5 checksum for potentially new schema
md5s = hashlib.md5()
md5s.update(schema)
md5sum = md5s.hexdigest()
with make_session() as session:
stored_md5sum = session.query(Schema.hash).filter(Schema.type == 'objects').one_or_none()
if stored_md5sum and stored_md5sum[0] == md5sum:
return False
return True
def notify_frontends(self, state, progress=None, step=None):
e = EventMaker()
ev = e.Event(e.BackendState(
e.Type("index"),
e.State(state),
e.Progress(str(progress)),
e.Step(str(step)),
e.TotalSteps(str(4))
))
event_object = objectify.fromstring(etree.tostring(ev, pretty_print=True).decode('utf-8'))
SseHandler.notify(event_object, channel="broadcast")
@Command(__help__=N_('Start index synchronizing from an optional root-DN'))
def syncIndex(self, base=None):
State.system_state = "indexing"
# Don't index if someone else is already doing it
if GlobalLock.exists("scan_index"):
return
        # Don't run the index if someone else already did it since the last
        # restart.
cr = PluginRegistry.getInstance("CommandRegistry")
GlobalLock.acquire("scan_index")
ObjectIndex.importing = True
updated = 0
added = 0
existing = 0
total = 0
index_successful = False
t0 = time.time()
if base is None:
start_dn = self.env.base
else:
start_dn = base
try:
self._indexed = True
self.last_notification = time.time()
self.log.info("scanning for objects")
self.notify_frontends(N_("scanning for objects"), step=1)
with Pool(processes=self.procs) as pool:
children = self.factory.getObjectChildren(start_dn)
result = pool.starmap_async(resolve_children, [(dn,) for dn in children.keys()])
while not result.ready():
self.notify_frontends(N_("scanning for objects"), step=1)
self.last_notification = time.time()
time.sleep(self.notify_every)
res = children
for r in result.get():
res = {**res, **r}
# count by type
counts = {}
for o in res.keys():
if res[o] not in counts:
counts[res[o]] = 1
else:
counts[res[o]] += 1
self.log.info("Found objects: %s" % counts)
res[self.env.base] = 'dummy'
self.log.info("generating object index")
self.notify_frontends(N_("Generating object index"))
# Find new entries
backend_objects = []
total = len(res)
oids = sorted(res.keys(), key=len)
with Pool(processes=self.procs) as pool:
self.log.info("processing objects with %d entries" % len(oids))
result = pool.starmap_async(process_objects, [(oid,) for oid in oids], chunksize=1)
while not result.ready():
now = time.time()
current = total-result._number_left
self.notify_frontends(N_("Processing object %s/%s" % (current, total)), round(100/total*current), step=2)
self.last_notification = now
time.sleep(self.notify_every)
for r, uuid, to_be_updated in result.get():
backend_objects.append(uuid)
ObjectIndex.to_be_updated.extend(to_be_updated)
if r == "added":
added += 1
elif r == "existing":
existing += 1
elif r == "updated":
updated += 1
self.notify_frontends(N_("%s objects processed" % total), 100, step=2)
# Remove entries that are in the index, but not in any other backends
if base is None:
self.notify_frontends(N_("removing orphan objects from index"), step=3)
with make_session() as session:
removed = self.__remove_others(backend_objects, session=session)
else:
removed = 0
self.log.info("%s added, %s updated, %s removed, %s are up-to-date" % (added, updated, removed, existing))
index_successful = True
except Exception as e:
self.log.critical("building the index failed: %s" % str(e))
traceback.print_exc()
finally:
if index_successful is True:
self.post_process()
self.log.info("index refresh finished")
self.notify_frontends(N_("Index refresh finished"), 100, step=4)
GlobalLock.release("scan_index")
t1 = time.time()
self.log.info("processed %d objects in %ds" % (total, t1 - t0))
                # notify others that the index scan is done, they can now do their own sync processing
zope.event.notify(IndexScanFinished())
# now the index is really ready and up-to-date
zope.event.notify(IndexSyncFinished())
State.system_state = "ready"
else:
raise IndexException("Error creating index, please restart.")
def post_process(self):
ObjectIndex.importing = False
self.last_notification = time.time()
uuids = list(set(ObjectIndex.to_be_updated))
ObjectIndex.to_be_updated = []
total = len(uuids)
        # Some objects may have queued themselves to be re-indexed, process them now.
self.log.info("need to refresh index for %d objects" % total)
with Pool(processes=self.procs) as pool:
result = pool.starmap_async(post_process, [(uuid,) for uuid in uuids], chunksize=1)
while not result.ready():
now = time.time()
current = total-result._number_left
if GlobalLock.exists("scan_index"):
self.notify_frontends(N_("Refreshing object %s/%s" % (current, total)), round(100/total*current), step=4)
self.last_notification = now
time.sleep(self.notify_every)
if len(ObjectIndex.to_be_updated):
self.post_process()
self.update_words()
def index_active(self): # pragma: nocover
return self._indexed
def update_words(self, session=None):
if session is None:
with make_session() as session:
self._update_words(session)
else:
self._update_words(session)
def _update_words(self, session):
# update unique word list
if self.fuzzy is True:
try:
session.execute("REFRESH MATERIALIZED VIEW unique_lexeme;")
except Exception as e:
session.rollback()
raise e
def __handle_events(self, event, retried=0):
if GlobalLock.exists("scan_index"):
return
if isinstance(event, objectify.ObjectifiedElement):
self.__backend_change_processor(event)
elif isinstance(event, ObjectChanged):
change_type = None
_uuid = event.uuid
_dn = None
_last_changed = datetime.datetime.now()
# Try to find the affected DN
with make_session() as session:
e = session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == _uuid).one_or_none()
if e:
                    # New pre-events don't have a dn. Just skip it in this case...
if hasattr(e, 'dn'):
_dn = e.dn
if e._last_modified is not None:
_last_changed = e._last_modified
else:
_dn = "not known yet"
if event.reason == "post object remove":
self.log.debug("removing object index for %s (%s)" % (_uuid, _dn))
self.remove_by_uuid(_uuid, session=session)
change_type = "remove"
if event.reason == "pre object move":
self.log.debug("starting object movement from %s to %s" % (_dn, event.dn))
self.currently_moving[_dn] = event.dn
try:
if event.reason == "post object move":
self.log.debug("updating object index for %s (%s)" % (_uuid, _dn))
obj = ObjectProxy(event.dn, skip_value_population=True)
self.update(obj, session=session)
_dn = obj.dn
change_type = "move"
if event.orig_dn in self.currently_moving:
del self.currently_moving[event.orig_dn]
if event.reason == "post object create":
self.log.debug("creating object index for %s (%s)" % (_uuid, _dn))
obj = ObjectProxy(event.dn, skip_value_population=True)
self.insert(obj, session=session)
_dn = obj.dn
change_type = "create"
if event.reason == "post object update":
self.log.debug("updating object index for %s (%s)" % (_uuid, _dn))
if not event.dn and _dn != "not known yet":
event.dn = _dn
obj = ObjectProxy(event.dn, skip_value_population=True)
self.update(obj, session=session)
change_type = "update"
except ForemanBackendException as e:
if e.response.status_code == 404:
self.log.info("Foreman object %s (%s) not available yet, skipping index update."
% (_uuid, _dn))
# do nothing else as foreman will send some kind of event, when the object becomes available
else:
raise e
# send the event to the clients
e = EventMaker()
if event.reason[0:4] == "post" and _uuid and _dn and change_type and \
(change_type != "update" or len(event.changed_props)):
ev = e.Event(e.ObjectChanged(
e.UUID(_uuid),
e.DN(_dn),
e.ModificationTime(_last_changed.strftime("%Y%m%d%H%M%SZ")),
e.ChangeType(change_type)
))
event_string = "<?xml version='1.0'?>\n%s" % etree.tostring(ev, pretty_print=True).decode('utf-8')
# Validate event
xml = objectify.fromstring(event_string, PluginRegistry.getEventParser())
SseHandler.notify(xml, channel="broadcast")
elif isinstance(event, BusClientAvailability):
backend_registry = PluginRegistry.getInstance("BackendRegistry")
if event.type == "proxy":
                # entering proxies are not handled, because they register themselves with credentials via JSONRPC
if event.state == "leave":
self.log.debug("unregistering proxy: %s" % event.client_id)
backend_registry.unregisterBackend(event.client_id)
elif event.type == "backend":
if event.state == "ready":
self.log.debug("new backend announced: %s" % event.client_id)
if self.env.mode == "proxy":
# register ourselves to this backend
self.registerProxy(event.client_id)
def insert(self, obj, skip_base_check=False, session=None):
if session is not None:
self._insert(obj, session, skip_base_check=skip_base_check)
else:
with make_session() as session:
self._insert(obj, session, skip_base_check=skip_base_check)
def _insert(self, obj, session, skip_base_check=False):
if not skip_base_check:
pdn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.dn == obj.get_parent_dn()).one_or_none()
# No parent?
if not pdn:
self.log.debug("ignoring object that has no base in the current index: " + obj.dn)
return
parent = self._get_object(obj.get_parent_dn())
if not parent.can_host(obj.get_base_type()):
self.log.debug("ignoring object that is not relevant for the index: " + obj.dn)
return
self.log.debug("creating object index for %s (%s)" % (obj.uuid, obj.dn))
uuid = session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
if uuid:
raise IndexException(C.make_error('OBJECT_EXISTS', "base", uuid=obj.uuid))
with self.lock:
self.__save(obj.asJSON(True, use_in_value=True), session=session)
def __save(self, data, session=None):
if self.env.mode == "proxy":
self.log.error("GOsa proxy is not allowed to write anything to the database")
if session is not None:
self.__session_save(data, session)
else:
with make_session() as session:
self.__session_save(data, session)
def __session_save(self, data, session):
try:
# Assemble object index object
oi = ObjectInfoIndex(
uuid=data["_uuid"],
dn=data["dn"],
_type=data["_type"],
_parent_dn=data["_parent_dn"],
_adjusted_parent_dn=data["_adjusted_parent_dn"],
_invisible=data["_invisible"],
_master_backend=data["_master_backend"]
)
if '_last_changed' in data:
oi._last_modified = datetime.datetime.fromtimestamp(data["_last_changed"])
session.add(oi)
# Assemble extension index objects
for ext in data["_extensions"]:
ei = ExtensionIndex(uuid=data["_uuid"], extension=ext)
session.add(ei)
# Assemble key value index objects
for key, value in data.items():
# Skip meta information and DN
if key.startswith("_") or key == "dn":
continue
if isinstance(value, list):
for v in value:
kvi = KeyValueIndex(uuid=data["_uuid"], key=key, value=v)
session.add(kvi)
else:
kvi = KeyValueIndex(uuid=data["_uuid"], key=key, value=value)
session.add(kvi)
# assemble search object
if data['_type'] in self.__search_aid['mapping']:
aid = self.__search_aid['mapping'][data['_type']]
attrs = self.__search_aid['attrs'][data['_type']] if data['_type'] in self.__search_aid['attrs'] else []
types = [data['_type']]
types.extend(data["_extensions"])
# append aliases to search words
for type in types[:]:
if type in self.__search_aid['aliases']:
types.extend(self.__search_aid['aliases'][type])
for ext in data["_extensions"]:
if ext in self.__search_aid['mapping']:
aid.update(self.__search_aid['mapping'][ext])
if ext in self.__search_aid['attrs']:
attrs.extend(self.__search_aid['attrs'][ext])
attrs = list(set(attrs))
search_words = [", ".join(data[x]) for x in attrs if x in data and data[x] is not None]
so = SearchObjectIndex(
so_uuid=data["_uuid"],
reverse_parent_dn=','.join([d for d in ldap.dn.explode_dn(data["_parent_dn"], flags=ldap.DN_FORMAT_LDAPV3)[::-1]]),
title=self.__build_value(aid["title"], data),
description=self.__build_value(aid["description"], data),
search=" ".join(search_words),
types=" ".join(list(set(types)))
)
session.add(so)
session.commit()
# update word index on change (if indexing is not running currently)
if not GlobalLock.exists("scan_index"):
self.update_words(session=session)
self.unmark_as_dirty(data["_uuid"])
except Exception as e:
self.log.error('Error during save: %s' % str(e))
def __build_value(self, v, info):
"""
Fill placeholders in the value to be displayed as "description".
"""
if not v:
return None
if v in info:
return ", ".join(info[v])
# Find all placeholders
attrs = {}
for attr in re.findall(r"%\(([^)]+)\)s", v):
# Extract ordinary attributes
if attr in info:
attrs[attr] = ", ".join(info[attr])
# Check for result renderers
elif attr in self.__value_extender:
attrs[attr] = self.__value_extender[attr](info)
# Fallback - just set nothing
else:
attrs[attr] = ""
# Assemble and remove empty lines and multiple whitespaces
res = v % attrs
res = re.sub(r"(<br>)+", "<br>", res)
res = re.sub(r"^<br>", "", res)
res = re.sub(r"<br>$", "", res)
return "<br>".join([s.strip() for s in res.split("<br>")])
def remove(self, obj, session=None):
self.remove_by_uuid(obj.uuid, session=session)
def __remove_others(self, uuids, session=None):
if session is not None:
return self.__session_remove_others(uuids, session)
else:
with make_session() as session:
return self.__session_remove_others(uuids, session)
def __session_remove_others(self, uuids, session):
self.log.debug("removing a bunch of objects")
session.query(KeyValueIndex).filter(~KeyValueIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.query(ExtensionIndex).filter(~ExtensionIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.query(SearchObjectIndex).filter(~SearchObjectIndex.so_uuid.in_(uuids)).delete(synchronize_session=False)
removed = session.query(ObjectInfoIndex).filter(~ObjectInfoIndex.uuid.in_(uuids)).delete(synchronize_session=False)
session.commit()
return removed
def remove_by_uuid(self, uuid, session=None):
if session is not None:
self.__remove_by_uuid(uuid, session)
else:
with make_session() as session:
self.__remove_by_uuid(uuid, session)
def __remove_by_uuid(self, uuid, session):
self.log.debug("removing object index for %s" % uuid)
if self.exists(uuid, session=session):
session.query(KeyValueIndex).filter(KeyValueIndex.uuid == uuid).delete()
session.query(ExtensionIndex).filter(ExtensionIndex.uuid == uuid).delete()
session.query(SearchObjectIndex).filter(SearchObjectIndex.so_uuid == uuid).delete()
session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == uuid).delete()
session.commit()
def update(self, obj, session=None):
if session is not None:
self.__update(obj, session)
else:
with make_session() as session:
self.__update(obj, session)
def __update(self, obj, session):
# Gather information
current = obj.asJSON(True, use_in_value=True)
old_dn = session.query(ObjectInfoIndex.dn).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
if not old_dn:
raise IndexException(C.make_error('OBJECT_NOT_FOUND', "base", id=obj.uuid))
old_dn = old_dn[0]
# Remove old entry and insert new
with self.lock:
self.remove_by_uuid(obj.uuid, session=session)
self.__save(current, session=session)
# Has the entry been moved?
if current['dn'] != old_dn:
# Adjust all ParentDN entries of child objects
res = session.query(ObjectInfoIndex).filter(
or_(ObjectInfoIndex._parent_dn == old_dn, ObjectInfoIndex._parent_dn.like('%' + old_dn))
).all()
for entry in res:
o_uuid = entry.uuid
o_dn = entry.dn
o_parent = entry._parent_dn
o_adjusted_parent = entry._adjusted_parent_dn
n_dn = o_dn[:-len(old_dn)] + current['dn']
n_parent = o_parent[:-len(old_dn)] + current['dn']
n_adjusted_parent = o_adjusted_parent[:-len(o_adjusted_parent)] + current['_adjusted_parent_dn']
oi = session.query(ObjectInfoIndex).filter(ObjectInfoIndex.uuid == o_uuid).one()
oi.dn = n_dn
oi._parent_dn = n_parent
oi._adjusted_parent_dn = n_adjusted_parent
session.commit()
@Command(__help__=N_("Check if an object with the given UUID exists."))
def exists(self, uuid, session=None):
"""
Do a database query for the given UUID and return an
        existence flag.
========== ==================
Parameter Description
========== ==================
uuid Object identifier
========== ==================
``Return``: True/False
"""
if session is not None:
return session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == uuid).one_or_none() is not None
else:
with make_session() as session:
return session.query(ObjectInfoIndex.uuid).filter(ObjectInfoIndex.uuid == uuid).one_or_none() is not None
@Command(__help__=N_("Get list of defined base object types."))
def getBaseObjectTypes(self):
ret = []
for k, v in self.factory.getObjectTypes().items():
if v['base']:
ret.append(k)
return ret
@Command(needsUser=True, __help__=N_("Query the index for entries."))
def find(self, user, query, conditions=None):
"""
Perform a raw sqlalchemy query.
========== ==================
Parameter Description
========== ==================
query Query hash
conditions Conditions hash
========== ==================
For more information on the query format, consult the mongodb documentation.
``Return``: List of dicts
"""
res = []
# Always return dn and _type - we need it for ACL control
if isinstance(conditions, dict):
conditions['dn'] = 1
conditions['type'] = 1
else:
conditions = None
if not isinstance(query, dict):
raise FilterException(C.make_error('INVALID_QUERY'))
# Create result-set
for item in self.search(query, conditions):
            # Filter out what the current user is not allowed to see
item = self.__filter_entry(user, item)
if item and item['dn'] is not None:
res.append(item)
return res
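    # Hedged example of the query/conditions hashes accepted by find(); the
    # attribute names are illustrative and `user` is normally injected by the
    # command registry:
    #
    #   index.find('admin',
    #              {'_type': 'User', 'extension': {'in_': ['PosixUser']}},
    #              conditions={'uid': 1, 'sn': 1})
    #
    # 'dn' and 'type' are forced into the conditions so the ACL check can run.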
def _make_filter(self, node, session):
use_extension = False
def __make_filter(n, session):
nonlocal use_extension
res = []
for key, value in n.items():
if isinstance(value, dict):
# Maintain certain key words
if key == "and_":
res.append(and_(*__make_filter(value, session)))
elif key == "or_":
res.append(or_(*__make_filter(value, session)))
elif key == "not_":
res.append(not_(*__make_filter(value, session)))
elif 'not_in_' in value or 'in_' in value:
if key == "extension":
use_extension = True
if 'not_in_' in value:
res.append(~ExtensionIndex.extension.in_(value['not_in_']))
elif 'in_' in value:
res.append(ExtensionIndex.extension.in_(value['in_']))
elif hasattr(ObjectInfoIndex, key):
attr = getattr(ObjectInfoIndex, key)
if 'not_in_' in value:
res.append(~attr.in_(value['not_in_']))
elif 'in_' in value:
res.append(attr.in_(value['in_']))
else:
in_expr = None
if 'not_in_' in value:
in_expr = ~KeyValueIndex.value.in_(value['not_in_'])
elif 'in_' in value:
in_expr = KeyValueIndex.value.in_(value['in_'])
sub_query = session.query(KeyValueIndex.uuid).filter(KeyValueIndex.key == key, in_expr).subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
else:
raise IndexException(C.make_error('NOT_SUPPORTED', "base", operator=key))
elif isinstance(value, list):
# implicit or_ in case of lists - hashes cannot have multiple
# keys with the same name
exprs = []
for v in value:
# convert integers because we need strings
if isinstance(v, int):
v = "%s" % v
if hasattr(ObjectInfoIndex, key):
if "%" in v:
if v == "%":
exprs.append(getattr(ObjectInfoIndex, key).like(v))
else:
exprs.append(getattr(ObjectInfoIndex, key).ilike(v))
else:
exprs.append(getattr(ObjectInfoIndex, key) == v)
elif key == "extension":
use_extension = True
exprs.append(ExtensionIndex.extension == v)
else:
if key == "*":
sub_query = search(session.query(SearchObjectIndex.so_uuid), v, sort=True, regconfig='simple').subquery()
elif "%" in v:
if v == "%":
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.like(v))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.ilike(v))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value == v)). \
subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
res.append(or_(*exprs))
else:
# convert integers because we need strings
if isinstance(value, int):
value = "%s" % value
if hasattr(ObjectInfoIndex, key):
if "%" in value:
res.append(getattr(ObjectInfoIndex, key).ilike(value))
else:
res.append(getattr(ObjectInfoIndex, key) == value)
elif key == "extension":
use_extension = True
res.append(ExtensionIndex.extension == value)
else:
if key == "*":
sub_query = search(session.query(SearchObjectIndex.so_uuid), value, sort=True, regconfig='simple').subquery()
elif "%" in value:
if value == "%":
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.like(value))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value.ilike(value))). \
subquery()
else:
sub_query = session.query(KeyValueIndex.uuid). \
filter(and_(KeyValueIndex.key == key, KeyValueIndex.value == value)). \
subquery()
res.append(ObjectInfoIndex.uuid.in_(sub_query))
return res
# Add query information to be able to search various tables
_args = __make_filter(node, session)
if use_extension:
args = [ObjectInfoIndex.uuid == ExtensionIndex.uuid]
args += _args
return and_(*args)
return _args
def get_extensions(self, uuid):
""" return the list of active extensions for the given uuid-object as store in the db """
with make_session() as session:
q = session.query(ExtensionIndex).filter(ExtensionIndex.uuid == uuid)
return [e.extension for e in q.all()]
def search(self, query, properties, options=None, session=None):
"""
Perform an index search
========== ==================
Parameter Description
========== ==================
query Query hash
properties Conditions hash
========== ==================
For more information on the query format, consult the mongodb documentation.
``Return``: List of dicts
"""
if session is None:
with make_session() as session:
return self._session_search(session, query, properties, options=options)
else:
return self._session_search(session, query, properties, options=options)
def _session_search(self, session, query, properties, options=None):
res = []
fltr = self._make_filter(query, session)
def normalize(data, resultset=None, so_props=None):
_res = {
"_uuid": data.uuid,
"dn": data.dn,
"_type": data._type,
"_parent_dn": data._parent_dn,
"_adjusted_parent_dn": data._adjusted_parent_dn,
"_last_changed": data._last_modified,
"_extensions": []
}
# Add extension list
for extension in data.extensions:
_res["_extensions"].append(extension.extension)
# Add indexed properties
for kv in data.properties:
if kv.key in _res:
_res[kv.key].append(kv.value)
else:
_res[kv.key] = [kv.value]
# get data from SearchObjectIndex (e.g. title, description)
if so_props is not None and len(so_props) > 0 and len(data.search_object) > 0:
for prop in so_props:
_res[prop] = [getattr(data.search_object[0], prop)]
# Clean the result set?
if resultset:
for key in [_key for _key in _res if not _key in resultset.keys() and _key[0:1] != "_"]:
_res.pop(key, None)
return _res
if options is None:
options = {}
q = session.query(ObjectInfoIndex) \
.options(subqueryload(ObjectInfoIndex.properties)) \
.options(subqueryload(ObjectInfoIndex.extensions))
# check if we need something from the searchObject
so_props = None
if properties is not None:
so_props = [x for x in properties if hasattr(SearchObjectIndex, x)]
if len(so_props) > 0:
q = q.options(subqueryload(ObjectInfoIndex.search_object))
q = q.filter(*fltr)
if 'limit' in options:
            q = q.limit(options['limit'])
# self.log.info(print_query(q))
try:
for o in q.all():
res.append(normalize(o, properties, so_props=so_props))
except sqlalchemy.exc.InternalError as e:
self.log.error(str(e))
session.rollback()
return res
def __filter_entry(self, user, entry):
"""
Takes a query entry and decides based on the user what to do
with the result set.
========== ===========================
Parameter Description
========== ===========================
user User ID
entry Search entry as hash
========== ===========================
``Return``: Filtered result entry
"""
if self._acl_resolver.isAdmin(user):
return entry
res = {}
for attr in entry.keys():
if attr in ['dn', '_type', '_uuid', '_last_changed']:
res[attr] = entry[attr]
continue
if self.__has_access_to(user, entry['dn'], entry['_type'], attr):
res[attr] = entry[attr]
return res
def __has_access_to(self, user, object_dn, object_type, attr):
"""
Checks whether the given user has access to the given object/attribute or not.
"""
if user:
topic = "%s.objects.%s.attributes.%s" % (self.env.domain, object_type, attr)
return self._acl_resolver.check(user, topic, "r", base=object_dn)
else:
return True
# needs to be top level to be picklable
def process_objects(o):
res = None
index = PluginRegistry.getInstance("ObjectIndex")
with make_session() as inner_session:
if o is None:
return None, None, ObjectIndex.to_be_updated
# Get object
try:
obj = ObjectProxy(o)
except Exception as e:
index.log.warning("not indexing %s: %s" % (o, str(e)))
return res, None, ObjectIndex.to_be_updated
# Check for index entry
last_modified = inner_session.query(ObjectInfoIndex._last_modified).filter(ObjectInfoIndex.uuid == obj.uuid).one_or_none()
# Entry is not in the database
if not last_modified:
index.insert(obj, True, session=inner_session)
res = "added"
# Entry is in the database
else:
# OK: already there
if obj.modifyTimestamp == last_modified[0]:
index.log.debug("found up-to-date object index for %s (%s)" % (obj.uuid, obj.dn))
res = "existing"
else:
index.log.debug("updating object index for %s (%s)" % (obj.uuid, obj.dn))
index.update(obj, session=inner_session)
res = "updated"
uuid = obj.uuid
del obj
return res, uuid, ObjectIndex.to_be_updated
def post_process(uuid):
index = PluginRegistry.getInstance("ObjectIndex")
with make_session() as inner_session:
if uuid:
try:
obj = ObjectProxy(uuid)
index.update(obj, session=inner_session)
return True
except Exception as e:
index.log.warning("not post-processing %s: %s" % (uuid, str(e)))
traceback.print_exc()
return False
return False
def resolve_children(dn):
index = PluginRegistry.getInstance("ObjectIndex")
index.log.debug("found object '%s'" % dn)
res = {}
children = index.factory.getObjectChildren(dn)
res = {**res, **children}
for chld in children.keys():
res = {**res, **resolve_children(chld)}
return res
@implementer(IInterfaceHandler)
class BackendRegistry(Plugin):
_target_ = 'core'
_priority_ = 99
def __init__(self):
self.env = Environment.getInstance()
self.log = logging.getLogger(__name__)
@Command(__help__=N_("Register a backend to allow MQTT access"))
def registerBackend(self, uuid, password, url=None, type=BackendTypes.unknown):
with make_session() as session:
query = session.query(RegisteredBackend).filter(or_(RegisteredBackend.uuid == uuid,
RegisteredBackend.url == url))
if query.count() > 0:
# delete old entries
query.delete()
rb = RegisteredBackend(
uuid=uuid,
password=password,
url=url,
type=type
)
session.add(rb)
session.commit()
@Command(__help__=N_("Unregister a backend from MQTT access"))
def unregisterBackend(self, uuid):
with make_session() as session:
backend = session.query(RegisteredBackend).filter(RegisteredBackend.uuid == uuid).one_or_none()
if backend is not None:
session.delete(backend)
session.commit()
def check_auth(self, uuid, password):
if hasattr(self.env, "core_uuid") and self.env.core_uuid == uuid and self.env.core_key == password:
return True
with make_session() as session:
backend = session.query(RegisteredBackend).filter(RegisteredBackend.uuid == uuid).one_or_none()
if backend is not None:
return backend.validate_password(password)
return False
def get_type(self, uuid):
# do not use DB if we want to identify ourselves
if hasattr(self.env, "core_uuid") and self.env.core_uuid == uuid:
return BackendTypes.proxy if self.env.mode == "proxy" else BackendTypes.active_master
with make_session() as session:
try:
res = session.query(RegisteredBackend.type).filter(RegisteredBackend.uuid == uuid).one_or_none()
return res[0] if res is not None else None
except Exception as e:
self.log.error('Error querying backend type from db: %s' % str(e))
return None
| lgpl-2.1 | -7,332,828,401,185,501,000 | 39.265225 | 240 | 0.534066 | false |
pombredanne/tahoe-lafs | src/allmydata/client.py | 1 | 24949 | import os, stat, time, weakref
from allmydata import node
from base64 import urlsafe_b64encode
from zope.interface import implements
from twisted.internet import reactor, defer
from twisted.application import service
from twisted.application.internet import TimerService
from twisted.python.filepath import FilePath
from pycryptopp.publickey import rsa
import allmydata
from allmydata.storage.server import StorageServer
from allmydata import storage_client
from allmydata.immutable.upload import Uploader
from allmydata.immutable.offloaded import Helper
from allmydata.control import ControlServer
from allmydata.introducer.client import IntroducerClient
from allmydata.util import hashutil, base32, pollmixin, log, keyutil, idlib
from allmydata.util.encodingutil import get_filesystem_encoding, \
from_utf8_or_none
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.abbreviate import parse_abbreviated_size
from allmydata.util.time_format import parse_duration, parse_date
from allmydata.stats import StatsProvider
from allmydata.history import History
from allmydata.interfaces import IStatsProducer, SDMF_VERSION, MDMF_VERSION
from allmydata.nodemaker import NodeMaker
from allmydata.blacklist import Blacklist
from allmydata.node import OldConfigOptionError
KiB=1024
MiB=1024*KiB
GiB=1024*MiB
TiB=1024*GiB
PiB=1024*TiB
def _make_secret():
return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"
class SecretHolder:
def __init__(self, lease_secret, convergence_secret):
self._lease_secret = lease_secret
self._convergence_secret = convergence_secret
def get_renewal_secret(self):
return hashutil.my_renewal_secret_hash(self._lease_secret)
def get_cancel_secret(self):
return hashutil.my_cancel_secret_hash(self._lease_secret)
def get_convergence_secret(self):
return self._convergence_secret
class KeyGenerator:
"""I create RSA keys for mutable files. Each call to generate() returns a
single keypair. The keysize is specified first by the keysize= argument
to generate(), then with a default set by set_default_keysize(), then
with a built-in default of 2048 bits."""
def __init__(self):
self.default_keysize = 2048
def set_default_keysize(self, keysize):
"""Call this to override the size of the RSA keys created for new
mutable files which don't otherwise specify a size. This will affect
all subsequent calls to generate() without a keysize= argument. The
default size is 2048 bits. Test cases should call this method once
during setup, to cause me to create smaller keys, so the unit tests
run faster."""
self.default_keysize = keysize
def generate(self, keysize=None):
"""I return a Deferred that fires with a (verifyingkey, signingkey)
pair. I accept a keysize in bits (2048 bit keys are standard, smaller
keys are used for testing). If you do not provide a keysize, I will
use my default, which is set by a call to set_default_keysize(). If
set_default_keysize() has never been called, I will create 2048 bit
keys."""
keysize = keysize or self.default_keysize
# RSA key generation for a 2048 bit key takes between 0.8 and 3.2
# secs
signer = rsa.generate(keysize)
verifier = signer.get_verifying_key()
return defer.succeed( (verifier, signer) )
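    # Minimal usage sketch (hypothetical caller, not part of this module):
    #
    #   keygen = KeyGenerator()
    #   keygen.set_default_keysize(522)   # e.g. smaller keys to speed up tests
    #   d = keygen.generate()             # Deferred firing with (verifier, signer)
    #   d.addCallback(lambda keypair: use_keypair(*keypair))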
class Terminator(service.Service):
def __init__(self):
self._clients = weakref.WeakKeyDictionary()
def register(self, c):
self._clients[c] = None
def stopService(self):
for c in self._clients:
c.stop()
return service.Service.stopService(self)
class Client(node.Node, pollmixin.PollMixin):
implements(IStatsProducer)
PORTNUMFILE = "client.port"
STOREDIR = 'storage'
NODETYPE = "client"
EXIT_TRIGGER_FILE = "exit_trigger"
# This means that if a storage server treats me as though I were a
# 1.0.0 storage client, it will work as they expect.
OLDEST_SUPPORTED_VERSION = "1.0.0"
# This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
# is the number of shares required to reconstruct a file. 'desired' means
# that we will abort an upload unless we can allocate space for at least
# this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
DEFAULT_ENCODING_PARAMETERS = {"k": 3,
"happy": 7,
"n": 10,
"max_segment_size": 128*KiB,
}
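    # Worked example (illustrative): with k=3, happy=7, n=10 a 900 KiB file is
    # erasure-coded, segment by segment (128 KiB segments), into 10 shares of
    # roughly 300 KiB each; any 3 shares suffice to rebuild the file, and the
    # upload is aborted unless at least 7 shares can be placed.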
def __init__(self, basedir="."):
node.Node.__init__(self, basedir)
# All tub.registerReference must happen *after* we upcall, since
# that's what does tub.setLocation()
self.started_timestamp = time.time()
self.logSource="Client"
self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
self.init_introducer_client()
self.init_stats_provider()
self.init_secrets()
self.init_node_key()
self.init_storage()
self.init_control()
self._key_generator = KeyGenerator()
key_gen_furl = self.get_config("client", "key_generator.furl", None)
if key_gen_furl:
log.msg("[client]key_generator.furl= is now ignored, see #2783")
self.init_client()
self.helper = None
if self.get_config("helper", "enabled", False, boolean=True):
self.init_helper()
self.init_ftp_server()
self.init_sftp_server()
self.init_drop_uploader()
# If the node sees an exit_trigger file, it will poll every second to see
# whether the file still exists, and what its mtime is. If the file does not
# exist or has not been modified for a given timeout, the node will exit.
exit_trigger_file = os.path.join(self.basedir,
self.EXIT_TRIGGER_FILE)
if os.path.exists(exit_trigger_file):
age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age))
exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file)
exit_trigger.setServiceParent(self)
# this needs to happen last, so it can use getServiceNamed() to
# acquire references to StorageServer and other web-statusable things
webport = self.get_config("node", "web.port", None)
if webport:
self.init_web(webport) # strports string
def _sequencer(self):
seqnum_s = self.get_config_from_file("announcement-seqnum")
if not seqnum_s:
seqnum_s = "0"
seqnum = int(seqnum_s.strip())
seqnum += 1 # increment
self.write_config("announcement-seqnum", "%d\n" % seqnum)
nonce = _make_secret().strip()
return seqnum, nonce
def init_introducer_client(self):
self.introducer_furl = self.get_config("client", "introducer.furl")
introducer_cache_filepath = FilePath(os.path.join(self.basedir, "private", "introducer_cache.yaml"))
ic = IntroducerClient(self.tub, self.introducer_furl,
self.nickname,
str(allmydata.__full_version__),
str(self.OLDEST_SUPPORTED_VERSION),
self.get_app_versions(),
self._sequencer, introducer_cache_filepath)
self.introducer_client = ic
ic.setServiceParent(self)
def init_stats_provider(self):
gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
self.stats_provider = StatsProvider(self, gatherer_furl)
self.add_service(self.stats_provider)
self.stats_provider.register_producer(self)
def get_stats(self):
return { 'node.uptime': time.time() - self.started_timestamp }
def init_secrets(self):
lease_s = self.get_or_create_private_config("secret", _make_secret)
lease_secret = base32.a2b(lease_s)
convergence_s = self.get_or_create_private_config('convergence',
_make_secret)
self.convergence = base32.a2b(convergence_s)
self._secret_holder = SecretHolder(lease_secret, self.convergence)
def init_node_key(self):
# we only create the key once. On all subsequent runs, we re-use the
# existing key
def _make_key():
sk_vs,vk_vs = keyutil.make_keypair()
return sk_vs+"\n"
sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
sk,vk_vs = keyutil.parse_privkey(sk_vs.strip())
self.write_config("node.pubkey", vk_vs+"\n")
self._node_key = sk
def get_long_nodeid(self):
# this matches what IServer.get_longname() says about us elsewhere
vk_bytes = self._node_key.get_verifying_key_bytes()
return "v0-"+base32.b2a(vk_bytes)
def get_long_tubid(self):
return idlib.nodeid_b2a(self.nodeid)
def _init_permutation_seed(self, ss):
seed = self.get_config_from_file("permutation-seed")
if not seed:
have_shares = ss.have_shares()
if have_shares:
# if the server has shares but not a recorded
# permutation-seed, then it has been around since pre-#466
# days, and the clients who uploaded those shares used our
# TubID as a permutation-seed. We should keep using that same
# seed to keep the shares in the same place in the permuted
# ring, so those clients don't have to perform excessive
# searches.
seed = base32.b2a(self.nodeid)
else:
# otherwise, we're free to use the more natural seed of our
# pubkey-based serverid
vk_bytes = self._node_key.get_verifying_key_bytes()
seed = base32.b2a(vk_bytes)
self.write_config("permutation-seed", seed+"\n")
return seed.strip()
def init_storage(self):
# should we run a storage server (and publish it for others to use)?
if not self.get_config("storage", "enabled", True, boolean=True):
return
readonly = self.get_config("storage", "readonly", False, boolean=True)
storedir = os.path.join(self.basedir, self.STOREDIR)
data = self.get_config("storage", "reserved_space", None)
try:
reserved = parse_abbreviated_size(data)
except ValueError:
log.msg("[storage]reserved_space= contains unparseable value %s"
% data)
raise
if reserved is None:
reserved = 0
discard = self.get_config("storage", "debug_discard", False,
boolean=True)
expire = self.get_config("storage", "expire.enabled", False, boolean=True)
if expire:
mode = self.get_config("storage", "expire.mode") # require a mode
else:
mode = self.get_config("storage", "expire.mode", "age")
o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
if o_l_d is not None:
o_l_d = parse_duration(o_l_d)
cutoff_date = None
if mode == "cutoff-date":
cutoff_date = self.get_config("storage", "expire.cutoff_date")
cutoff_date = parse_date(cutoff_date)
sharetypes = []
if self.get_config("storage", "expire.immutable", True, boolean=True):
sharetypes.append("immutable")
if self.get_config("storage", "expire.mutable", True, boolean=True):
sharetypes.append("mutable")
expiration_sharetypes = tuple(sharetypes)
ss = StorageServer(storedir, self.nodeid,
reserved_space=reserved,
discard_storage=discard,
readonly_storage=readonly,
stats_provider=self.stats_provider,
expiration_enabled=expire,
expiration_mode=mode,
expiration_override_lease_duration=o_l_d,
expiration_cutoff_date=cutoff_date,
expiration_sharetypes=expiration_sharetypes)
self.add_service(ss)
furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding())
furl = self.tub.registerReference(ss, furlFile=furl_file)
ann = {"anonymous-storage-FURL": furl,
"permutation-seed-base32": self._init_permutation_seed(ss),
}
self.introducer_client.publish("storage", ann, self._node_key)
def init_client(self):
helper_furl = self.get_config("client", "helper.furl", None)
if helper_furl in ("None", ""):
helper_furl = None
DEP = self.encoding_params
DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))
# for the CLI to authenticate to local JSON endpoints
self._create_auth_token()
self.init_client_storage_broker()
self.history = History(self.stats_provider)
self.terminator = Terminator()
self.terminator.setServiceParent(self)
self.add_service(Uploader(helper_furl, self.stats_provider,
self.history))
self.init_blacklist()
self.init_nodemaker()
def get_auth_token(self):
"""
This returns a local authentication token, which is just some
random data in "api_auth_token" which must be echoed to API
calls.
Currently only the URI '/magic' for magic-folder status; other
endpoints are invited to include this as well, as appropriate.
"""
return self.get_private_config('api_auth_token')
def _create_auth_token(self):
"""
Creates new auth-token data written to 'private/api_auth_token'.
This is intentionally re-created every time the node starts.
"""
self.write_private_config(
'api_auth_token',
urlsafe_b64encode(os.urandom(32)) + '\n',
)
def init_client_storage_broker(self):
# create a StorageFarmBroker object, for use by Uploader/Downloader
# (and everybody else who wants to use storage servers)
ps = self.get_config("client", "peers.preferred", "").split(",")
preferred_peers = tuple([p.strip() for p in ps if p != ""])
sb = storage_client.StorageFarmBroker(permute_peers=True,
preferred_peers=preferred_peers,
tub_options=self.tub_options)
self.storage_broker = sb
sb.setServiceParent(self)
connection_threshold = min(self.encoding_params["k"],
self.encoding_params["happy"] + 1)
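# upload readiness gate: self.upload_ready_d (consumed e.g. by the drop-uploader
# below) fires once the broker reports at least min(k, happy+1) connected servers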
helper = storage_client.ConnectedEnough(sb, connection_threshold)
self.upload_ready_d = helper.when_connected_enough()
# load static server specifications from tahoe.cfg, if any.
# Not quite ready yet.
#if self.config.has_section("client-server-selection"):
# server_params = {} # maps serverid to dict of parameters
# for (name, value) in self.config.items("client-server-selection"):
# pieces = name.split(".")
# if pieces[0] == "server":
# serverid = pieces[1]
# if serverid not in server_params:
# server_params[serverid] = {}
# server_params[serverid][pieces[2]] = value
# for serverid, params in server_params.items():
# server_type = params.pop("type")
# if server_type == "tahoe-foolscap":
# s = storage_client.NativeStorageClient(*params)
# else:
# msg = ("unrecognized server type '%s' in "
# "tahoe.cfg [client-server-selection]server.%s.type"
# % (server_type, serverid))
# raise storage_client.UnknownServerTypeError(msg)
# sb.add_server(s.serverid, s)
# check to see if we're supposed to use the introducer too
if self.get_config("client-server-selection", "use_introducer",
default=True, boolean=True):
sb.use_introducer(self.introducer_client)
def get_storage_broker(self):
return self.storage_broker
def init_blacklist(self):
fn = os.path.join(self.basedir, "access.blacklist")
self.blacklist = Blacklist(fn)
def init_nodemaker(self):
default = self.get_config("client", "mutable.format", default="SDMF")
if default.upper() == "MDMF":
self.mutable_file_default = MDMF_VERSION
else:
self.mutable_file_default = SDMF_VERSION
self.nodemaker = NodeMaker(self.storage_broker,
self._secret_holder,
self.get_history(),
self.getServiceNamed("uploader"),
self.terminator,
self.get_encoding_parameters(),
self.mutable_file_default,
self._key_generator,
self.blacklist)
def get_history(self):
return self.history
def init_control(self):
c = ControlServer()
c.setServiceParent(self)
control_url = self.tub.registerReference(c)
self.write_private_config("control.furl", control_url + "\n")
def init_helper(self):
self.helper = Helper(os.path.join(self.basedir, "helper"),
self.storage_broker, self._secret_holder,
self.stats_provider, self.history)
# TODO: this is confusing. BASEDIR/private/helper.furl is created by
# the helper. BASEDIR/helper.furl is consumed by the client who wants
# to use the helper. I like having the filename be the same, since
# that makes 'cp' work smoothly, but the difference between config
# inputs and generated outputs is hard to see.
helper_furlfile = os.path.join(self.basedir,
"private", "helper.furl").encode(get_filesystem_encoding())
self.tub.registerReference(self.helper, furlFile=helper_furlfile)
def set_default_mutable_keysize(self, keysize):
self._key_generator.set_default_keysize(keysize)
def init_web(self, webport):
self.log("init_web(webport=%s)", args=(webport,))
from allmydata.webish import WebishServer
nodeurl_path = os.path.join(self.basedir, "node.url")
staticdir_config = self.get_config("node", "web.static", "public_html").decode("utf-8")
staticdir = abspath_expanduser_unicode(staticdir_config, base=self.basedir)
ws = WebishServer(self, webport, nodeurl_path, staticdir)
self.add_service(ws)
def init_ftp_server(self):
if self.get_config("ftpd", "enabled", False, boolean=True):
accountfile = from_utf8_or_none(
self.get_config("ftpd", "accounts.file", None))
if accountfile:
accountfile = abspath_expanduser_unicode(accountfile, base=self.basedir)
accounturl = self.get_config("ftpd", "accounts.url", None)
ftp_portstr = self.get_config("ftpd", "port", "8021")
from allmydata.frontends import ftpd
s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
s.setServiceParent(self)
def init_sftp_server(self):
if self.get_config("sftpd", "enabled", False, boolean=True):
accountfile = from_utf8_or_none(
self.get_config("sftpd", "accounts.file", None))
if accountfile:
accountfile = abspath_expanduser_unicode(accountfile, base=self.basedir)
accounturl = self.get_config("sftpd", "accounts.url", None)
sftp_portstr = self.get_config("sftpd", "port", "8022")
pubkey_file = from_utf8_or_none(self.get_config("sftpd", "host_pubkey_file"))
privkey_file = from_utf8_or_none(self.get_config("sftpd", "host_privkey_file"))
from allmydata.frontends import sftpd
s = sftpd.SFTPServer(self, accountfile, accounturl,
sftp_portstr, pubkey_file, privkey_file)
s.setServiceParent(self)
def init_drop_uploader(self):
if self.get_config("drop_upload", "enabled", False, boolean=True):
if self.get_config("drop_upload", "upload.dircap", None):
raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
"put the cap in a 'private/drop_upload_dircap' file, and delete this option.")
upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
local_dir_utf8 = self.get_config("drop_upload", "local.directory")
try:
from allmydata.frontends import drop_upload
s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
s.setServiceParent(self)
s.startService()
# start processing the upload queue when we've connected to enough servers
self.upload_ready_d.addCallback(s.upload_ready)
except Exception, e:
self.log("couldn't start drop-uploader: %r", args=(e,))
def _check_exit_trigger(self, exit_trigger_file):
if os.path.exists(exit_trigger_file):
mtime = os.stat(exit_trigger_file)[stat.ST_MTIME]
if mtime > time.time() - 120.0:
return
else:
self.log("%s file too old, shutting down" % (self.EXIT_TRIGGER_FILE,))
else:
self.log("%s file missing, shutting down" % (self.EXIT_TRIGGER_FILE,))
reactor.stop()
def get_encoding_parameters(self):
return self.encoding_params
def connected_to_introducer(self):
if self.introducer_client:
return self.introducer_client.connected_to_introducer()
return False
def get_renewal_secret(self): # this will go away
return self._secret_holder.get_renewal_secret()
def get_cancel_secret(self):
return self._secret_holder.get_cancel_secret()
def debug_wait_for_client_connections(self, num_clients):
"""Return a Deferred that fires (with None) when we have connections
to the given number of peers. Useful for tests that set up a
temporary test network and need to know when it is safe to proceed
with an upload or download."""
def _check():
return len(self.storage_broker.get_connected_servers()) >= num_clients
d = self.poll(_check, 0.5)
d.addCallback(lambda res: None)
return d
# these four methods are the primitives for creating filenodes and
# dirnodes. The first takes a URI and produces a filenode or (new-style)
# dirnode. The other three create brand-new filenodes/dirnodes.
def create_node_from_uri(self, write_uri, read_uri=None, deep_immutable=False, name="<unknown name>"):
# This returns synchronously.
# Note that it does *not* validate the write_uri and read_uri; instead we
# may get an opaque node if there were any problems.
return self.nodemaker.create_from_cap(write_uri, read_uri, deep_immutable=deep_immutable, name=name)
def create_dirnode(self, initial_children={}, version=None):
d = self.nodemaker.create_new_mutable_directory(initial_children, version=version)
return d
def create_immutable_dirnode(self, children, convergence=None):
return self.nodemaker.create_immutable_directory(children, convergence)
def create_mutable_file(self, contents=None, keysize=None, version=None):
return self.nodemaker.create_mutable_file(contents, keysize,
version=version)
def upload(self, uploadable):
uploader = self.getServiceNamed("uploader")
return uploader.upload(uploadable)
| gpl-2.0 | 4,486,410,822,467,145,700 | 43.791741 | 121 | 0.608882 | false |
jolyonb/edx-platform | cms/djangoapps/contentstore/views/tests/test_assets.py | 1 | 24081 | """
Unit tests for the asset upload endpoint.
"""
import json
from datetime import datetime
from io import BytesIO
import mock
from ddt import data, ddt
from django.conf import settings
from django.test.utils import override_settings
from mock import patch
from opaque_keys.edx.keys import AssetKey
from opaque_keys.edx.locator import CourseLocator
from PIL import Image
from pytz import UTC
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from contentstore.views import assets
from static_replace import replace_static_urls
from xmodule.assetstore import AssetMetadata
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import import_course_from_xml
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
MAX_FILE_SIZE = settings.MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB * 1000 ** 2
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class AssetsTestCase(CourseTestCase):
"""
Parent class for all asset tests.
"""
def setUp(self):
super(AssetsTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
def upload_asset(self, name="asset-1", asset_type='text'):
"""
Post to the asset upload url
"""
asset = self.get_sample_asset(name, asset_type)
response = self.client.post(self.url, {"name": name, "file": asset})
return response
def get_sample_asset(self, name, asset_type='text'):
"""
Returns an in-memory file of the specified type with the given name for testing
"""
sample_asset = BytesIO()
sample_file_contents = "This file is generated by python unit test"
if asset_type == 'text':
sample_asset.name = '{name}.txt'.format(name=name)
sample_asset.write(sample_file_contents)
elif asset_type == 'image':
image = Image.new("RGB", size=(50, 50), color=(256, 0, 0))
image.save(sample_asset, 'jpeg')
sample_asset.name = '{name}.jpg'.format(name=name)
elif asset_type == 'opendoc':
sample_asset.name = '{name}.odt'.format(name=name)
sample_asset.write(sample_file_contents)
sample_asset.seek(0)
return sample_asset
class BasicAssetsTestCase(AssetsTestCase):
"""
Test getting assets via html w/o additional args
"""
def test_basic(self):
resp = self.client.get(self.url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 200)
def test_static_url_generation(self):
course_key = CourseLocator('org', 'class', 'run')
location = course_key.make_asset_key('asset', 'my_file_name.jpg')
path = StaticContent.get_static_path_from_location(location)
self.assertEquals(path, '/static/my_file_name.jpg')
def test_pdf_asset(self):
module_store = modulestore()
course_items = import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['toy'],
static_content_store=contentstore(),
verbose=True
)
course = course_items[0]
url = reverse_course_url('assets_handler', course.id)
# Test valid contentType for pdf asset (textbook.pdf)
resp = self.client.get(url, HTTP_ACCEPT='application/json')
self.assertContains(resp, "/c4x/edX/toy/asset/textbook.pdf")
asset_location = AssetKey.from_string('/c4x/edX/toy/asset/textbook.pdf')
content = contentstore().find(asset_location)
# Check after import textbook.pdf has valid contentType ('application/pdf')
# Note: Actual contentType for textbook.pdf in asset.json is 'text/pdf'
self.assertEqual(content.content_type, 'application/pdf')
def test_relative_url_for_split_course(self):
"""
Test relative path for split courses assets
"""
with modulestore().default_store(ModuleStoreEnum.Type.split):
module_store = modulestore()
course_id = module_store.make_course_key('edX', 'toy', '2012_Fall')
import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['toy'],
static_content_store=contentstore(),
target_id=course_id,
create_if_not_present=True
)
course = module_store.get_course(course_id)
filename = 'sample_static.html'
html_src_attribute = '"/static/{}"'.format(filename)
asset_url = replace_static_urls(html_src_attribute, course_id=course.id)
url = asset_url.replace('"', '')
base_url = url.replace(filename, '')
self.assertIn("/{}".format(filename), url)
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
# simulate an html page where base_url points up to the asset's main directory
# and relative_path is the src of a dom element
relative_path = 'just_a_test.jpg'
# the browser appends relative_path to base_url
absolute_path = base_url + relative_path
self.assertIn("/{}".format(relative_path), absolute_path)
resp = self.client.get(absolute_path)
self.assertEquals(resp.status_code, 200)
class PaginationTestCase(AssetsTestCase):
"""
Tests the pagination of assets returned from the REST API.
"""
def test_json_responses(self):
"""
Test the ajax asset interfaces
"""
self.upload_asset("asset-1")
self.upload_asset("asset-2")
self.upload_asset("asset-3")
self.upload_asset("asset-4", "opendoc")
# Verify valid page requests
self.assert_correct_asset_response(self.url, 0, 4, 4)
self.assert_correct_asset_response(self.url + "?page_size=2", 0, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=2&page=1", 2, 2, 4)
self.assert_correct_sort_response(self.url, 'date_added', 'asc')
self.assert_correct_sort_response(self.url, 'date_added', 'desc')
self.assert_correct_sort_response(self.url, 'display_name', 'asc')
self.assert_correct_sort_response(self.url, 'display_name', 'desc')
self.assert_correct_filter_response(self.url, 'asset_type', '')
self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER')
self.assert_correct_filter_response(
self.url, 'asset_type', 'Documents')
self.assert_correct_filter_response(
self.url, 'asset_type', 'Documents,Images')
self.assert_correct_filter_response(
self.url, 'asset_type', 'Documents,OTHER')
self.assert_correct_text_search_response(self.url, 'asset-1.txt', 1)
self.assert_correct_text_search_response(self.url, 'asset-1', 1)
self.assert_correct_text_search_response(self.url, 'AsSeT-1', 1)
self.assert_correct_text_search_response(self.url, '.txt', 3)
self.assert_correct_text_search_response(self.url, '2', 1)
self.assert_correct_text_search_response(self.url, 'asset 2', 1)
self.assert_correct_text_search_response(self.url, '2 asset', 1)
self.assert_correct_text_search_response(self.url, '*.txt', 0)
self.assert_correct_asset_response(self.url + "?text_search=", 0, 4, 4)
# Verify invalid request parameters
self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX')
self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX, OTHER')
self.assert_invalid_parameters_error(self.url, 'asset_type', 'edX, Images')
# Verify querying outside the range of valid pages
self.assert_correct_asset_response(
self.url + "?page_size=2&page=-1", 0, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=2&page=2", 2, 2, 4)
self.assert_correct_asset_response(
self.url + "?page_size=3&page=1", 3, 1, 4)
self.assert_correct_asset_response(
self.url + "?page_size=1&page=5&asset_type=OTHER", 0, 1, 1)
self.assert_correct_asset_response(
self.url + "?page_size=1&page=5&asset_type=Images", 5, 0, 0)
@mock.patch('xmodule.contentstore.mongo.MongoContentStore.get_all_content_for_course')
def test_mocked_filtered_response(self, mock_get_all_content_for_course):
"""
Test the ajax asset interfaces
"""
asset_key = self.course.id.make_asset_key(
AssetMetadata.GENERAL_ASSET_TYPE, 'test.jpg')
upload_date = datetime(2015, 1, 12, 10, 30, tzinfo=UTC)
thumbnail_location = [
'c4x', 'edX', 'toy', 'thumbnail', 'test_thumb.jpg', None]
mock_get_all_content_for_course.return_value = [
[
{
"asset_key": asset_key,
"displayname": "test.jpg",
"contentType": "image/jpg",
"url": "/c4x/A/CS102/asset/test.jpg",
"uploadDate": upload_date,
"id": "/c4x/A/CS102/asset/test.jpg",
"portable_url": "/static/test.jpg",
"thumbnail": None,
"thumbnail_location": thumbnail_location,
"locked": None
}
],
1
]
# Verify valid page requests
self.assert_correct_filter_response(self.url, 'asset_type', 'OTHER')
def assert_correct_asset_response(self, url, expected_start, expected_length, expected_total):
"""
Get from the url and ensure it contains the expected number of responses
"""
resp = self.client.get(url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
self.assertEquals(json_response['start'], expected_start)
self.assertEquals(len(assets_response), expected_length)
self.assertEquals(json_response['totalCount'], expected_total)
def assert_correct_sort_response(self, url, sort, direction):
"""
Get from the url w/ a sort option and ensure items honor that sort
"""
resp = self.client.get(
url + '?sort=' + sort + '&direction=' + direction, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
self.assertEquals(sort, json_response['sort'])
self.assertEquals(direction, json_response['direction'])
name1 = assets_response[0][sort]
name2 = assets_response[1][sort]
name3 = assets_response[2][sort]
if direction == 'asc':
self.assertLessEqual(name1, name2)
self.assertLessEqual(name2, name3)
else:
self.assertGreaterEqual(name1, name2)
self.assertGreaterEqual(name2, name3)
def assert_correct_filter_response(self, url, filter_type, filter_value):
"""
Get from the url w/ a filter option and ensure items honor that filter
"""
filter_value_split = filter_value.split(',') if filter_value else []
requested_file_extensions = []
all_file_extensions = []
for requested_filter in filter_value_split:
if requested_filter == 'OTHER':
for file_type in settings.FILES_AND_UPLOAD_TYPE_FILTERS:
all_file_extensions.extend(file_type)
else:
file_extensions = settings.FILES_AND_UPLOAD_TYPE_FILTERS.get(
requested_filter, None)
if file_extensions is not None:
requested_file_extensions.extend(file_extensions)
resp = self.client.get(
url + '?' + filter_type + '=' + filter_value, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
self.assertEquals(filter_value_split, json_response['assetTypes'])
if filter_value != '':
content_types = [asset['content_type'].lower()
for asset in assets_response]
if 'OTHER' in filter_value_split:
for content_type in content_types:
# content_type is either not any defined type (i.e. OTHER) or is a defined type (if multiple
# parameters including OTHER are used)
self.assertTrue(content_type in requested_file_extensions or content_type not in all_file_extensions)
else:
for content_type in content_types:
self.assertIn(content_type, requested_file_extensions)
def assert_invalid_parameters_error(self, url, filter_type, filter_value):
"""
Get from the url w/ invalid filter option(s) and ensure error is received
"""
resp = self.client.get(
url + '?' + filter_type + '=' + filter_value, HTTP_ACCEPT='application/json')
self.assertEquals(resp.status_code, 400)
def assert_correct_text_search_response(self, url, text_search, number_matches):
"""
Get from the url w/ a text_search option and ensure items honor that search query
"""
resp = self.client.get(
url + '?text_search=' + text_search, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
assets_response = json_response['assets']
self.assertEquals(text_search, json_response['textSearch'])
self.assertEquals(len(assets_response), number_matches)
text_search_tokens = text_search.split()
for asset_response in assets_response:
for token in text_search_tokens:
self.assertTrue(token.lower() in asset_response['display_name'].lower())
@ddt
class UploadTestCase(AssetsTestCase):
"""
Unit tests for uploading a file
"""
def setUp(self):
super(UploadTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
def test_happy_path(self):
resp = self.upload_asset()
self.assertEquals(resp.status_code, 200)
def test_upload_image(self):
resp = self.upload_asset("test_image", asset_type="image")
self.assertEquals(resp.status_code, 200)
def test_no_file(self):
resp = self.client.post(self.url, {"name": "file.txt"}, "application/json")
self.assertEquals(resp.status_code, 400)
@data(
(int(MAX_FILE_SIZE / 2.0), "small.file.test", 200),
(MAX_FILE_SIZE, "justequals.file.test", 200),
(MAX_FILE_SIZE + 90, "large.file.test", 413),
)
@mock.patch('contentstore.views.assets.get_file_size')
def test_file_size(self, case, get_file_size):
max_file_size, name, status_code = case
get_file_size.return_value = max_file_size
f = self.get_sample_asset(name=name)
resp = self.client.post(self.url, {
"name": name,
"file": f
})
self.assertEquals(resp.status_code, status_code)
class DownloadTestCase(AssetsTestCase):
"""
Unit tests for downloading a file.
"""
def setUp(self):
super(DownloadTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
# First, upload something.
self.asset_name = 'download_test'
resp = self.upload_asset(self.asset_name)
self.assertEquals(resp.status_code, 200)
self.uploaded_url = json.loads(resp.content)['asset']['url']
def test_download(self):
# Now, download it.
resp = self.client.get(self.uploaded_url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 200)
self.assertContains(resp, 'This file is generated by python unit test')
def test_download_not_found_throw(self):
url = self.uploaded_url.replace(self.asset_name, 'not_the_asset_name')
resp = self.client.get(url, HTTP_ACCEPT='text/html')
self.assertEquals(resp.status_code, 404)
@patch('xmodule.modulestore.mixed.MixedModuleStore.find_asset_metadata')
def test_pickling_calls(self, patched_find_asset_metadata):
""" Tests if assets are not calling find_asset_metadata
"""
patched_find_asset_metadata.return_value = None
self.client.get(self.uploaded_url, HTTP_ACCEPT='text/html')
self.assertFalse(patched_find_asset_metadata.called)
class AssetToJsonTestCase(AssetsTestCase):
"""
Unit test for transforming asset information into something
we can send out to the client via JSON.
"""
@override_settings(LMS_BASE="lms_base_url")
def test_basic(self):
upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)
content_type = 'image/jpg'
course_key = CourseLocator('org', 'class', 'run')
location = course_key.make_asset_key('asset', 'my_file_name.jpg')
thumbnail_location = course_key.make_asset_key('thumbnail', 'my_file_name_thumb.jpg')
# pylint: disable=protected-access
output = assets._get_asset_json("my_file", content_type, upload_date, location, thumbnail_location, True)
self.assertEquals(output["display_name"], "my_file")
self.assertEquals(output["date_added"], "Jun 01, 2013 at 10:30 UTC")
self.assertEquals(output["url"], "/asset-v1:org+class+run+type@asset+block@my_file_name.jpg")
self.assertEquals(output["external_url"], "lms_base_url/asset-v1:org+class+run+type@asset+block@my_file_name.jpg")
self.assertEquals(output["portable_url"], "/static/my_file_name.jpg")
self.assertEquals(output["thumbnail"], "/asset-v1:org+class+run+type@thumbnail+block@my_file_name_thumb.jpg")
self.assertEquals(output["id"], unicode(location))
self.assertEquals(output['locked'], True)
output = assets._get_asset_json("name", content_type, upload_date, location, None, False)
self.assertIsNone(output["thumbnail"])
class LockAssetTestCase(AssetsTestCase):
"""
Unit test for locking and unlocking an asset.
"""
def test_locking(self):
"""
Tests a simple locking and unlocking of an asset in the toy course.
"""
def verify_asset_locked_state(locked):
""" Helper method to verify lock state in the contentstore """
asset_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/sample_static.html')
content = contentstore().find(asset_location)
self.assertEqual(content.locked, locked)
def post_asset_update(lock, course):
""" Helper method for posting asset update. """
content_type = 'application/txt'
upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)
asset_location = course.id.make_asset_key('asset', 'sample_static.html')
url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})
resp = self.client.post(
url,
# pylint: disable=protected-access
json.dumps(assets._get_asset_json(
"sample_static.html", content_type, upload_date, asset_location, None, lock)),
"application/json"
)
self.assertEqual(resp.status_code, 201)
return json.loads(resp.content)
# Load the toy course.
module_store = modulestore()
course_items = import_course_from_xml(
module_store,
self.user.id,
TEST_DATA_DIR,
['toy'],
static_content_store=contentstore(),
verbose=True
)
course = course_items[0]
verify_asset_locked_state(False)
# Lock the asset
resp_asset = post_asset_update(True, course)
self.assertTrue(resp_asset['locked'])
verify_asset_locked_state(True)
# Unlock the asset
resp_asset = post_asset_update(False, course)
self.assertFalse(resp_asset['locked'])
verify_asset_locked_state(False)
class DeleteAssetTestCase(AssetsTestCase):
"""
Unit test for removing an asset.
"""
def setUp(self):
""" Scaffolding """
super(DeleteAssetTestCase, self).setUp()
self.url = reverse_course_url('assets_handler', self.course.id)
# First, upload something.
self.asset_name = 'delete_test'
self.asset = self.get_sample_asset(self.asset_name)
response = self.client.post(self.url, {"name": self.asset_name, "file": self.asset})
self.assertEquals(response.status_code, 200)
self.uploaded_url = json.loads(response.content)['asset']['url']
self.asset_location = AssetKey.from_string(self.uploaded_url)
self.content = contentstore().find(self.asset_location)
def test_delete_asset(self):
""" Tests the happy path :) """
test_url = reverse_course_url(
'assets_handler', self.course.id, kwargs={'asset_key_string': unicode(self.uploaded_url)})
resp = self.client.delete(test_url, HTTP_ACCEPT="application/json")
self.assertEquals(resp.status_code, 204)
def test_delete_image_type_asset(self):
""" Tests deletion of image type asset """
image_asset = self.get_sample_asset(self.asset_name, asset_type="image")
thumbnail_image_asset = self.get_sample_asset('delete_test_thumbnail', asset_type="image")
# upload image
response = self.client.post(self.url, {"name": "delete_image_test", "file": image_asset})
self.assertEquals(response.status_code, 200)
uploaded_image_url = json.loads(response.content)['asset']['url']
# upload image thumbnail
response = self.client.post(self.url, {"name": "delete_image_thumb_test", "file": thumbnail_image_asset})
self.assertEquals(response.status_code, 200)
thumbnail_url = json.loads(response.content)['asset']['url']
thumbnail_location = StaticContent.get_location_from_path(thumbnail_url)
image_asset_location = AssetKey.from_string(uploaded_image_url)
content = contentstore().find(image_asset_location)
content.thumbnail_location = thumbnail_location
contentstore().save(content)
with mock.patch('opaque_keys.edx.locator.CourseLocator.make_asset_key') as mock_asset_key:
mock_asset_key.return_value = thumbnail_location
test_url = reverse_course_url(
'assets_handler', self.course.id, kwargs={'asset_key_string': unicode(uploaded_image_url)})
resp = self.client.delete(test_url, HTTP_ACCEPT="application/json")
self.assertEquals(resp.status_code, 204)
def test_delete_asset_with_invalid_asset(self):
""" Tests the sad path :( """
test_url = reverse_course_url(
'assets_handler', self.course.id, kwargs={'asset_key_string': unicode("/c4x/edX/toy/asset/invalid.pdf")})
resp = self.client.delete(test_url, HTTP_ACCEPT="application/json")
self.assertEquals(resp.status_code, 404)
def test_delete_asset_with_invalid_thumbnail(self):
""" Tests the sad path :( """
test_url = reverse_course_url(
'assets_handler', self.course.id, kwargs={'asset_key_string': unicode(self.uploaded_url)})
self.content.thumbnail_location = StaticContent.get_location_from_path('/c4x/edX/toy/asset/invalid')
contentstore().save(self.content)
resp = self.client.delete(test_url, HTTP_ACCEPT="application/json")
self.assertEquals(resp.status_code, 204)
| agpl-3.0 | 5,597,647,605,721,270,000 | 41.772647 | 122 | 0.623479 | false |
amwelch/ifpy | ifpy/parse.py | 1 | 2325 | import dpkt
import humanfriendly
import nids
import sys
import pandas as pd
import socket
ips = {}
ip_to_domain = {}
def handle_tcp_stream(tcp):
end_states = (nids.NIDS_CLOSE, nids.NIDS_TIMEOUT, nids.NIDS_RESET)
ports = [80, 443]
if tcp.addr[1][1] not in ports:
return
global ips
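# libnids stream lifecycle: start collecting both directions when a stream on a
# tracked port is established, keep buffering while it is open, and credit the
# combined client+server byte count to the destination IP once it closes,
# times out or is reset.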
if tcp.nids_state == nids.NIDS_JUST_EST:
tcp.client.collect = 1
tcp.server.collect = 1
elif tcp.nids_state == nids.NIDS_DATA:
tcp.discard(0)
elif tcp.nids_state in end_states:
ip = tcp.addr[1][0]
ips.setdefault(ip, 0)
ips[ip] += len(tcp.client.data[:tcp.client.count]) + len(tcp.server.data[:tcp.server.count])
def udp_callback(addrs, payload, pkt):
if addrs[0][1] != 53:
return
dns = dpkt.dns.DNS(payload)
global ip_to_domain
for q in dns.qd:
for a in dns.an:
try:
ip = socket.inet_ntoa(a.ip)
ip_to_domain[ip] = a.name
except AttributeError:
pass
return
def extract(pcap_file):
global ip_to_domain
global ips
ips = {}
ip_to_domain = {}
nids.param("tcp_workarounds", 1)
nids.param("scan_num_hosts", 0) # disable portscan detection
nids.chksum_ctl([('0.0.0.0/0', False)]) # disable checksumming
nids.param("filename", pcap_file)
nids.init()
nids.register_tcp(handle_tcp_stream)
nids.register_udp(udp_callback)
try:
nids.run()
except Exception, e:
print "Exception ", pcap_file + " ", e
return
data = []
columns = ('name', 'bytes')
for ip, byte in ips.iteritems():
name = ip_to_domain.get(ip)
if name is None:
try:
name, alias, addresslist = socket.gethostbyaddr(ip)
name += ' (rDNS)'
except socket.herror as e:
name = ip
data.append([str(name), byte])
df = pd.DataFrame(data, columns=columns)
df = df.groupby('name', as_index=False).sum()
df = df.sort('bytes', ascending=False)
df['human_bytes'] = df.apply(lambda row: humanfriendly.format_size(row['bytes']), axis=1)
return df
if __name__ == "__main__":
for f in sys.argv[1:]:
print f
df = extract(f)
if df is not None:
print df.head(10)
| mit | -3,545,299,151,492,854,000 | 26.352941 | 100 | 0.564301 | false |
yahoo/bossmashup | templates/publisher.py | 1 | 3761 | #Copyright (c) 2011 Yahoo! Inc. All rights reserved. Licensed under the BSD License.
# See accompanying LICENSE file or http://www.opensource.org/licenses/BSD-3-Clause for the specific language governing permissions and limitations under the License.
"""
Main class here is Serp (Search Engine Results Page)
This is a simple templating library for binding search results with html templates
Check out the california dir to see how templates are formatted, and feel free to use it as a model for creating your own
Look at examples/ex1 in the root directory to see how to use Serp
If you're looking for a more powerful templating library, try clearsilver
"""
__author__ = "BOSS Team"
from collections import defaultdict
from os.path import abspath
from util import console
from yos.yql.db import strip_prep
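# Minimal usage sketch (hypothetical values; see examples/ex1 for the real flow):
#
#   page = Serp("templates/california", "demo search", "http://example.com/search")
#   page.add("http://example.com/doc", "Example title", abstract="Example abstract")
#   html = page.dumps()  # or page.dump("results.html") to write the page to disk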
def serp(tr, title, endpoint, results):
html = open(tr + "/page/page.html", "r").read()
ht = tr + "/page/page.css"
at = tr + "/result/result.css"
html = html.replace("<?header_background_img_dir?>", tr + "/page/", 1)
html = html.replace("<?header_css?>", ht, 1)
html = html.replace("<?header_abstract_css?>", at, 1)
html = html.replace("<?header_title?>", title, 1)
html = html.replace("<?header_endpoint?>", endpoint, 1)
return html.replace("<?header_results?>", "".join(results), 1)
def set_result(html, url, title, abstract, dispurl, source, imageurl):
html = html.replace("<?result_imageurl?>", imageurl, 1)
html = html.replace("<?result_source?>", source, 1)
html = html.replace("<?result_clickurl?>", url, 1)
html = html.replace("<?result_title?>", title, 1)
html = html.replace("<?result_abstract?>", abstract, 1)
return html.replace("<?result_dispurl?>", dispurl, 1)
def scratch_result(template, url, title, abstract="", dispurl="", source="", imageurl=""):
html = open(template, "r").read()
return set_result(html, url, title, abstract, dispurl, source, imageurl)
def prepare_row(row):
""" Just removes namespacing in the field names """
nr = defaultdict(lambda: "")
existing = map(lambda item: (strip_prep(item[0]), item[1]), row.iteritems())
nr.update(existing)
return nr
class Serp:
def __init__(self, template_dir, title, endpoint, result_template="result_default.html", maker=set_result):
"""
template_dir specifies which template directory to use e.g. 'templates/california' that is provided
title is the title of the search results html webpage
result_template is an optional parameter to specifying another search result template
maker is a function that follows the result template design to bind html e.g. set_result sets <?result_title?>
"""
self._tr = abspath(template_dir.rstrip("/"))
self._title = title
self._endpoint = endpoint
self._html = open(self._tr + "/result/" + result_template, "r").read()
self.results = []
self._maker = maker
def add(self, url, title, abstract="", dispurl="", source="", imageurl=""):
self.results.append( self._maker(self._html, url, title, abstract, dispurl, source, imageurl) )
def _bind_row(self, row):
nr = prepare_row(row)
return self.add(nr["clickurl"], nr["title"], nr["abstract"], nr["dispurl"], nr["source"], nr["imageurl"])
def bind_table(self, table):
"""
If the table contains rows (dictionaries) which have the fields referenced in _bind_row,
then just pass the table here and forget doing a loop around the add call
"""
for row in table.rows:
self._bind_row(row)
def dumps(self):
""" Return resulting html as a string """
return console.strfix(serp(self._tr, self._title, self._endpoint, results=self.results))
def dump(self, f):
""" Save resulting html as a file named f """
o = open(f, "w")
o.write(self.dumps())
o.close()
| bsd-3-clause | 4,224,145,653,442,078,700 | 41.258427 | 165 | 0.683595 | false |
ainur-fa/python_training_1 | test/test_del_contact_in_group.py | 1 | 1061 | # -*- coding: utf-8 -*-
from model.contact import Contact
from model.group import Group
import random
def test_del_contact_in_group(app, db, orm):
list =[]
app.contact.check_available_min_requirement(app, db)
group_list = db.get_group_list()
for this_group in group_list:
contacts_in_group = orm.get_contacts_in_group(Group(id=this_group.id))
[list.append(elem) for elem in contacts_in_group if elem not in list]
if list ==[]:
group = random.choice(db.get_group_list())
contact = random.choice(db.get_contact_list())
app.contact.add_contact_to_group(contact.id, group.id)
list.append(orm.get_contact_list()[0])
contact = random.choice(list)
group = random.choice(orm.get_group_where_contact(Contact(id=contact.id)))
old_contacts = orm.get_contacts_in_group(group)
app.contact.del_contact_in_group(contact.id, group.id)
new_contacts = orm.get_contacts_in_group(group)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| apache-2.0 | 7,627,593,699,910,538,000 | 41.44 | 101 | 0.681433 | false |
francisbrochu/microbiome-summer-school-2017_mass-spec | example/tutorial_code/alignment.py | 1 | 3212 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from .spectrum import Spectrum
from .spectrum_utils import take_closest, binary_search_mz_values
from subprocess import call
from os.path import join
from os import remove
class Mass_Spectra_Aligner():
def __init__(self, window_size=10):
self.window_size = window_size
self.reference_mz = []
def fit(self, spectra):
self._train(spectra)
def _train(self, spectra):
"""
Fill the reference_mz attribute with possible m/z values.
:param spectra: A set of Spectrum objects.
:return: Nothing
"""
path = "tutorial_code/cpp_extensions"
self._write_mz_values_to_file(spectra, path)
call([str(join(path, "alignment")),
"temp_spectra.csv",
str(self.window_size)])
self.reference_mz = self._read_reference_from_file(path)
def transform(self, spectra):
new_spectra = []
for i, s in enumerate(spectra):
new_spectra.append(self._apply(s))
return np.asarray(new_spectra)
def _apply(self, spec):
# Find closest point that is not outside possible window
# If point: change mz
# Else: keep or discard m/z?
aligned_mz = []
aligned_int = []
nf_mz = []
nf_int = []
for i, mz in enumerate(spec.mz_values):
possible_matches = []
try:
possible_matches = binary_search_mz_values(self.reference_mz, mz,
float(self.window_size))
except ValueError:
nf_mz.append(mz)
nf_int.append(spec.intensity_values[i])
continue
if (len(possible_matches) > 1):
possible_matches = [take_closest(possible_matches, mz)]
if (len(possible_matches) == 1):
aligned_mz.append(possible_matches[0])
aligned_int.append(spec.intensity_values[i])
else:
aligned_mz.append(mz)
aligned_int.append(spec.intensity_values[i])
nf_mz.append(mz)
nf_int.append(spec.intensity_values[i])
return Spectrum(np.asarray(aligned_mz), np.asarray(aligned_int),
spec.mz_precision, spec.metadata)
def _write_mz_values_to_file(self, spectra, path):
filename = "temp_spectra.csv"
f = open(filename,"w")
for s in spectra:
line = ""
for mz in s.mz_values:
line += str(mz)
line += ","
line = line[:-1]
line += "\n"
f.write(line)
f.close()
def _read_reference_from_file(self, path):
filename = "alignmentPoints.txt"
f = open(filename,"r")
line = f.readline().strip().split(" ")
mz_values = []
for mz in line:
mz_values.append(round(float(mz),4))
#clear temporary files
#remove("temp_spectra.csv")
#remove(filename)
return np.asarray(mz_values) | mit | -4,486,410,587,900,100,000 | 30.194175 | 83 | 0.540473 | false |
google-research/rigl | rigl/experimental/jax/pruning/init_test.py | 1 | 8125 | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.pruning.init."""
from typing import Any, Mapping, Optional
from absl.testing import absltest
import flax
import jax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import init
from rigl.experimental.jax.pruning import masked
class MaskedDense(flax.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
mask = None):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Dense,
mask=layer_mask,
kernel_init=flax.nn.initializers.kaiming_normal())
class MaskedDenseSparseInit(flax.nn.Module):
"""Single-layer Dense Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
mask = None,
**kwargs):
inputs = inputs.reshape(inputs.shape[0], -1)
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Dense,
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
class MaskedCNN(flax.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
mask = None):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=flax.nn.initializers.kaiming_normal())
class MaskedCNNSparseInit(flax.nn.Module):
"""Single-layer CNN Masked Network."""
NUM_FEATURES: int = 32
def apply(self,
inputs,
*args,
mask = None,
**kwargs):
layer_mask = mask['MaskedModule_0'] if mask else None
return masked.MaskedModule(
inputs,
features=self.NUM_FEATURES,
wrapped_module=flax.nn.Conv,
kernel_size=(3, 3),
mask=layer_mask,
kernel_init=init.kaiming_sparse_normal(
layer_mask['kernel'] if layer_mask is not None else None),
**kwargs)
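# Note: the *SparseInit module variants above differ from the dense ones only in
# swapping flax.nn.initializers.kaiming_normal for init.kaiming_sparse_normal(mask);
# the tests below check that this keeps the weight statistics broadly comparable to
# the dense baseline.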
class InitTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._rng = jax.random.PRNGKey(42)
self._batch_size = 2
self._input_shape = ((self._batch_size, 28, 28, 1), jnp.float32)
self._input = jnp.ones(*self._input_shape)
def test_init_kaiming_sparse_normal_output(self):
"""Tests the output shape/type of kaiming normal sparse initialization."""
input_array = jnp.ones((64, 16), jnp.float32)
mask = jax.random.bernoulli(self._rng, shape=(64, 16))
base_init = flax.nn.initializers.kaiming_normal()(self._rng,
input_array.shape,
input_array.dtype)
sparse_init = init.kaiming_sparse_normal(mask)(self._rng, input_array.shape,
input_array.dtype)
with self.subTest(name='test_sparse_init_output_shape'):
self.assertSequenceEqual(sparse_init.shape, base_init.shape)
with self.subTest(name='test_sparse_init_output_dtype'):
self.assertEqual(sparse_init.dtype, base_init.dtype)
with self.subTest(name='test_sparse_init_output_notallzero'):
self.assertTrue((sparse_init != 0).any())
def test_dense_no_mask(self):
"""Checks that in the special case of no mask, init is same as base_init."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedDense, initial_params)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=None)
self._masked_model_sparse_init = flax.nn.Model(MaskedDenseSparseInit,
initial_params)
self.assertTrue(
jnp.isclose(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'], self._unmasked_model.params['MaskedModule_0']
['unmasked']['kernel']).all())
def test_dense_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for dense layer."""
_, initial_params = MaskedDense.init_by_shape(self._rng,
(self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedDense, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedDenseSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.nn.Model(MaskedDenseSparseInit,
initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
with self.subTest(name='test_cnn_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
with self.subTest(name='test_cnn_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
def test_cnn_sparse_init_kaiming(self):
"""Checks kaiming normal sparse initialization for convolutional layer."""
_, initial_params = MaskedCNN.init_by_shape(self._rng, (self._input_shape,))
self._unmasked_model = flax.nn.Model(MaskedCNN, initial_params)
mask = masked.simple_mask(self._unmasked_model, jnp.ones,
masked.WEIGHT_PARAM_NAMES)
_, initial_params = MaskedCNNSparseInit.init_by_shape(
jax.random.PRNGKey(42), (self._input_shape,), mask=mask)
self._masked_model_sparse_init = flax.nn.Model(MaskedCNNSparseInit,
initial_params)
mean_init = jnp.mean(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
stddev_init = jnp.std(
self._unmasked_model.params['MaskedModule_0']['unmasked']['kernel'])
mean_sparse_init = jnp.mean(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
stddev_sparse_init = jnp.std(
self._masked_model_sparse_init.params['MaskedModule_0']['unmasked']
['kernel'])
with self.subTest(name='test_cnn_sparse_init_mean'):
self.assertBetween(mean_sparse_init, mean_init - 2 * stddev_init,
mean_init + 2 * stddev_init)
with self.subTest(name='test_cnn_sparse_init_stddev'):
self.assertBetween(stddev_sparse_init, 0.5 * stddev_init,
1.5 * stddev_init)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -4,098,234,479,641,906,000 | 34.021552 | 80 | 0.617231 | false |