filename | text
---|---|
the-stack_0_12769 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 11:58:01 2020
@author: Jhon Corro
@author: Cristhyan De Marchena
"""
import vtk
tube = vtk.vtkTubeFilter()
def get_program_parameters():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('data_file', nargs='?', default=None, help='data file')
parser.add_argument('texture_file', nargs='?',
default=None, help='texture file')
args = parser.parse_args()
return args.data_file, args.texture_file
def read_file(file_name):
import os
if(file_name):
path, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == ".vti":
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(file_name)
elif extension == ".vtp":
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(file_name)
elif extension == ".jpg":
readerFactory = vtk.vtkImageReader2Factory()
img_file = readerFactory.CreateImageReader2(file_name)
img_file.SetFileName(file_name)
img_file.Update()
reader = img_file
else:
# the file provided doesn't match the accepted extensions
reader = None
else:
reader = None
return reader
def generate_texture(texture_file):
texture_file = read_file(texture_file)
if(texture_file):
texture = vtk.vtkTexture()
texture.SetInputConnection(texture_file.GetOutputPort())
texture.InterpolateOn()
else:
texture = None
return texture
def generate_actors(data, texture):
# contour
iso = vtk.vtkContourFilter()
iso.SetInputConnection(data.GetOutputPort())
iso.GenerateValues(19, -10000, 8000)
ctf = vtk.vtkColorTransferFunction()
ctf.AddRGBPoint(-10000, 31/255, 162/255, 255/255)
ctf.AddRGBPoint(-1, 1, 1, 1)
ctf.AddRGBPoint(0, 255/255, 47/255, 61/255)
ctf.AddRGBPoint(1, 1, 1, 1)
ctf.AddRGBPoint(8000, 255/255, 251/255, 19/255)
# tubes
global tube
tube.SetInputConnection(iso.GetOutputPort())
tube.SetRadius(1000)
tube.SetNumberOfSides(5)
# Add iso surface mapper.
isoMapper = vtk.vtkDataSetMapper()
isoMapper.SetLookupTable(ctf)
isoMapper.SetInputConnection(tube.GetOutputPort())
isoMapper.SetScalarRange(0, 255)
# mapper.ScalarVisibilityOff()
# Generate iso surface actor from iso surface mapper.
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
# Add mapper.
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(data.GetOutputPort())
mapper.SetScalarRange(0, 255)
mapper.ScalarVisibilityOff()
# generate actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.SetTexture(texture)
# Included CTF so it can be set to the scalar bar SetLookUpTable
return [actor, isoActor, ctf]
def generate_slide_bar():
# Slidebar colors
red_r = 224/255
red_g = 69/255
red_b = 85/255
green_r = 70/255
green_g = 224/255
green_b = 105/255
white = 242/255
# Create Slidebar
slide_bar = vtk.vtkSliderRepresentation2D()
# Set range and title.
slide_bar.SetMinimumValue(100)
slide_bar.SetMaximumValue(10000.0)
slide_bar.SetValue(1000)
slide_bar.SetTitleText("Tube radius")
# Set colors.
slide_bar.GetSliderProperty().SetColor(red_r, red_g, red_b)
slide_bar.GetTitleProperty().SetColor(white, white, white)
slide_bar.GetLabelProperty().SetColor(red_r, red_g, red_b)
slide_bar.GetSelectedProperty().SetColor(green_r, green_g, green_b)
slide_bar.GetTubeProperty().SetColor(white, white, white)
slide_bar.GetCapProperty().SetColor(red_r, red_g, red_b)
# Set coordinates.
slide_bar.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
slide_bar.GetPoint1Coordinate().SetValue(0.78, 0.1)
slide_bar.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
slide_bar.GetPoint2Coordinate().SetValue(0.98, 0.1)
return slide_bar
def custom_callback(obj, event):
# print("interaction called")
value = int(obj.GetRepresentation().GetValue())
global tube
tube.SetRadius(value)
tube.Update()
def generate_gui(actors):
# Create renderer stuff
renderer = vtk.vtkRenderer()
renderer_window = vtk.vtkRenderWindow()
renderer_window.AddRenderer(renderer)
renderer_window_interactor = vtk.vtkRenderWindowInteractor()
renderer_window_interactor.SetRenderWindow(renderer_window)
# Add slide bar
slide_bar = generate_slide_bar()
slide_bar.SetLabelFormat("%-#6.2f")
slider_widget = vtk.vtkSliderWidget()
slider_widget.SetInteractor(renderer_window_interactor)
slider_widget.SetRepresentation(slide_bar)
slider_widget.AddObserver("InteractionEvent", custom_callback)
slider_widget.EnabledOn()
# Attempt to create scalar bar
# Create the scalar_bar
scalar_bar = vtk.vtkScalarBarActor()
scalar_bar.SetOrientationToHorizontal()
scalar_bar.SetTextPositionToPrecedeScalarBar()
scalar_bar.UnconstrainedFontSizeOff()
# Pops CTF from the actors' list
scalar_bar.SetLookupTable(actors.pop())
scalar_bar.SetNumberOfLabels(3)
scalar_bar.SetLabelFormat("%-6.0f")
# These position/size settings do not seem to be applied; not entirely sure why
scalar_bar.SetPosition(0.24, 0.08)
scalar_bar.SetHeight(0.1)
scalar_bar.SetWidth(0.5)
# Add the actor and camera to the renderer, set background and size
for index, actor in enumerate(actors):
renderer.AddActor(actor)
renderer.AddActor2D(scalar_bar)
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(180)
renderer.GetActiveCamera().Roll(180)
renderer.GetActiveCamera().Yaw(0)
renderer.GetActiveCamera().Elevation(0)
renderer.SetBackground(0.1, 0.1, 0.1)
renderer.ResetCameraClippingRange()
renderer_window.SetSize(renderer_window.GetScreenSize())
cam1 = renderer.GetActiveCamera()
cam1.Zoom(1.5)
# Smoother camera controls
renderer_window_interactor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
renderer_window_interactor.Initialize()
renderer_window.Render()
renderer_window.SetWindowName('Heightfield Visualizer')
renderer_window.Render()
renderer_window_interactor.Start()
def main():
# Get file paths from cli params.
data_file, texture_file = get_program_parameters()
# Read data file.
data = read_file(data_file)
if(data):
# Generate texture.
texture = generate_texture(texture_file)
if(texture):
# Generate actor.
actors = generate_actors(data, texture)
# Generate GUI
generate_gui(actors)
else:
print(
'The texture file was not found or the file provided does not match the .jpg extension.')
else:
print('The data file was not found or the file provided does not match either the .vti or .vtp extension.')
if __name__ == '__main__':
main()
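# Usage sketch (illustrative; the file names below are invented, only the CLI shape
# comes from get_program_parameters() above):
#
#   python heightfield_visualizer.py elevation.vtp earth_texture.jpg
#
# The first positional argument is read as the .vti/.vtp data file and the second as
# the .jpg texture; if either is missing or has an unexpected extension, the
# corresponding error message above is printed instead of opening the window.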
|
the-stack_0_12770 | import logging
import os
import shutil
import re
from collections import defaultdict
import sys
from bs4 import BeautifulSoup
def _getSubstring(block, delimiters):
# No error checking...don't do anything dumb
return block[delimiters[0]:delimiters[1]]
def _textify(block):
"""
Smash down any html formatting in the provided string
"""
# Format html lists as python/Sphinx lists.
block = block.replace("<li>","<li>* ")
return re.sub('[^\x00-\x7f]', ' ', BeautifulSoup(block, 'lxml').get_text()) # explicitly scrub non-ascii chars
def _padString(strIn, padding=None):
"""
Replace every endline with endline + (#padding) spaces, for indent formatting
"""
if padding is None:
return strIn
if not (isinstance(padding, int) and padding >= 0):
raise ValueError("Invalid padding argument {} ".format(padding))
pad = ' '*padding
return pad + strIn.replace('\n', '\n'+pad)
def _docstringify(strIn, padding=None, collapseEmpty=True):
if strIn is None:
return None
stripped = strIn.strip()
if len(stripped) < 1 and collapseEmpty:
return None
if padding is None:
return stripped
return _padString('\n' + stripped + '\n', padding)
def _htmlUnescape(htmlIn, parts=None, additionalParts=None):
if parts is None:
parts = {
" ": " ", " ": " ", " ": " ", # non-breaking space
"​": ""
}
if additionalParts is not None:
parts.update(additionalParts)
out = htmlIn
for key in parts:
out = out.replace(key, parts[key])
return out
def _findBlock(strIn, startString, endString, startLimit=None, endLimit=None, inclusive=False):
if startLimit is None:
startLimit = 0
if endLimit is None:
endLimit = len(strIn)
if endLimit <= startLimit:
return None
try:
start = strIn.index(startString, startLimit, endLimit)
except ValueError as e:
return None
try:
end = strIn.index(endString, start+len(startString), endLimit)
except ValueError as e:
if inclusive:
return start, None
else:
return start+len(startString), None
if inclusive:
return start, end+len(endString)
else:
return start+len(startString), end
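# Worked examples for _findBlock (illustrative, derived from the logic above):
#   _findBlock('<b>hi</b>', '<b>', '</b>')                  -> (3, 5)    # delimiters excluded
#   _findBlock('<b>hi</b>', '<b>', '</b>', inclusive=True)  -> (0, 9)    # delimiters included
#   _findBlock('<b>hi', '<b>', '</b>')                      -> (3, None) # no end marker found
#   _findBlock('plain text', '<b>', '</b>')                 -> None      # no start marker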
def _splitTerms(part, delim=' ', secDelim=None):
def parseAnchor(block):
termDelimiters = _findBlock(block, '<a', '</a>', inclusive=True)
if termDelimiters is None:
return block
titleDelimiters = _findBlock(block, 'title="', '"', startLimit=termDelimiters[0], inclusive=False)
path = _getSubstring(block, titleDelimiters).split()[-1]
elementPart = _findBlock(block, '>', '</a>', startLimit=titleDelimiters[1], inclusive=False)
element = _getSubstring(block, elementPart)
end = ""
if len(block) > termDelimiters[1]:
end = block[termDelimiters[1]:]
return path + "." + element + end
def parseBrackets(block, dlim, subBrackets=True):
# find and process <> blocks
outblocks = []
cont = True
previousEnd = 0
while cont:
termDelimiters = _findBlock(block, '&lt;', '&gt;', startLimit=previousEnd, inclusive=True)
if termDelimiters is None:
cont = False
else:
# Is this <> nested?
starts = [termDelimiters[0], ]
ends = [termDelimiters[1], ]
cont2 = True
while cont2:
tempDelimiters = _findBlock(block, '&lt;', '&gt;', startLimit=starts[-1] + 4, inclusive=True)
if tempDelimiters is not None and tempDelimiters[0] < ends[0]:
# we found another block.
try:
blockEnd = block.index('&gt;', ends[-1]) # we have to advance to the proper end
except Exception:
logging.error('We failed to find the new end {}, {},\n\t{}'.format(starts, ends, block))
raise
starts.append(tempDelimiters[0])
ends.append(blockEnd)
else:
cont2 = False
start = starts[0]
end = ends[-1]
# backtrack start to previous delimiter
try:
moveTo = block[start::-1].index(dlim)
start -= moveTo
except ValueError:
start = 0 # there is no previous delimiter
# advance end to next delimiter
try:
moveTo = block.index(dlim, end)
end = moveTo
except ValueError:
end = len(block) # there is no next delimiter
if start > previousEnd:
temp = block[previousEnd:start].strip().split(dlim)
outblocks.extend([el.strip() for el in temp if len(el.strip()) > 0])
if subBrackets:
outblocks.append(_htmlUnescape(block[start:end].strip(),
additionalParts={'&lt;': '<', '&gt;': '>'}))
else:
outblocks.append(block[start:end].strip())
previousEnd = end
else:
if previousEnd < len(block):
temp = block[previousEnd:].strip().split(dlim)
outblocks.extend([el.strip() for el in temp if len(el.strip()) > 0])
return outblocks
# find and replace all anchor segments
part1 = ""
cont = True
previousEnd = 0
while cont:
termDelimiters = _findBlock(part, '<a', '</a>', startLimit=previousEnd, inclusive=True)
if termDelimiters is not None:
start = termDelimiters[0]
end = termDelimiters[1]
part1 += part[previousEnd:start] + parseAnchor(part[start:end])
previousEnd = end
else:
cont = False
else:
part1 += part[previousEnd:]
# find and process <> blocks
if secDelim is None:
return parseBrackets(part1, delim, subBrackets=True)
else:
blocks = []
for theBlock in parseBrackets(part1, delim, subBrackets=False):
blocks.append(parseBrackets(theBlock, secDelim, subBrackets=True))
return blocks
def _parseSignature(sigString, methodName):
# get rid of the junk elements
sigString = _htmlUnescape(sigString, additionalParts={'\n': ' '})
segments = sigString.split(methodName+'(')
# segments[0] = modifiers (w. generics info) and return type
# segments[1] = params info, then any thrown exception details
# parse the return type and modifiers
modifierParts = _splitTerms(segments[0].strip())
returnType = modifierParts[-1]
modifiers = []
genericsInfo = None
allowedModifiers = {'public', 'private', 'protected', 'static', 'abstract', 'default', 'final', 'strictfp',
'java.lang.@Deprecated', 'io.deephaven.util.annotations.@ScriptApi'}
if len(modifierParts) > 1:
for el in modifierParts[:-1]:
if el in allowedModifiers:
modifiers.append(el)
elif not el.startswith('@'):
genericsInfo = el
other = segments[1].strip().split(" throws ")
params = []
paramString = other[0].strip()[:-1] # eliminate trailing parenthesis from params
if len(paramString) > 0:
params = _splitTerms(paramString, delim=',', secDelim=' ')
# Not especially interested in parsing anything the method throws?
return modifiers, genericsInfo, returnType, params
class ClassDocParser(object):
"""This parses the desired components from the provided java doc (page?)"""
def __init__(self, docString):
self._docString = docString
self._methods = defaultdict(self._newMethodItem)
self._package = None
self._symbol = None
self._type = None
self._text = None
# parse the symbol information
self._parseSymbol()
# parse the method details
self._parseMethods()
# todo: parse any other details?
@property
def docString(self):
"""The provided doc string"""
return self._docString
@property
def methods(self):
"""Dictionary of the form {'<symbol>#method' : MethodDetails object}"""
return self._methods
@property
def className(self):
"""The class name for this class"""
if self._package is None or self._symbol is None:
raise ValueError("Package or Symbol not parsed successfully")
return self._package + '.' + self._symbol
@property
def pathName(self):
"""The fully qualified path name for this class"""
if self._package is None or self._symbol is None:
raise ValueError("Package or Symbol not parsed successfully")
return self.className.replace('$', '.')
@property
def isNested(self):
"""Is this nested in another class?"""
if self._symbol is None:
raise ValueError("Symbol not parsed successfully")
return '$' in self._symbol
@property
def parentPath(self):
"""The parent path if nested class/interface, or None"""
if not self.isNested:
return None
ind = self._symbol[-1::-1].index('$')
return self._package + '.' + self._symbol[:-ind]
@property
def type(self):
"""interface, enum, or class?"""
return self._type
@property
def text(self):
"""Document string for the class itself"""
return self._text
def __str__(self):
return 'ClassDocParser<< pathName={}, type={} >>'.format(self.pathName, self.type)
def __repr__(self):
out = []
for key in sorted(self.methods.keys()):
out2 = ',\n'.join([str(meth) for meth in self.methods[key]])
out.append('{}=[\n{}\n]'.format(key, out2))
if self.isNested:
return 'ClassDocParser(\n' + \
'pathName={}\n,'.format(self.pathName) + \
'className={}\n,'.format(self.className) + \
'methods={\n' + ',\n'.join(out) + '})'
else:
return 'ClassDocParser(\n' + \
'pathName={}\n,'.format(self.pathName) + \
'methods={\n' + ',\n'.join(out) + '})'
@staticmethod
def _newMethodItem():
"""Helper method"""
return []
def _parseSymbol(self):
# find the symbol information
classStartBlock = '<!-- ======== START OF CLASS DATA ======== -->'
packageStartBlock = '<div class="subTitle">'
packageEndBlock = '</div'
symbolStartBlock = '<h2'
symbolEndBlock = '</h2>'
symbolInfoDelimiters = _findBlock(self.docString, classStartBlock, symbolEndBlock, inclusive=True)
if symbolInfoDelimiters is None:
raise ValueError('Failed to find the symbol information block')
symbolInfoBlock = _getSubstring(self.docString, symbolInfoDelimiters)
packageInfoDelimiters = _findBlock(symbolInfoBlock, packageStartBlock, packageEndBlock, inclusive=True)
if packageInfoDelimiters is None:
raise ValueError('Failed to find the package block inside the symbol '
'information block = {}'.format(symbolInfoBlock))
pack = _textify(_getSubstring(symbolInfoBlock, packageInfoDelimiters)).strip().split()[-1]
self._package = pack
symbolBlockDelimiters = _findBlock(symbolInfoBlock, symbolStartBlock, symbolEndBlock, inclusive=True)
if symbolBlockDelimiters is None:
raise ValueError('Failed to find the symbol block inside the symbol '
'information block = {}'.format(symbolInfoBlock))
symb = _textify(_getSubstring(symbolInfoBlock, symbolBlockDelimiters)).strip()
# is this a class or an interface?
temp = symb.lower().split()
if 'interface' in temp:
self._type = 'interface'
elif 'enum' in temp:
self._type = 'enum'
else:
self._type = 'class'
# get rid of bracket crapola
try:
ind = symb.index('<')
symb = symb[:ind]
except ValueError:
pass
# get rid of any initial cruft
symb = symb.split()[-1]
symb = symb.replace('.', '$')
self._symbol = symb
# Try to parse the text for this class/enum/interface
classDetailsStartBlock = '<div class="description">' # after symbolEndBlock
classDetailsEndBlock = '<div class="summary">'
classSpecificStart = '<pre>'
classSpecificEnd = '</pre>'
textStart = '<div class="block">' # directly after class specific stuff
textEnd = "</div>"
classDetailsDelimiters = _findBlock(self.docString, classDetailsStartBlock, classDetailsEndBlock,
startLimit=symbolInfoDelimiters[1], inclusive=False)
if classDetailsDelimiters is not None:
classBlock = _getSubstring(self.docString, classDetailsDelimiters)
# find the class specific stuff
classSpecificDelimiters = _findBlock(classBlock, classSpecificStart, classSpecificEnd, inclusive=True)
if classDetailsDelimiters is not None:
textDelimiters = _findBlock(classBlock, textStart, textEnd,
startLimit=classSpecificDelimiters[1], inclusive=True)
if textDelimiters is not None:
self._text = _textify(_getSubstring(classBlock, textDelimiters))
def _parseMethods(self):
# look for a methods section
methodStartString = '<h3>Method Detail</h3>'
methodEndString = '</section>'
limits = _findBlock(self.docString, methodStartString, methodEndString, inclusive=False)
if limits is not None:
methodBlockString = self.docString[limits[0]:limits[1]]
thisStart = 0
theEnd = len(methodBlockString)
# iterate over each method and populate
while (thisStart is not None) and thisStart < theEnd:
methodLimits = _findBlock(methodBlockString, '<li class="blockList">\n<h4>',
'</li>\n</ul>', thisStart, theEnd, inclusive=True)
if methodLimits is not None:
if self.type == 'interface':
defMods = {'public', } # everything for an interface is implicitly public
else:
defMods = set()
methodDetail = MethodDetail(methodBlockString, methodLimits[0], methodLimits[1], defaultModifiers=defMods)
self.methods[methodDetail.name].append(methodDetail)
thisStart = methodLimits[1]
else:
thisStart = None
class MethodDetail(object):
ignoreInSignature = {','}
def __init__(self, strIn, start, end, defaultModifiers=set()):
self.name = None
self.modifiers = None
self.genericsInfo = None
self.returnType = None
self.returnText = None
self.parameters = []
self.parameterTypes = []
self.parametersText = {}
self.text = None
self.documentBlock = strIn[start:end]
thisEnd = len(self.documentBlock)
step = self._getName(0, thisEnd)
if step is not None:
step = self._getSignature(step, thisEnd)
# add in any default modifiers
if self.modifiers is None:
self.modifiers = defaultModifiers
else:
self.modifiers = defaultModifiers.union(self.modifiers)
# make parameters & parameters a tuple - must be hashable
self.parameters = tuple(self.parameters)
self.parameterTypes = tuple(self.parameterTypes)
if step is not None:
step = self._getText(step, thisEnd)
if step is not None:
step = self._getParameterDetails(step, thisEnd)
def __str__(self):
out = []
for arg in ['name', 'modifiers', 'genericsInfo', 'text', 'parameters', 'parameterTypes',
'parametersText', 'returnType', 'returnText']:
val = getattr(self, arg)
if val is not None:
out.append('{}={}'.format(arg, val))
return 'MethodDetail(\n\t' + ',\n\t'.join(out) + ')'
def _getName(self, start, end):
"""Parses name and returns the end of the name block"""
nameStartString = '<h4>'
nameEndString = '</h4>'
nameDelimiters = _findBlock(self.documentBlock, nameStartString, nameEndString, start, end, inclusive=False)
if nameDelimiters is not None:
if nameDelimiters[1] is not None:
self.name = self.documentBlock[nameDelimiters[0]:nameDelimiters[1]]
return nameDelimiters[1] + len(nameEndString)
else:
self.name = self.documentBlock[nameDelimiters[0]:end]
return None
def _getSignature(self, start, end):
"""Parses signature and returns the end of the signature block"""
sigStartString = ['<pre class="methodSignature">', '<pre>']
sigEndString = '</pre>'
sigDelimiters = None
for sigStartStr in sigStartString:
if sigDelimiters is None:
sigDelimiters = _findBlock(self.documentBlock, sigStartStr, sigEndString,
start, end, inclusive=False)
if sigDelimiters is None or sigDelimiters[1] is None:
return None
modifiers, genericsInfo, returnType, params = _parseSignature(self.documentBlock[sigDelimiters[0]: sigDelimiters[1]], self.name)
self.modifiers = modifiers
self.genericsInfo = genericsInfo
self.returnType = returnType
badParsing = False
for seg in params:
el = [entry for entry in seg if entry not in self.ignoreInSignature]
if len(el) == 2:
self.parameterTypes.append(el[0])
self.parameters.append(el[1])
elif len(el) == 3:
self.parameterTypes.append(el[1])
self.parameters.append(el[2])
else:
logging.error("Misparsed argument {}".format(el))
badParsing = True
if badParsing:
logging.error('Evidently bad parsing for the parameters in {}'.format(
_htmlUnescape(self.documentBlock[sigDelimiters[0]: sigDelimiters[1]])))
raise ValueError
return sigDelimiters[1] + len(sigEndString)
def _getText(self, start, end):
"""Parses method text - if it's there - and returns the next starting point"""
textStartString = '<div class="block">'
textEndString = '</div>'
block = None
while block is None:
textDelimiters = _findBlock(self.documentBlock, textStartString, textEndString, start, end, inclusive=False)
if textDelimiters is None or textDelimiters[1] is None:
return start
block = self.documentBlock[textDelimiters[0]:textDelimiters[1]]
# we should squish the html formatting out of the text
if "Description copied" in block:
block = None
start = textDelimiters[1]
self.text = _textify(block)
return textDelimiters[1] + len(textEndString)
def _getParameterDetails(self, start, end):
"""Parses parameter details text - if it's there - and returns the next starting point"""
paramStartString = '<dl>\n<dt><span class="paramLabel">Parameters:</span></dt>\n'
returnStartString = '<dt><span class="returnLabel">Returns:</span></dt>\n'
blockEnd = '</dl>\n</li>'
paramsDelimiters = _findBlock(self.documentBlock, paramStartString, blockEnd, start, end, inclusive=False)
returnsDelimiters = _findBlock(self.documentBlock, returnStartString, blockEnd, start, end, inclusive=False)
paramsBlock = None
returnsBlock = None
endPoint = start
if paramsDelimiters is None and returnsDelimiters is None:
return start
elif returnsDelimiters is None:
# just params block
paramsBlock = self.documentBlock[paramsDelimiters[0]:paramsDelimiters[1]]
endPoint = paramsDelimiters[1] + len(blockEnd)
elif paramsDelimiters is None:
# just returns block
returnsBlock = self.documentBlock[returnsDelimiters[0]:returnsDelimiters[1]]
endPoint = returnsDelimiters[1] + len(blockEnd)
else:
# both are present
paramsBlock = self.documentBlock[paramsDelimiters[0]: returnsDelimiters[0]-len(returnStartString)]
returnsBlock = self.documentBlock[returnsDelimiters[0]:returnsDelimiters[1]]
endPoint = returnsDelimiters[1] + len(blockEnd)
entryStartString = '<dd>'
entryEndString = '</dd>'
pNameStartString = '<code>'
pNameEndString = '</code>'
if returnsBlock is not None:
returnTextDelimiters = _findBlock(returnsBlock, entryStartString, entryEndString, inclusive=False)
if returnTextDelimiters is not None:
self.returnText = _textify(returnsBlock[returnTextDelimiters[0]:returnTextDelimiters[1]])
if paramsBlock is not None:
paramsStep = 0
while (paramsStep is not None) and (paramsStep < len(paramsBlock)):
thisParamDelimiters = _findBlock(paramsBlock, entryStartString, entryEndString, paramsStep, inclusive=False)
paramsStep = None
if thisParamDelimiters is not None:
paramsStep = thisParamDelimiters[0]
paramNameDelimiters = _findBlock(paramsBlock, pNameStartString, pNameEndString, paramsStep, inclusive=False)
paramsStep = None
if paramNameDelimiters is not None:
self.parametersText[paramsBlock[paramNameDelimiters[0]:paramNameDelimiters[1]]] = \
_textify(paramsBlock[paramNameDelimiters[1] + 7:thisParamDelimiters[1]])
paramsStep = thisParamDelimiters[1] + len(entryEndString)
return endPoint
def createDocString(self, padding=None, excludeText=False, collapseEmpty=True):
out = ""
if (self.text is not None) and (len(self.text) > 0) and (not excludeText):
out += '{}\n\n'.format(self.text)
if self.genericsInfo is not None:
out += 'Note: Java generics information - {}\n\n'.format(self.genericsInfo)
for pname, ptype in zip(self.parameters, self.parameterTypes):
pText = self.parametersText.get(pname, None)
if pText is None:
out += ':param {}: {}\n'.format(pname, ptype)
else:
out += ':param {}: ({}) {}\n'.format(pname, ptype, pText)
if self.returnType is not None and self.returnType != 'void':
if self.returnText is None:
out += ':return: {}\n'.format(self.returnType)
else:
out += ':return: ({}) {}\n'.format(self.returnType, self.returnText)
return _docstringify(out, padding, collapseEmpty=collapseEmpty)
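# Illustrative shape of the docstring emitted by createDocString() (the method text,
# parameter names and types here are invented; the layout follows the format strings above):
#
#   Returns the sum of the two inputs.
#
#   :param a: (int) the first operand
#   :param b: (int) the second operand
#   :return: (int) the sum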
def methodDigest(methodDetailList, details, requiredModifiers={'public'}, maxCount=5, padding=None, verbose=False):
maxMaxCount = 50
try:
maxCount = int(maxCount)
except ValueError:
maxCount = 5
finally:
if maxCount < 1:
logging.warning("maxCount was set to {} (< 1), and will be redefined as 1".format(maxCount))
maxCount = 1
if maxCount > maxMaxCount:
logging.warning("maxCount was set to {} (> {}), and will be redefined as {}".format(maxCount, maxMaxCount, maxMaxCount))
maxCount = maxMaxCount
useList = []
for el in methodDetailList:
mods = requiredModifiers.intersection(el.modifiers)
if mods == requiredModifiers:
useList.append(el)
if len(useList) < 1:
return _docstringify(None, padding)
# Is there just one MethodDetail? If so, just return a decent doc string
if len(useList) == 1:
return useList[0].createDocString(padding)
# digest all the things
text = set()
for el in useList:
if el.text is None:
text.add("")
else:
text.add(el.text.strip())
# Is there just one text?
if len(text) == 1:
textPart = text.pop()
else:
texts = {el.text for el in useList if el.text is not None}
texts = list(texts)
texts.sort()
if len(texts) == 0:
textPart = None
elif len(texts) == 1:
textPart = texts[0]
else:
textPart = "**Incompatible overloads text - text from the first overload:**\n\n{}".format(texts[0])
if verbose:
className = details["className"]
print(f"vvvvv INCOMPATIBLE JAVADOC FOR PYTHON vvvvv")
print(f"\tclassName: {className}\n")
print(f"\t{useList[0]}\n")
for i in range(len(texts)):
txt = texts[i].replace("\n"," ")
print(f"\tdocstring {i}: {txt}")
print(f"^^^^^ INCOMPATIBLE JAVADOC FOR PYTHON ^^^^^")
if textPart is None:
out = ""
else:
out = '{}\n\n'.format(textPart.strip())
if len(useList) > 2*maxCount-1:
out += "There are {} overloads, restricting signature summary to first {}:\n".format(len(useList), maxCount)
for i, md in enumerate(useList[:maxCount]):
out += "*Overload {}*{}\n".format(i+1, md.createDocString(padding=2, excludeText=True, collapseEmpty=False))
else:
for i, md in enumerate(useList):
out += "*Overload {}*{}\n".format(i+1, md.createDocString(padding=2, excludeText=True, collapseEmpty=False))
return _docstringify(out, padding)
if __name__ == '__main__':
# NOTE: this will fail (currently) unless the working directory is this location
from docGenUtil import populateCurrentDocs, classDocGeneration, finalize
maxSignatures = 50
verbose = False
# NOTE: weak arg parsing here, do we need more?
if len(sys.argv) < 2:
raise ValueError("The script requires at least one argument: devroot")
if sys.argv[1].lower() in ['-h', '--help']:
print("Called as:\n"
" python javadocExtraction.py <devroot> <assertNoChange>[False]\n"
"\n"
" - <devroot> specifies the development root, below which we expect directories\n"
" `build/docs/javadoc` and `Integrations/python/deephaven/doc`\n"
" - <assertNoChange> [default `False`] optional argument.\n"
" * False indicates to extract the javadocs to .json format below\n"
" `Integrations/python/deephaven/doc`\n"
" * True indicates to check that the .json files in the file system below\n"
" `Integrations/python/deephaven/doc` match what WOULD be generated.\n"
" **NO ACTUAL GENERATION HERE**")
# Parse the arguments
devRoot = sys.argv[1]
assertNoChange = False
if len(sys.argv) > 2:
assert_t = sys.argv[2].lower()
if assert_t in ['true', 't', '1']:
assertNoChange = True
docRoot = os.path.join(devRoot, 'build', 'docs', 'javadoc')
outDir = os.path.join(devRoot, 'Integrations', 'python', 'deephaven', 'doc')
# junk any contents of outDir, if it exists - it's easier than trying to sync somehow
if (not assertNoChange) and os.path.exists(outDir):
shutil.rmtree(outDir)
# walk the contents of outDir, and figure the current list of javadoc extracts
currentDocs = populateCurrentDocs(outDir)
# walk down the com directory of docRoot, and find all the html files
for root, dirs, files in os.walk(os.path.join(docRoot, 'com')):
for fil in files:
fstem, fext = os.path.splitext(fil)
if (fstem[0] == '.') or (fext != '.html') or (fstem.startswith('package-')):
continue
# parse the file
with open(os.path.join(root, fil), 'r', encoding="utf8") as fi:
classDetails = ClassDocParser(fi.read())
logging.info('Converting docs for {}'.format(classDetails))
# get classname, pathname and text for class/interface/enum itself
className = classDetails.className
pathName = classDetails.pathName
symbDocString = _docstringify(classDetails.text, padding=None)
# prepare the docstring dictionary
details = {"className": className, "path": pathName, "typeName": classDetails.type}
if symbDocString is None:
logging.info("className = {} has empty doc string".format(className))
else:
details["text"] = symbDocString
# parse details for explicit methods
methodDetails = {}
for methodName in classDetails.methods:
methodList = classDetails.methods[methodName]
entryDocString = methodDigest(methodList, details, requiredModifiers={'public'}, maxCount=maxSignatures, padding=None, verbose=verbose)
if entryDocString is None:
logging.info("className = {}, methodName = {} has empty docstring".format(className, methodName))
else:
methodDetails[methodName] = entryDocString
details["methods"] = methodDetails
# finalize the generation task for this class
classDocGeneration(currentDocs, assertNoChange, details, outDir)
finalize(currentDocs, assertNoChange, '\nTo resolve failure, run the task "./gradlew :Generators:generatePyDoc -PwithPy=true" '
'to regenerate, and then commit the generated changes.\n'
'To diagnose trouble, run the generation task followed by \"git diff\" to see the changes.\n'
'To diagnose possible indeterminism in the generation process, regenerate the code and check '
'the diff **multiple times**.')
|
the-stack_0_12773 | import os
import unittest
from programytest.client import TestClient
class StarPrecedenceTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(StarPrecedenceTestClient, self).load_storage()
self.add_default_stores()
self.add_single_categories_store(os.path.dirname(__file__) + os.sep + "precedence.aiml")
class StarPrecedenceAIMLTests(unittest.TestCase):
def setUp(self):
client = StarPrecedenceTestClient()
self._client_context = client.create_client_context("testid")
def test_star_precedence(self):
response = self._client_context.bot.ask_question(self._client_context, "FIRSTWORD")
self.assertIsNotNone(response)
self.assertEqual(response, 'FOUND1.')
response = self._client_context.bot.ask_question(self._client_context, "SECONDWORD")
self.assertIsNotNone(response)
self.assertEqual(response, 'NOTHING FOUND.')
|
the-stack_0_12774 | from chef import DataBag, DataBagItem, Search
from chef.exceptions import ChefError
from chef.tests import ChefTestCase
class DataBagTestCase(ChefTestCase):
def test_list(self):
bags = DataBag.list()
self.assertIn('test_1', bags)
self.assertIsInstance(bags['test_1'], DataBag)
def test_keys(self):
bag = DataBag('test_1')
self.assertItemsEqual(bag.keys(), ['item_1', 'item_2'])
self.assertItemsEqual(iter(bag), ['item_1', 'item_2'])
def test_item(self):
bag = DataBag('test_1')
item = bag['item_1']
self.assertEqual(item['test_attr'], 1)
self.assertEqual(item['other'], 'foo')
def test_search_item(self):
self.assertIn('test_1', Search.list())
q = Search('test_1')
self.assertIn('item_1', q)
self.assertIn('item_2', q)
self.assertEqual(q['item_1']['raw_data']['test_attr'], 1)
item = q['item_1'].object
self.assertIsInstance(item, DataBagItem)
self.assertEqual(item['test_attr'], 1)
def test_direct_item(self):
item = DataBagItem('test_1', 'item_1')
self.assertEqual(item['test_attr'], 1)
self.assertEqual(item['other'], 'foo')
def test_direct_item_bag(self):
bag = DataBag('test_1')
item = DataBagItem(bag, 'item_1')
self.assertEqual(item['test_attr'], 1)
self.assertEqual(item['other'], 'foo')
def test_create_bag(self):
name = self.random()
bag = DataBag.create(name)
self.register(bag)
self.assertIn(name, DataBag.list())
def test_create_item(self):
value = self.random()
bag_name = self.random()
bag = DataBag.create(bag_name)
self.register(bag)
item_name = self.random()
item = DataBagItem.create(bag, item_name, foo=value)
self.assertIn('foo', item)
self.assertEqual(item['foo'], value)
self.assertIn(item_name, bag)
bag2 = DataBag(bag_name)
self.assertIn(item_name, bag2)
item2 = bag2[item_name]
self.assertIn('foo', item)
self.assertEqual(item['foo'], value)
def test_set_item(self):
value = self.random()
value2 = self.random()
bag_name = self.random()
bag = DataBag.create(bag_name)
self.register(bag)
item_name = self.random()
item = DataBagItem.create(bag, item_name, foo=value)
item['foo'] = value2
item.save()
self.assertEqual(item['foo'], value2)
item2 = DataBagItem(bag, item_name)
self.assertEqual(item2['foo'], value2)
|
the-stack_0_12776 | from .position import Position
class Portfolio(object):
def __init__(self, price_handler, cash):
"""
On creation, the Portfolio object contains no
positions and all values are "reset" to the initial
cash, with no PnL - realised or unrealised.
Note that realised_pnl is the running tally of pnl from closed
positions (closed_pnl), as well as realised_pnl
from currently open positions.
"""
self.price_handler = price_handler
self.init_cash = cash
self.equity = cash
self.cur_cash = cash
self.positions = {}
self.closed_positions = []
self.realised_pnl = 0
def _update_portfolio(self):
"""
Updates the value of all positions that are currently open.
Value of closed positions is tallied as self.realised_pnl.
"""
self.unrealised_pnl = 0
self.equity = self.realised_pnl
self.equity += self.init_cash
for ticker in self.positions:
pt = self.positions[ticker]
if self.price_handler.istick():
bid, ask = self.price_handler.get_best_bid_ask(ticker)
else:
close_price = self.price_handler.get_last_close(ticker)
bid = close_price
ask = close_price
pt.update_market_value(bid, ask)
self.unrealised_pnl += pt.unrealised_pnl
pnl_diff = pt.realised_pnl - pt.unrealised_pnl
self.equity += (
pt.market_value - pt.cost_basis + pnl_diff
)
def _add_position(
self, action, ticker,
quantity, price, commission
):
"""
Adds a new Position object to the Portfolio. This
requires getting the best bid/ask price from the
price handler in order to calculate a reasonable
"market value".
Once the Position is added, the Portfolio values
are updated.
"""
if ticker not in self.positions:
if self.price_handler.istick():
bid, ask = self.price_handler.get_best_bid_ask(ticker)
else:
close_price = self.price_handler.get_last_close(ticker)
bid = close_price
ask = close_price
position = Position(
action, ticker, quantity,
price, commission, bid, ask
)
self.positions[ticker] = position
self._update_portfolio()
else:
print(
"Ticker %s is already in the positions list. "
"Could not add a new position." % ticker
)
def _modify_position(
self, action, ticker,
quantity, price, commission
):
"""
Modifies a current Position object to the Portfolio.
This requires getting the best bid/ask price from the
price handler in order to calculate a reasonable
"market value".
Once the Position is modified, the Portfolio values
are updated.
"""
if ticker in self.positions:
self.positions[ticker].transact_shares(
action, quantity, price, commission
)
if self.price_handler.istick():
bid, ask = self.price_handler.get_best_bid_ask(ticker)
else:
close_price = self.price_handler.get_last_close(ticker)
bid = close_price
ask = close_price
self.positions[ticker].update_market_value(bid, ask)
if self.positions[ticker].quantity == 0:
closed = self.positions.pop(ticker)
self.realised_pnl += closed.realised_pnl
self.closed_positions.append(closed)
self._update_portfolio()
else:
print(
"Ticker %s not in the current position list. "
"Could not modify a current position." % ticker
)
def transact_position(
self, action, ticker,
quantity, price, commission
):
"""
Handles any new position or modification to
a current position, by calling the respective
_add_position and _modify_position methods.
Hence, this single method will be called by the
PortfolioHandler to update the Portfolio itself.
"""
if action == "BOT":
self.cur_cash -= ((quantity * price) + commission)
elif action == "SLD":
self.cur_cash += ((quantity * price) - commission)
if ticker not in self.positions:
self._add_position(
action, ticker, quantity,
price, commission
)
else:
self._modify_position(
action, ticker, quantity,
price, commission
)
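# Minimal usage sketch (for illustration only; _FakePriceHandler is a stand-in, since
# any object exposing istick() and get_last_close() as used above would do):
#
#   class _FakePriceHandler:
#       def istick(self):
#           return False
#       def get_last_close(self, ticker):
#           return 105.0
#
#   portfolio = Portfolio(_FakePriceHandler(), cash=100000)
#   portfolio.transact_position("BOT", "ABC", quantity=100, price=100.0, commission=1.0)
#   portfolio.transact_position("SLD", "ABC", quantity=100, price=105.0, commission=1.0)
#
#   # cur_cash is now 100000 - (100*100.0 + 1.0) + (100*105.0 - 1.0) = 100498.0;
#   # the closed position moves to closed_positions and its pnl (computed inside
#   # the Position class, not shown here) is added to realised_pnl.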
|
the-stack_0_12777 | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import cPickle as pickle
import numpy as np
import cv2
from core.config import cfg
def get_image_blob(image, target_scale, target_max_size):
"""Convert an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
#TODO: choose suitable pixel_means for DEEPLESION data, see also roi_data/minibatch.py:_get_image_blob
if cfg.LESION.LESION_ENABLED:
if cfg.LESION.USE_3DCE or cfg.LESION.MULTI_MODALITY:
pixel_means = np.tile(np.array([100]), cfg.LESION.NUM_IMAGES_3DCE * 3)
else:
pixel_means = np.tile(np.array([100]), cfg.LESION.SLICE_NUM)
else:
pixel_means = cfg.PIXEL_MEANS
if isinstance(image, list):
im = image[0]
other_im = image[1]
processed_im = []
im, im_scale = prep_im_for_blob(
im, pixel_means, [target_scale], target_max_size, None)
other_im, other_im_scale = prep_im_for_blob(
other_im, pixel_means, [target_scale], target_max_size, None)
processed_im.append(im[0])
processed_im.append(other_im[0])
else:
processed_im, im_scale = prep_im_for_blob(
image, pixel_means, [target_scale], target_max_size, None
)
# Note: processed_im might have a different shape from blob. blob might be larger than
# processed_im, or than max_size
blob = im_list_to_blob(processed_im)
# NOTE: this height and width may be larger than actual scaled input image
# due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
# maintaining this behavior for now to make existing results exactly
# reproducible (in practice using the true input image height and width
# yields nearly the same results, but they are sometimes slightly different
# because predictions near the edge of the image will be pruned more
# aggressively).
# N,C,H,W for 2D input; N,C,D,H,W for 3D input.
if cfg.LESION.USE_3D_INPUT:
height, width = blob.shape[3], blob.shape[4]
else:
height, width = blob.shape[2], blob.shape[3]
im_info = np.hstack((height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent: i.e.
- BGR channel order
- pixel means subtracted
- resized to the desired input size
- float32 numpy ndarray format
- H,W,C for 2D input , H,W,D for 3D input
Output: 4D N,C,H,W for 2D input (5D N,C,D,H,W for 3D input).
"""
if not isinstance(ims, list):
ims = [ims]
num_images = len(ims)
if cfg.LESION.USE_3D_INPUT:
# transform 3D Lesion data(H,W,D) to (N,C,D,H,W).
max_shape = get_3d_max_shape([im.shape for im in ims])
# depth axis is not padded.
blob = np.zeros(
(num_images, 1,max_shape[0], max_shape[1], ims[0].shape[2]), dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0, 0:im.shape[0], 0:im.shape[1], :im.shape[2]] = im
channel_swap = (0, 1, 4, 2, 3)
# Axis order will become: (n, c, d, h, w), eg. (1,1,9,800,800) for 9 slices
blob = blob.transpose(channel_swap)
else:
max_shape = get_max_shape([im.shape[:2] for im in ims])
if cfg.LESION.LESION_ENABLED:
if cfg.LESION.USE_3DCE or cfg.LESION.MULTI_MODALITY:
blob = np.zeros((num_images, max_shape[0], max_shape[1], cfg.LESION.NUM_IMAGES_3DCE * 3), dtype=np.float32)
else:
blob = np.zeros((num_images, max_shape[0], max_shape[1], cfg.LESION.SLICE_NUM), dtype=np.float32)
else:
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
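# Shape example (illustrative, with the LESION/3D options disabled): two H,W,C images
# of shapes (480, 640, 3) and (500, 600, 3) give max_shape (500, 640) and a blob of
# shape (2, 3, 500, 640); with FPN on and COARSEST_STRIDE=32 the spatial dims are
# padded up to multiples of 32, giving (2, 3, 512, 640).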
def get_3d_max_shape(im_shapes):
"""
Calculate max spatial size for batching given a list of image shapes.
Note That this function is called twice during dealing one batch,
first in blob.get_minibatch(),H,W,D order, then in loader.collate_minibatch(),D,H,W order.
Depth pad should be ignored.
"""
max_shape = np.array(im_shapes).max(axis=0)
assert max_shape.size == 3
# Pad the image so they can be divisible by a stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
max_shape[2] = int(np.ceil(max_shape[2] / stride) * stride)
return max_shape
def get_max_shape(im_shapes):
"""Calculate max spatial size (h, w) for batching given a list of image shapes
"""
max_shape = np.array(im_shapes).max(axis=0)
assert max_shape.size == 2
# Pad the image so they can be divisible by a stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
return max_shape
def prep_im_for_blob(im, pixel_means, target_sizes, max_size, transform_cv=None):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target size (capped at max_size)
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
if transform_cv is not None:
im = transform_cv(im)
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
ims = []
im_scales = []
for target_size in target_sizes:
im_scale = get_target_scale(im_size_min, im_size_max, target_size, max_size)
im_resized = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
ims.append(im_resized)
im_scales.append(im_scale)
return ims, im_scales
def get_im_blob_sizes(im_shape, target_sizes, max_size):
"""Calculate im blob size for multiple target_sizes given original im shape
"""
im_size_min = np.min(im_shape)
im_size_max = np.max(im_shape)
im_sizes = []
for target_size in target_sizes:
im_scale = get_target_scale(im_size_min, im_size_max, target_size, max_size)
im_sizes.append(np.round(im_shape * im_scale))
return np.array(im_sizes)
def get_target_scale(im_size_min, im_size_max, target_size, max_size):
"""Calculate target resize scale
"""
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
return im_scale
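# Worked example (values chosen for illustration): with im_size_min=600,
# im_size_max=800, target_size=800 and max_size=1333, im_scale = 800/600 = 1.33...
# and round(1.33 * 800) = 1067 <= 1333, so the scale is kept. With max_size=1000
# instead, 1067 > 1000 and the scale is capped at 1000/800 = 1.25.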
def zeros(shape, int32=False):
"""Return a blob of all zeros of the given shape with the correct float or
int data type.
"""
return np.zeros(shape, dtype=np.int32 if int32 else np.float32)
def ones(shape, int32=False):
"""Return a blob of all ones of the given shape with the correct float or
int data type.
"""
return np.ones(shape, dtype=np.int32 if int32 else np.float32)
def serialize(obj):
"""Serialize a Python object using pickle and encode it as an array of
float32 values so that it can be feed into the workspace. See deserialize().
"""
return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)
def deserialize(arr):
"""Unserialize a Python object from an array of float32 values fetched from
a workspace. See serialize().
"""
return pickle.loads(arr.astype(np.uint8).tobytes())
|
the-stack_0_12778 | # -*- coding: utf-8 -*-
"""Interface for running Python functions as subprocess-mode commands.
Code for several helper methods in the `ProcProxy` class have been reproduced
without modification from `subprocess.py` in the Python 3.4.2 standard library.
The contents of `subprocess.py` (and, thus, the reproduced methods) are
Copyright (c) 2003-2005 by Peter Astrand <[email protected]> and were
licensed to the Python Software foundation under a Contributor Agreement.
"""
import io
import os
import re
import sys
import time
import queue
import array
import ctypes
import signal
import inspect
import builtins
import functools
import threading
import subprocess
import collections.abc as cabc
from xonsh.platform import (
ON_WINDOWS,
ON_POSIX,
ON_MSYS,
ON_CYGWIN,
CAN_RESIZE_WINDOW,
LFLAG,
CC,
)
from xonsh.tools import (
redirect_stdout,
redirect_stderr,
print_exception,
XonshCalledProcessError,
findfirst,
on_main_thread,
XonshError,
format_std_prepost,
)
from xonsh.lazyasd import lazyobject, LazyObject
from xonsh.jobs import wait_for_active_job, give_terminal_to, _continue
from xonsh.lazyimps import fcntl, termios, _winapi, msvcrt, winutils
# these decorators are imported for users back-compatible
from xonsh.tools import unthreadable, uncapturable # NOQA
# foreground has been deprecated
foreground = unthreadable
@lazyobject
def STDOUT_CAPTURE_KINDS():
return frozenset(["stdout", "object"])
# The following escape codes are xterm codes.
# See http://rtfm.etla.org/xterm/ctlseq.html for more.
MODE_NUMS = ("1049", "47", "1047")
START_ALTERNATE_MODE = LazyObject(
lambda: frozenset("\x1b[?{0}h".format(i).encode() for i in MODE_NUMS),
globals(),
"START_ALTERNATE_MODE",
)
END_ALTERNATE_MODE = LazyObject(
lambda: frozenset("\x1b[?{0}l".format(i).encode() for i in MODE_NUMS),
globals(),
"END_ALTERNATE_MODE",
)
ALTERNATE_MODE_FLAGS = LazyObject(
lambda: tuple(START_ALTERNATE_MODE) + tuple(END_ALTERNATE_MODE),
globals(),
"ALTERNATE_MODE_FLAGS",
)
RE_HIDDEN_BYTES = LazyObject(
lambda: re.compile(b"(\001.*?\002)"), globals(), "RE_HIDDEN"
)
@lazyobject
def RE_VT100_ESCAPE():
return re.compile(b"(\x9B|\x1B\\[)[0-?]*[ -\\/]*[@-~]")
@lazyobject
def RE_HIDE_ESCAPE():
return re.compile(
b"(" + RE_HIDDEN_BYTES.pattern + b"|" + RE_VT100_ESCAPE.pattern + b")"
)
class QueueReader:
"""Provides a file-like interface to reading from a queue."""
def __init__(self, fd, timeout=None):
"""
Parameters
----------
fd : int
A file descriptor
timeout : float or None, optional
The queue reading timeout.
"""
self.fd = fd
self.timeout = timeout
self.closed = False
self.queue = queue.Queue()
self.thread = None
def close(self):
"""close the reader"""
self.closed = True
def is_fully_read(self):
"""Returns whether or not the queue is fully read and the reader is
closed.
"""
return (
self.closed
and (self.thread is None or not self.thread.is_alive())
and self.queue.empty()
)
def read_queue(self):
"""Reads a single chunk from the queue. This is blocking if
the timeout is None and non-blocking otherwise.
"""
try:
return self.queue.get(block=True, timeout=self.timeout)
except queue.Empty:
return b""
def read(self, size=-1):
"""Reads bytes from the file."""
i = 0
buf = b""
while size < 0 or i != size:
line = self.read_queue()
if line:
buf += line
else:
break
i += len(line)
return buf
def readline(self, size=-1):
"""Reads a line, or a partial line from the file descriptor."""
i = 0
nl = b"\n"
buf = b""
while size < 0 or i != size:
line = self.read_queue()
if line:
buf += line
if line.endswith(nl):
break
else:
break
i += len(line)
return buf
def _read_all_lines(self):
"""This reads all remaining lines in a blocking fashion."""
lines = []
while not self.is_fully_read():
chunk = self.read_queue()
lines.extend(chunk.splitlines(keepends=True))
return lines
def readlines(self, hint=-1):
"""Reads lines from the file descriptor. This is blocking for negative
hints (i.e. read all the remaining lines) and non-blocking otherwise.
"""
if hint == -1:
return self._read_all_lines()
lines = []
while len(lines) != hint:
chunk = self.read_queue()
if not chunk:
break
lines.extend(chunk.splitlines(keepends=True))
return lines
def fileno(self):
"""Returns the file descriptor number."""
return self.fd
@staticmethod
def readable():
"""Returns true, because this object is always readable."""
return True
def iterqueue(self):
"""Iterates through all remaining chunks in a blocking fashion."""
while not self.is_fully_read():
chunk = self.read_queue()
if not chunk:
continue
yield chunk
def populate_fd_queue(reader, fd, queue):
"""Reads 1 kb of data from a file descriptor into a queue.
If this ends or fails, it flags the calling reader object as closed.
"""
while True:
try:
c = os.read(fd, 1024)
except OSError:
reader.closed = True
break
if c:
queue.put(c)
else:
reader.closed = True
break
class NonBlockingFDReader(QueueReader):
"""A class for reading characters from a file descriptor on a background
thread. This has the advantages that the calling thread can close the
file and that the reading does not block the calling thread.
"""
def __init__(self, fd, timeout=None):
"""
Parameters
----------
fd : int
A file descriptor
timeout : float or None, optional
The queue reading timeout.
"""
super().__init__(fd, timeout=timeout)
# start reading from stream
self.thread = threading.Thread(
target=populate_fd_queue, args=(self, self.fd, self.queue)
)
self.thread.daemon = True
self.thread.start()
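# Minimal usage sketch (illustrative only; the pipe below exists just for the example
# and is not part of xonsh itself):
#
#   r, w = os.pipe()
#   os.write(w, b"hello\nworld\n")
#   os.close(w)                       # closing the write end lets the reader hit EOF
#   reader = NonBlockingFDReader(r, timeout=0.1)
#   data = reader.read()              # b"hello\nworld\n" once the background thread
#                                     # has drained the pipe; empty reads never block
#                                     # the calling thread for more than the timeout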
def populate_buffer(reader, fd, buffer, chunksize):
"""Reads bytes from the file descriptor and copies them into a buffer.
The reads happen in parallel using the pread() syscall, which is only
available on POSIX systems. If the read fails for any reason, the reader is
flagged as closed.
"""
offset = 0
while True:
try:
buf = os.pread(fd, chunksize, offset)
except OSError:
reader.closed = True
break
if buf:
buffer.write(buf)
offset += len(buf)
else:
reader.closed = True
break
class BufferedFDParallelReader:
"""Buffered, parallel background thread reader."""
def __init__(self, fd, buffer=None, chunksize=1024):
"""
Parameters
----------
fd : int
File descriptor from which to read.
buffer : binary file-like or None, optional
A buffer to write bytes into. If None, a new BytesIO object
is created.
chunksize : int, optional
The max size of the parallel reads, default 1 kb.
"""
self.fd = fd
self.buffer = io.BytesIO() if buffer is None else buffer
self.chunksize = chunksize
self.closed = False
# start reading from stream
self.thread = threading.Thread(
target=populate_buffer, args=(self, fd, self.buffer, chunksize)
)
self.thread.daemon = True
self.thread.start()
def _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd):
# if we are getting close to the end of the console buffer,
# expand it so that we can read from it successfully.
if cols == 0:
return orig_posize[-1], max_offset, orig_posize
rows = ((max_offset + expandsize) // cols) + 1
winutils.set_console_screen_buffer_size(cols, rows, fd=fd)
orig_posize = orig_posize[:3] + (rows,)
max_offset = (rows - 1) * cols
return rows, max_offset, orig_posize
def populate_console(reader, fd, buffer, chunksize, queue, expandsize=None):
"""Reads bytes from the file descriptor and puts lines into the queue.
The reads happened in parallel,
using xonsh.winutils.read_console_output_character(),
and is thus only available on windows. If the read fails for any reason,
the reader is flagged as closed.
"""
# OK, so this function is super annoying because Windows stores its
# buffers as a 2D regular, dense array -- without trailing newlines.
# Meanwhile, we want to add *lines* to the queue. Also, as is typical
# with parallel reads, the entire buffer that you ask for may not be
# filled. Thus we have to deal with the full generality.
# 1. reads may end in the middle of a line
# 2. excess whitespace at the end of a line may not be real, unless
# 3. you haven't read to the end of the line yet!
# So there are alignment issues everywhere. Also, Windows will automatically
# read past the current cursor position, even though there is presumably
# nothing to see there.
#
# These chunked reads basically need to happen like this because,
# a. The default buffer size is HUGE for the console (90k lines x 120 cols)
# as so we can't just read in everything at the end and see what we
# care about without a noticeable performance hit.
# b. Even with this huge size, it is still possible to write more lines than
# this, so we should scroll along with the console.
# Unfortunately, because we do not have control over the terminal emulator,
# It is not possible to compute how far back we should set the beginning
# read position because we don't know how many characters have been popped
# off the top of the buffer. If we did somehow know this number we could do
# something like the following:
#
# new_offset = (y*cols) + x
# if new_offset == max_offset:
# new_offset -= scrolled_offset
# x = new_offset%cols
# y = new_offset//cols
# continue
#
# So this method is imperfect and only works as long as the screen has
# room to expand to. Thus the trick here is to expand the screen size
# when we get close enough to the end of the screen. There remain some
# async issues related to not being able to set the cursor position.
# but they just affect the alignment / capture of the output of the
# first command run after a screen resize.
if expandsize is None:
expandsize = 100 * chunksize
x, y, cols, rows = posize = winutils.get_position_size(fd)
pre_x = pre_y = -1
orig_posize = posize
offset = (cols * y) + x
max_offset = (rows - 1) * cols
# I believe that there is a bug in PTK that if we reset the
# cursor position, the cursor on the next prompt is accidentally on
# the next line. If this is fixed, uncomment the following line.
# if max_offset < offset + expandsize:
# rows, max_offset, orig_posize = _expand_console_buffer(
# cols, max_offset, expandsize,
# orig_posize, fd)
# winutils.set_console_cursor_position(x, y, fd=fd)
while True:
posize = winutils.get_position_size(fd)
offset = (cols * y) + x
if ((posize[1], posize[0]) <= (y, x) and posize[2:] == (cols, rows)) or (
pre_x == x and pre_y == y
):
# already at or ahead of the current cursor position.
if reader.closed:
break
else:
time.sleep(reader.timeout)
continue
elif max_offset <= offset + expandsize:
ecb = _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd)
rows, max_offset, orig_posize = ecb
continue
elif posize[2:] == (cols, rows):
# cursor updated but screen size is the same.
pass
else:
# screen size changed, which is offset preserving
orig_posize = posize
cols, rows = posize[2:]
x = offset % cols
y = offset // cols
pre_x = pre_y = -1
max_offset = (rows - 1) * cols
continue
try:
buf = winutils.read_console_output_character(
x=x, y=y, fd=fd, buf=buffer, bufsize=chunksize, raw=True
)
except (OSError, IOError):
reader.closed = True
break
# cursor position and offset
if not reader.closed:
buf = buf.rstrip()
nread = len(buf)
if nread == 0:
time.sleep(reader.timeout)
continue
cur_x, cur_y = posize[0], posize[1]
cur_offset = (cols * cur_y) + cur_x
beg_offset = (cols * y) + x
end_offset = beg_offset + nread
if end_offset > cur_offset and cur_offset != max_offset:
buf = buf[: cur_offset - end_offset]
# convert to lines
xshift = cols - x
yshift = (nread // cols) + (1 if nread % cols > 0 else 0)
lines = [buf[:xshift]]
lines += [
buf[l * cols + xshift : (l + 1) * cols + xshift] for l in range(yshift)
]
lines = [line for line in lines if line]
if not lines:
time.sleep(reader.timeout)
continue
# put lines in the queue
nl = b"\n"
for line in lines[:-1]:
queue.put(line.rstrip() + nl)
if len(lines[-1]) == xshift:
queue.put(lines[-1].rstrip() + nl)
else:
queue.put(lines[-1])
# update x and y locations
if (beg_offset + len(buf)) % cols == 0:
new_offset = beg_offset + len(buf)
else:
new_offset = beg_offset + len(buf.rstrip())
pre_x = x
pre_y = y
x = new_offset % cols
y = new_offset // cols
time.sleep(reader.timeout)
class ConsoleParallelReader(QueueReader):
"""Parallel reader for consoles that runs in a background thread.
This is only needed, available, and useful on Windows.
"""
def __init__(self, fd, buffer=None, chunksize=1024, timeout=None):
"""
Parameters
----------
fd : int
Standard buffer file descriptor, 0 for stdin, 1 for stdout (default),
and 2 for stderr.
buffer : ctypes.c_wchar_p, optional
An existing buffer to (re-)use.
chunksize : int, optional
The max size of the parallel reads, default 1 kb.
timeout : float, optional
The queue reading timeout.
"""
timeout = timeout or builtins.__xonsh__.env.get("XONSH_PROC_FREQUENCY")
super().__init__(fd, timeout=timeout)
self._buffer = buffer # this cannot be public
if buffer is None:
self._buffer = ctypes.c_char_p(b" " * chunksize)
self.chunksize = chunksize
# start reading from stream
self.thread = threading.Thread(
target=populate_console,
args=(self, fd, self._buffer, chunksize, self.queue),
)
self.thread.daemon = True
self.thread.start()
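# Illustrative sketch (not part of the original module): one way a consumer
# could drain a ConsoleParallelReader, relying only on the QueueReader-style
# ``read_queue`` iterator used elsewhere in this file. ``handle_line`` is a
# hypothetical callback, and this is Windows-only like the class itself.
def _example_console_parallel_reader(handle_line):
    reader = ConsoleParallelReader(1)  # capture the console's stdout
    for line in iter(reader.read_queue, b""):  # stops on the empty-bytes sentinel
        handle_line(line)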
def safe_fdclose(handle, cache=None):
"""Closes a file handle in the safest way possible, and potentially
storing the result.
"""
if cache is not None and cache.get(handle, False):
return
status = True
if handle is None:
pass
elif isinstance(handle, int):
if handle >= 3:
# don't close stdin, stdout, stderr, -1
try:
os.close(handle)
except OSError:
status = False
elif handle is sys.stdin or handle is sys.stdout or handle is sys.stderr:
# don't close stdin, stdout, or stderr
pass
else:
try:
handle.close()
except OSError:
status = False
if cache is not None:
cache[handle] = status
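# Illustrative sketch (not part of the original module): closing a descriptor
# through safe_fdclose with a cache makes any repeated close attempt a no-op.
def _example_safe_fdclose():
    cache = {}
    fd = os.open(os.devnull, os.O_RDWR)
    safe_fdclose(fd, cache=cache)  # closes fd and records the outcome
    safe_fdclose(fd, cache=cache)  # skipped: the cache says it was already handled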
def safe_flush(handle):
"""Attempts to safely flush a file handle, returns success bool."""
status = True
try:
handle.flush()
except OSError:
status = False
return status
def still_writable(fd):
"""Determines whether a file descriptor is still writable by trying to
write an empty string and seeing if it fails.
"""
try:
os.write(fd, b"")
status = True
except OSError:
status = False
return status
class PopenThread(threading.Thread):
"""A thread for running and managing subprocess. This allows reading
from the stdin, stdout, and stderr streams in a non-blocking fashion.
This takes the same arguments and keyword arguments as regular Popen.
This requires that the captured_stdout and captured_stderr attributes
    be set following instantiation.
"""
def __init__(self, *args, stdin=None, stdout=None, stderr=None, **kwargs):
super().__init__()
self.lock = threading.RLock()
env = builtins.__xonsh__.env
# stdin setup
self.orig_stdin = stdin
if stdin is None:
self.stdin_fd = 0
elif isinstance(stdin, int):
self.stdin_fd = stdin
else:
self.stdin_fd = stdin.fileno()
self.store_stdin = env.get("XONSH_STORE_STDIN")
self.timeout = env.get("XONSH_PROC_FREQUENCY")
self.in_alt_mode = False
self.stdin_mode = None
# stdout setup
self.orig_stdout = stdout
self.stdout_fd = 1 if stdout is None else stdout.fileno()
self._set_pty_size()
# stderr setup
self.orig_stderr = stderr
# Set some signal handles, if we can. Must come before process
# is started to prevent deadlock on windows
self.proc = None # has to be here for closure for handles
self.old_int_handler = self.old_winch_handler = None
self.old_tstp_handler = self.old_quit_handler = None
if on_main_thread():
self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int)
if ON_POSIX:
self.old_tstp_handler = signal.signal(signal.SIGTSTP, self._signal_tstp)
self.old_quit_handler = signal.signal(signal.SIGQUIT, self._signal_quit)
if CAN_RESIZE_WINDOW:
self.old_winch_handler = signal.signal(
signal.SIGWINCH, self._signal_winch
)
# start up process
if ON_WINDOWS and stdout is not None:
os.set_inheritable(stdout.fileno(), False)
try:
self.proc = proc = subprocess.Popen(
*args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs
)
except Exception:
self._clean_up()
raise
self.pid = proc.pid
self.universal_newlines = uninew = proc.universal_newlines
if uninew:
self.encoding = enc = env.get("XONSH_ENCODING")
self.encoding_errors = err = env.get("XONSH_ENCODING_ERRORS")
self.stdin = io.BytesIO() # stdin is always bytes!
self.stdout = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err)
self.stderr = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err)
else:
self.encoding = self.encoding_errors = None
self.stdin = io.BytesIO()
self.stdout = io.BytesIO()
self.stderr = io.BytesIO()
self.suspended = False
self.prevs_are_closed = False
self.start()
def run(self):
"""Runs the subprocess by performing a parallel read on stdin if allowed,
and copying bytes from captured_stdout to stdout and bytes from
captured_stderr to stderr.
"""
proc = self.proc
spec = self._wait_and_getattr("spec")
# get stdin and apply parallel reader if needed.
stdin = self.stdin
if self.orig_stdin is None:
origin = None
elif ON_POSIX and self.store_stdin:
origin = self.orig_stdin
origfd = origin if isinstance(origin, int) else origin.fileno()
origin = BufferedFDParallelReader(origfd, buffer=stdin)
else:
origin = None
# get non-blocking stdout
stdout = self.stdout.buffer if self.universal_newlines else self.stdout
capout = spec.captured_stdout
if capout is None:
procout = None
else:
procout = NonBlockingFDReader(capout.fileno(), timeout=self.timeout)
# get non-blocking stderr
stderr = self.stderr.buffer if self.universal_newlines else self.stderr
caperr = spec.captured_stderr
if caperr is None:
procerr = None
else:
procerr = NonBlockingFDReader(caperr.fileno(), timeout=self.timeout)
# initial read from buffer
self._read_write(procout, stdout, sys.__stdout__)
self._read_write(procerr, stderr, sys.__stderr__)
# loop over reads while process is running.
i = j = cnt = 1
while proc.poll() is None:
# this is here for CPU performance reasons.
if i + j == 0:
cnt = min(cnt + 1, 1000)
tout = self.timeout * cnt
if procout is not None:
procout.timeout = tout
if procerr is not None:
procerr.timeout = tout
elif cnt == 1:
pass
else:
cnt = 1
if procout is not None:
procout.timeout = self.timeout
if procerr is not None:
procerr.timeout = self.timeout
# redirect some output!
i = self._read_write(procout, stdout, sys.__stdout__)
j = self._read_write(procerr, stderr, sys.__stderr__)
if self.suspended:
break
if self.suspended:
return
# close files to send EOF to non-blocking reader.
# capout & caperr seem to be needed only by Windows, while
        # orig_stdout & orig_stderr are needed by posix and Windows.
# Also, order seems to matter here,
# with orig_* needed to be closed before cap*
safe_fdclose(self.orig_stdout)
safe_fdclose(self.orig_stderr)
if ON_WINDOWS:
safe_fdclose(capout)
safe_fdclose(caperr)
# read in the remaining data in a blocking fashion.
while (procout is not None and not procout.is_fully_read()) or (
procerr is not None and not procerr.is_fully_read()
):
self._read_write(procout, stdout, sys.__stdout__)
self._read_write(procerr, stderr, sys.__stderr__)
# kill the process if it is still alive. Happens when piping.
if proc.poll() is None:
proc.terminate()
def _wait_and_getattr(self, name):
"""make sure the instance has a certain attr, and return it."""
while not hasattr(self, name):
time.sleep(1e-7)
return getattr(self, name)
def _read_write(self, reader, writer, stdbuf):
"""Reads a chunk of bytes from a buffer and write into memory or back
down to the standard buffer, as appropriate. Returns the number of
successful reads.
"""
if reader is None:
return 0
i = -1
for i, chunk in enumerate(iter(reader.read_queue, b"")):
self._alt_mode_switch(chunk, writer, stdbuf)
if i >= 0:
writer.flush()
stdbuf.flush()
return i + 1
def _alt_mode_switch(self, chunk, membuf, stdbuf):
"""Enables recursively switching between normal capturing mode
and 'alt' mode, which passes through values to the standard
buffer. Pagers, text editors, curses applications, etc. use
alternate mode.
"""
i, flag = findfirst(chunk, ALTERNATE_MODE_FLAGS)
if flag is None:
self._alt_mode_writer(chunk, membuf, stdbuf)
else:
# This code is executed when the child process switches the
# terminal into or out of alternate mode. The line below assumes
            # that the user has opened vim, less, or similar, and writes
# to stdin.
j = i + len(flag)
# write the first part of the chunk in the current mode.
self._alt_mode_writer(chunk[:i], membuf, stdbuf)
# switch modes
# write the flag itself the current mode where alt mode is on
# so that it is streamed to the terminal ASAP.
# this is needed for terminal emulators to find the correct
# positions before and after alt mode.
alt_mode = flag in START_ALTERNATE_MODE
if alt_mode:
self.in_alt_mode = alt_mode
self._alt_mode_writer(flag, membuf, stdbuf)
self._enable_cbreak_stdin()
else:
self._alt_mode_writer(flag, membuf, stdbuf)
self.in_alt_mode = alt_mode
self._disable_cbreak_stdin()
# recurse this function, but without the current flag.
self._alt_mode_switch(chunk[j:], membuf, stdbuf)
def _alt_mode_writer(self, chunk, membuf, stdbuf):
"""Write bytes to the standard buffer if in alt mode or otherwise
to the in-memory buffer.
"""
if not chunk:
pass # don't write empty values
elif self.in_alt_mode:
stdbuf.buffer.write(chunk)
else:
with self.lock:
p = membuf.tell()
membuf.seek(0, io.SEEK_END)
membuf.write(chunk)
membuf.seek(p)
#
# Window resize handlers
#
def _signal_winch(self, signum, frame):
"""Signal handler for SIGWINCH - window size has changed."""
self.send_signal(signal.SIGWINCH)
self._set_pty_size()
def _set_pty_size(self):
"""Sets the window size of the child pty based on the window size of
our own controlling terminal.
"""
if ON_WINDOWS or not os.isatty(self.stdout_fd):
return
# Get the terminal size of the real terminal, set it on the
# pseudoterminal.
buf = array.array("h", [0, 0, 0, 0])
# 1 = stdout here
try:
fcntl.ioctl(1, termios.TIOCGWINSZ, buf, True)
fcntl.ioctl(self.stdout_fd, termios.TIOCSWINSZ, buf)
except OSError:
pass
#
# SIGINT handler
#
def _signal_int(self, signum, frame):
"""Signal handler for SIGINT - Ctrl+C may have been pressed."""
self.send_signal(signum)
if self.proc is not None and self.proc.poll() is not None:
self._restore_sigint(frame=frame)
if on_main_thread():
signal.pthread_kill(threading.get_ident(), signal.SIGINT)
def _restore_sigint(self, frame=None):
old = self.old_int_handler
if old is not None:
if on_main_thread():
signal.signal(signal.SIGINT, old)
self.old_int_handler = None
if frame is not None:
self._disable_cbreak_stdin()
if old is not None and old is not self._signal_int:
old(signal.SIGINT, frame)
#
# SIGTSTP handler
#
def _signal_tstp(self, signum, frame):
"""Signal handler for suspending SIGTSTP - Ctrl+Z may have been pressed.
"""
self.suspended = True
self.send_signal(signum)
self._restore_sigtstp(frame=frame)
def _restore_sigtstp(self, frame=None):
old = self.old_tstp_handler
if old is not None:
if on_main_thread():
signal.signal(signal.SIGTSTP, old)
self.old_tstp_handler = None
if frame is not None:
self._disable_cbreak_stdin()
#
# SIGQUIT handler
#
def _signal_quit(self, signum, frame):
r"""Signal handler for quiting SIGQUIT - Ctrl+\ may have been pressed.
"""
self.send_signal(signum)
self._restore_sigquit(frame=frame)
def _restore_sigquit(self, frame=None):
old = self.old_quit_handler
if old is not None:
if on_main_thread():
signal.signal(signal.SIGQUIT, old)
self.old_quit_handler = None
if frame is not None:
self._disable_cbreak_stdin()
#
# cbreak mode handlers
#
def _enable_cbreak_stdin(self):
if not ON_POSIX:
return
try:
self.stdin_mode = termios.tcgetattr(self.stdin_fd)[:]
except termios.error:
# this can happen for cases where another process is controlling
# xonsh's tty device, such as in testing.
self.stdin_mode = None
return
new = self.stdin_mode[:]
new[LFLAG] &= ~(termios.ECHO | termios.ICANON)
new[CC][termios.VMIN] = 1
new[CC][termios.VTIME] = 0
try:
# termios.TCSAFLUSH may be less reliable than termios.TCSANOW
termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new)
except termios.error:
self._disable_cbreak_stdin()
def _disable_cbreak_stdin(self):
if not ON_POSIX or self.stdin_mode is None:
return
new = self.stdin_mode[:]
new[LFLAG] |= termios.ECHO | termios.ICANON
new[CC][termios.VMIN] = 1
new[CC][termios.VTIME] = 0
try:
termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new)
except termios.error:
pass
#
# Dispatch methods
#
def poll(self):
"""Dispatches to Popen.returncode."""
return self.proc.returncode
def wait(self, timeout=None):
"""Dispatches to Popen.wait(), but also does process cleanup such as
joining this thread and replacing the original window size signal
handler.
"""
self._disable_cbreak_stdin()
rtn = self.proc.wait(timeout=timeout)
self.join()
# need to replace the old signal handlers somewhere...
if self.old_winch_handler is not None and on_main_thread():
signal.signal(signal.SIGWINCH, self.old_winch_handler)
self.old_winch_handler = None
self._clean_up()
return rtn
def _clean_up(self):
self._restore_sigint()
self._restore_sigtstp()
self._restore_sigquit()
@property
def returncode(self):
"""Process return code."""
return self.proc.returncode
@returncode.setter
def returncode(self, value):
"""Process return code."""
self.proc.returncode = value
@property
def signal(self):
"""Process signal, or None."""
s = getattr(self.proc, "signal", None)
if s is None:
rtn = self.returncode
if rtn is not None and rtn != 0:
s = (-1 * rtn, rtn < 0 if ON_WINDOWS else os.WCOREDUMP(rtn))
return s
@signal.setter
def signal(self, value):
"""Process signal, or None."""
self.proc.signal = value
def send_signal(self, signal):
"""Dispatches to Popen.send_signal()."""
dt = 0.0
while self.proc is None and dt < self.timeout:
time.sleep(1e-7)
dt += 1e-7
if self.proc is None:
return
try:
rtn = self.proc.send_signal(signal)
except ProcessLookupError:
# This can happen in the case of !(cmd) when the command has ended
rtn = None
return rtn
def terminate(self):
"""Dispatches to Popen.terminate()."""
return self.proc.terminate()
def kill(self):
"""Dispatches to Popen.kill()."""
return self.proc.kill()
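# Illustrative sketch (not part of the original module): PopenThread launches
# the command immediately, but its run() loop blocks until a ``spec`` attribute
# (a SubprocSpec-like object providing captured_stdout/captured_stderr) is
# attached, so callers assign it right after construction. ``spec`` here is
# hypothetical.
def _example_popen_thread(spec):
    pt = PopenThread(["ls", "-l"], stdin=None, stdout=None, stderr=None)
    pt.spec = spec  # run() waits on this attribute before redirecting output
    return pt.wait()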
class Handle(int):
closed = False
def Close(self, CloseHandle=None):
CloseHandle = CloseHandle or _winapi.CloseHandle
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
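# Illustrative sketch (not part of the original module, Windows-only): Handle
# guards a raw pipe handle so it is closed at most once, mirroring how the
# pipes are created later in this file.
def _example_handle():
    r, w = _winapi.CreatePipe(None, 0)
    h = Handle(r)
    h.Close()
    h.Close()  # no-op: the handle is already marked closed
    return Handle(w).Detach()  # hand back the raw integer without closing it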
class FileThreadDispatcher:
"""Dispatches to different file handles depending on the
    current thread. Useful if you want file operations to go to different
places for different threads.
"""
def __init__(self, default=None):
"""
Parameters
----------
default : file-like or None, optional
The file handle to write to if a thread cannot be found in
            the registry. If None, a new in-memory instance is used.
Attributes
----------
registry : dict
Maps thread idents to file handles.
"""
if default is None:
default = io.TextIOWrapper(io.BytesIO())
self.default = default
self.registry = {}
def register(self, handle):
"""Registers a file handle for the current thread. Returns self so
that this method can be used in a with-statement.
"""
if handle is self:
            # prevent weird recursion errors
return self
self.registry[threading.get_ident()] = handle
return self
def deregister(self):
"""Removes the current thread from the registry."""
ident = threading.get_ident()
if ident in self.registry:
# don't remove if we have already been deregistered
del self.registry[threading.get_ident()]
@property
def available(self):
"""True if the thread is available in the registry."""
return threading.get_ident() in self.registry
@property
def handle(self):
"""Gets the current handle for the thread."""
return self.registry.get(threading.get_ident(), self.default)
def __enter__(self):
pass
def __exit__(self, ex_type, ex_value, ex_traceback):
self.deregister()
#
# io.TextIOBase interface
#
@property
def encoding(self):
"""Gets the encoding for this thread's handle."""
return self.handle.encoding
@property
def errors(self):
"""Gets the errors for this thread's handle."""
return self.handle.errors
@property
def newlines(self):
"""Gets the newlines for this thread's handle."""
return self.handle.newlines
@property
def buffer(self):
"""Gets the buffer for this thread's handle."""
return self.handle.buffer
def detach(self):
"""Detaches the buffer for the current thread."""
return self.handle.detach()
def read(self, size=None):
"""Reads from the handle for the current thread."""
return self.handle.read(size)
def readline(self, size=-1):
"""Reads a line from the handle for the current thread."""
return self.handle.readline(size)
def readlines(self, hint=-1):
"""Reads lines from the handle for the current thread."""
return self.handle.readlines(hint)
def seek(self, offset, whence=io.SEEK_SET):
"""Seeks the current file."""
return self.handle.seek(offset, whence)
def tell(self):
"""Reports the current position in the handle for the current thread."""
return self.handle.tell()
def write(self, s):
"""Writes to this thread's handle. This also flushes, just to be
extra sure the string was written.
"""
h = self.handle
try:
r = h.write(s)
h.flush()
except OSError:
r = None
return r
@property
def line_buffering(self):
"""Gets if line buffering for this thread's handle enabled."""
return self.handle.line_buffering
#
# io.IOBase interface
#
def close(self):
"""Closes the current thread's handle."""
return self.handle.close()
@property
def closed(self):
"""Is the thread's handle closed."""
return self.handle.closed
def fileno(self):
"""Returns the file descriptor for the current thread."""
return self.handle.fileno()
def flush(self):
"""Flushes the file descriptor for the current thread."""
return safe_flush(self.handle)
def isatty(self):
"""Returns if the file descriptor for the current thread is a tty."""
return self.handle.isatty()
def readable(self):
"""Returns if file descriptor for the current thread is readable."""
return self.handle.readable()
def seekable(self):
"""Returns if file descriptor for the current thread is seekable."""
return self.handle.seekable()
    def truncate(self, size=None):
        """Truncates the file for the current thread."""
        return self.handle.truncate(size)
    def writable(self):
        """Returns if file descriptor for the current thread is writable."""
        return self.handle.writable()
    def writelines(self, lines):
        """Writes lines to the file descriptor for the current thread."""
        return self.handle.writelines(lines)
# These should NOT be lazy since they *need* to get the true stdout from the
# main thread. Also their creation time should be negligible.
STDOUT_DISPATCHER = FileThreadDispatcher(default=sys.stdout)
STDERR_DISPATCHER = FileThreadDispatcher(default=sys.stderr)
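# Illustrative sketch (not part of the original module): route one thread's
# writes into an in-memory buffer while every other thread keeps the default
# stream registered above.
def _example_stdout_dispatcher():
    buf = io.StringIO()
    def worker():
        with STDOUT_DISPATCHER.register(buf):
            STDOUT_DISPATCHER.write("captured on this thread only\n")
    t = threading.Thread(target=worker)
    t.start()
    t.join()
    return buf.getvalue()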
def parse_proxy_return(r, stdout, stderr):
"""Proxies may return a variety of outputs. This handles them generally.
Parameters
----------
r : tuple, str, int, or None
Return from proxy function
stdout : file-like
Current stdout stream
    stderr : file-like
Current stderr stream
Returns
-------
cmd_result : int
The return code of the proxy
"""
cmd_result = 0
if isinstance(r, str):
stdout.write(r)
stdout.flush()
elif isinstance(r, int):
cmd_result = r
elif isinstance(r, cabc.Sequence):
rlen = len(r)
if rlen > 0 and r[0] is not None:
stdout.write(r[0])
stdout.flush()
if rlen > 1 and r[1] is not None:
stderr.write(r[1])
stderr.flush()
if rlen > 2 and r[2] is not None:
cmd_result = r[2]
elif r is not None:
# for the random object...
stdout.write(str(r))
stdout.flush()
return cmd_result
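# Illustrative sketch (not part of the original module): the shapes a proxy
# function may return and the return code each one normalizes to.
def _example_parse_proxy_return():
    out, err = io.StringIO(), io.StringIO()
    assert parse_proxy_return("hello\n", out, err) == 0  # str -> written to stdout
    assert parse_proxy_return(3, out, err) == 3  # int -> return code
    assert parse_proxy_return(("ok\n", "warn\n", 1), out, err) == 1  # (out, err, code)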
def proxy_zero(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes no parameters."""
return f()
def proxy_one(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes one parameter: args"""
return f(args)
def proxy_two(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes two parameter: args and stdin."""
return f(args, stdin)
def proxy_three(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes three parameter: args, stdin, stdout.
"""
return f(args, stdin, stdout)
def proxy_four(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes four parameter: args, stdin, stdout,
and stderr.
"""
return f(args, stdin, stdout, stderr)
def proxy_five(f, args, stdin, stdout, stderr, spec, stack):
"""Calls a proxy function which takes four parameter: args, stdin, stdout,
stderr, and spec.
"""
return f(args, stdin, stdout, stderr, spec)
PROXIES = (proxy_zero, proxy_one, proxy_two, proxy_three, proxy_four, proxy_five)
PROXY_KWARG_NAMES = frozenset(["args", "stdin", "stdout", "stderr", "spec", "stack"])
def partial_proxy(f):
"""Dispatches the appropriate proxy function based on the number of args."""
numargs = 0
for name, param in inspect.signature(f).parameters.items():
if (
param.kind == param.POSITIONAL_ONLY
or param.kind == param.POSITIONAL_OR_KEYWORD
):
numargs += 1
elif name in PROXY_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:
numargs += 1
if numargs < 6:
return functools.partial(PROXIES[numargs], f)
elif numargs == 6:
# don't need to partial.
return f
else:
e = "Expected proxy with 6 or fewer arguments for {}, not {}"
raise XonshError(e.format(", ".join(PROXY_KWARG_NAMES), numargs))
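# Illustrative sketch (not part of the original module): an alias-style callable
# that only wants ``args`` and ``stdout`` is dispatched through proxy_three, so
# the wrapper can always be invoked with the full six-argument signature.
def _example_partial_proxy():
    def _echo(args, stdin, stdout):
        stdout.write(" ".join(args) + "\n")
    wrapped = partial_proxy(_echo)
    out = io.StringIO()
    wrapped(["hello", "world"], None, out, None, None, None)
    return out.getvalue()  # "hello world\n"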
class ProcProxyThread(threading.Thread):
"""
Class representing a function to be run as a subprocess-mode command.
"""
def __init__(
self,
f,
args,
stdin=None,
stdout=None,
stderr=None,
universal_newlines=False,
env=None,
):
"""Parameters
----------
f : function
The function to be executed.
args : list
A (possibly empty) list containing the arguments that were given on
the command line
stdin : file-like, optional
A file-like object representing stdin (input can be read from
here). If `stdin` is not provided or if it is explicitly set to
`None`, then an instance of `io.StringIO` representing an empty
file is used.
stdout : file-like, optional
A file-like object representing stdout (normal output can be
written here). If `stdout` is not provided or if it is explicitly
set to `None`, then `sys.stdout` is used.
stderr : file-like, optional
A file-like object representing stderr (error output can be
written here). If `stderr` is not provided or if it is explicitly
set to `None`, then `sys.stderr` is used.
universal_newlines : bool, optional
Whether or not to use universal newlines.
env : Mapping, optional
Environment mapping.
"""
self.orig_f = f
self.f = partial_proxy(f)
self.args = args
self.pid = None
self.returncode = None
self._closed_handle_cache = {}
handles = self._get_handles(stdin, stdout, stderr)
(
self.p2cread,
self.p2cwrite,
self.c2pread,
self.c2pwrite,
self.errread,
self.errwrite,
) = handles
# default values
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.env = env or builtins.__xonsh__.env
self._interrupted = False
if ON_WINDOWS:
if self.p2cwrite != -1:
self.p2cwrite = msvcrt.open_osfhandle(self.p2cwrite.Detach(), 0)
if self.c2pread != -1:
self.c2pread = msvcrt.open_osfhandle(self.c2pread.Detach(), 0)
if self.errread != -1:
self.errread = msvcrt.open_osfhandle(self.errread.Detach(), 0)
if self.p2cwrite != -1:
self.stdin = io.open(self.p2cwrite, "wb", -1)
if universal_newlines:
self.stdin = io.TextIOWrapper(
self.stdin, write_through=True, line_buffering=False
)
elif isinstance(stdin, int) and stdin != 0:
self.stdin = io.open(stdin, "wb", -1)
if self.c2pread != -1:
self.stdout = io.open(self.c2pread, "rb", -1)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if self.errread != -1:
self.stderr = io.open(self.errread, "rb", -1)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
# Set some signal handles, if we can. Must come before process
# is started to prevent deadlock on windows
self.old_int_handler = None
if on_main_thread():
self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int)
# start up the proc
super().__init__()
self.start()
def __del__(self):
self._restore_sigint()
def run(self):
"""Set up input/output streams and execute the child function in a new
thread. This is part of the `threading.Thread` interface and should
not be called directly.
"""
if self.f is None:
return
spec = self._wait_and_getattr("spec")
last_in_pipeline = spec.last_in_pipeline
if last_in_pipeline:
capout = spec.captured_stdout # NOQA
caperr = spec.captured_stderr # NOQA
env = builtins.__xonsh__.env
enc = env.get("XONSH_ENCODING")
err = env.get("XONSH_ENCODING_ERRORS")
if ON_WINDOWS:
if self.p2cread != -1:
self.p2cread = msvcrt.open_osfhandle(self.p2cread.Detach(), 0)
if self.c2pwrite != -1:
self.c2pwrite = msvcrt.open_osfhandle(self.c2pwrite.Detach(), 0)
if self.errwrite != -1:
self.errwrite = msvcrt.open_osfhandle(self.errwrite.Detach(), 0)
# get stdin
if self.stdin is None:
sp_stdin = None
elif self.p2cread != -1:
sp_stdin = io.TextIOWrapper(
io.open(self.p2cread, "rb", -1), encoding=enc, errors=err
)
else:
sp_stdin = sys.stdin
# stdout
if self.c2pwrite != -1:
sp_stdout = io.TextIOWrapper(
io.open(self.c2pwrite, "wb", -1), encoding=enc, errors=err
)
else:
sp_stdout = sys.stdout
# stderr
if self.errwrite == self.c2pwrite:
sp_stderr = sp_stdout
elif self.errwrite != -1:
sp_stderr = io.TextIOWrapper(
io.open(self.errwrite, "wb", -1), encoding=enc, errors=err
)
else:
sp_stderr = sys.stderr
# run the function itself
try:
with STDOUT_DISPATCHER.register(sp_stdout), STDERR_DISPATCHER.register(
sp_stderr
), redirect_stdout(STDOUT_DISPATCHER), redirect_stderr(STDERR_DISPATCHER):
r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec, spec.stack)
except SystemExit as e:
r = e.code if isinstance(e.code, int) else int(bool(e.code))
except OSError:
status = still_writable(self.c2pwrite) and still_writable(self.errwrite)
if status:
# stdout and stderr are still writable, so error must
# come from function itself.
print_exception()
r = 1
else:
# stdout and stderr are no longer writable, so error must
# come from the fact that the next process in the pipeline
# has closed the other side of the pipe. The function then
# attempted to write to this side of the pipe anyway. This
# is not truly an error and we should exit gracefully.
r = 0
except Exception:
print_exception()
r = 1
safe_flush(sp_stdout)
safe_flush(sp_stderr)
self.returncode = parse_proxy_return(r, sp_stdout, sp_stderr)
if not last_in_pipeline and not ON_WINDOWS:
# mac requires us *not to* close the handles here while
# windows requires us *to* close the handles here
return
# clean up
# scopz: not sure why this is needed, but stdin cannot go here
# and stdout & stderr must.
handles = [self.stdout, self.stderr]
for handle in handles:
safe_fdclose(handle, cache=self._closed_handle_cache)
def _wait_and_getattr(self, name):
"""make sure the instance has a certain attr, and return it."""
while not hasattr(self, name):
time.sleep(1e-7)
return getattr(self, name)
def poll(self):
"""Check if the function has completed.
Returns
-------
None if the function is still executing, and the returncode otherwise
"""
return self.returncode
def wait(self, timeout=None):
"""Waits for the process to finish and returns the return code."""
self.join()
self._restore_sigint()
return self.returncode
#
# SIGINT handler
#
def _signal_int(self, signum, frame):
"""Signal handler for SIGINT - Ctrl+C may have been pressed."""
# Check if we have already been interrupted. This should prevent
# the possibility of infinite recursion.
if self._interrupted:
return
self._interrupted = True
        # close file handles here to stop any processes piped to us.
handles = (
self.p2cread,
self.p2cwrite,
self.c2pread,
self.c2pwrite,
self.errread,
self.errwrite,
)
for handle in handles:
safe_fdclose(handle)
if self.poll() is not None:
self._restore_sigint(frame=frame)
if on_main_thread():
signal.pthread_kill(threading.get_ident(), signal.SIGINT)
def _restore_sigint(self, frame=None):
old = self.old_int_handler
if old is not None:
if on_main_thread():
signal.signal(signal.SIGINT, old)
self.old_int_handler = None
if frame is not None:
if old is not None and old is not self._signal_int:
old(signal.SIGINT, frame)
if self._interrupted:
self.returncode = 1
# The code below (_get_devnull, _get_handles, and _make_inheritable) comes
# from subprocess.py in the Python 3.4.2 Standard Library
def _get_devnull(self):
if not hasattr(self, "_devnull"):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
if ON_WINDOWS:
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle,
_winapi.GetCurrentProcess(),
0,
1,
_winapi.DUPLICATE_SAME_ACCESS,
)
return Handle(h)
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == subprocess.PIPE:
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == subprocess.DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == subprocess.PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == subprocess.DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == subprocess.PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == subprocess.STDOUT:
errwrite = c2pwrite
elif stderr == subprocess.DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
else:
# POSIX versions
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == subprocess.PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == subprocess.DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == subprocess.PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == subprocess.DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == subprocess.PIPE:
errread, errwrite = os.pipe()
elif stderr == subprocess.STDOUT:
errwrite = c2pwrite
elif stderr == subprocess.DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
#
# Foreground Thread Process Proxies
#
class ProcProxy(object):
"""This is process proxy class that runs its alias functions on the
same thread that it was called from, which is typically the main thread.
This prevents the process from running on a background thread, but enables
    debugger and profiler tools (functions) to be run on the same thread as
    the code they are attempting to debug.
"""
def __init__(
self,
f,
args,
stdin=None,
stdout=None,
stderr=None,
universal_newlines=False,
env=None,
):
self.orig_f = f
self.f = partial_proxy(f)
self.args = args
self.pid = os.getpid()
self.returncode = None
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.universal_newlines = universal_newlines
self.env = env
def poll(self):
"""Check if the function has completed via the returncode or None.
"""
return self.returncode
def wait(self, timeout=None):
"""Runs the function and returns the result. Timeout argument only
present for API compatibility.
"""
if self.f is None:
return 0
env = builtins.__xonsh__.env
enc = env.get("XONSH_ENCODING")
err = env.get("XONSH_ENCODING_ERRORS")
spec = self._wait_and_getattr("spec")
# set file handles
if self.stdin is None:
stdin = None
else:
if isinstance(self.stdin, int):
inbuf = io.open(self.stdin, "rb", -1)
else:
inbuf = self.stdin
stdin = io.TextIOWrapper(inbuf, encoding=enc, errors=err)
stdout = self._pick_buf(self.stdout, sys.stdout, enc, err)
stderr = self._pick_buf(self.stderr, sys.stderr, enc, err)
# run the actual function
try:
r = self.f(self.args, stdin, stdout, stderr, spec, spec.stack)
except Exception:
print_exception()
r = 1
self.returncode = parse_proxy_return(r, stdout, stderr)
safe_flush(stdout)
safe_flush(stderr)
return self.returncode
@staticmethod
def _pick_buf(handle, sysbuf, enc, err):
if handle is None or handle is sysbuf:
buf = sysbuf
elif isinstance(handle, int):
if handle < 3:
buf = sysbuf
else:
buf = io.TextIOWrapper(
io.open(handle, "wb", -1), encoding=enc, errors=err
)
elif hasattr(handle, "encoding"):
# must be a text stream, no need to wrap.
buf = handle
else:
# must be a binary stream, should wrap it.
buf = io.TextIOWrapper(handle, encoding=enc, errors=err)
return buf
def _wait_and_getattr(self, name):
"""make sure the instance has a certain attr, and return it."""
while not hasattr(self, name):
time.sleep(1e-7)
return getattr(self, name)
@lazyobject
def SIGNAL_MESSAGES():
sm = {
signal.SIGABRT: "Aborted",
signal.SIGFPE: "Floating point exception",
signal.SIGILL: "Illegal instructions",
signal.SIGTERM: "Terminated",
signal.SIGSEGV: "Segmentation fault",
}
if ON_POSIX:
sm.update(
{signal.SIGQUIT: "Quit", signal.SIGHUP: "Hangup", signal.SIGKILL: "Killed"}
)
return sm
def safe_readlines(handle, hint=-1):
"""Attempts to read lines without throwing an error."""
try:
lines = handle.readlines(hint)
except OSError:
lines = []
return lines
def safe_readable(handle):
"""Attempts to find if the handle is readable without throwing an error."""
try:
status = handle.readable()
except (OSError, ValueError):
status = False
return status
def update_fg_process_group(pipeline_group, background):
if background:
return False
if not ON_POSIX:
return False
env = builtins.__xonsh__.env
if not env.get("XONSH_INTERACTIVE"):
return False
return give_terminal_to(pipeline_group)
class CommandPipeline:
"""Represents a subprocess-mode command pipeline."""
attrnames = (
"stdin",
"stdout",
"stderr",
"pid",
"returncode",
"args",
"alias",
"stdin_redirect",
"stdout_redirect",
"stderr_redirect",
"timestamps",
"executed_cmd",
"input",
"output",
"errors",
)
nonblocking = (io.BytesIO, NonBlockingFDReader, ConsoleParallelReader)
def __init__(self, specs):
"""
Parameters
----------
specs : list of SubprocSpec
Process specifications
Attributes
----------
spec : SubprocSpec
The last specification in specs
proc : Popen-like
The process in procs
ended : bool
Boolean for if the command has stopped executing.
input : str
A string of the standard input.
output : str
A string of the standard output.
errors : str
A string of the standard error.
lines : list of str
The output lines
starttime : floats or None
Pipeline start timestamp.
"""
self.starttime = None
self.ended = False
self.procs = []
self.specs = specs
self.spec = specs[-1]
self.captured = specs[-1].captured
self.input = self._output = self.errors = self.endtime = None
self._closed_handle_cache = {}
self.lines = []
self._stderr_prefix = self._stderr_postfix = None
self.term_pgid = None
background = self.spec.background
pipeline_group = None
for spec in specs:
if self.starttime is None:
self.starttime = time.time()
try:
proc = spec.run(pipeline_group=pipeline_group)
except Exception:
print_exception()
self._return_terminal()
self.proc = None
return
if (
proc.pid
and pipeline_group is None
and not spec.is_proxy
and self.captured != "object"
):
pipeline_group = proc.pid
if update_fg_process_group(pipeline_group, background):
self.term_pgid = pipeline_group
self.procs.append(proc)
self.proc = self.procs[-1]
def __repr__(self):
s = self.__class__.__name__ + "("
s += ", ".join(a + "=" + str(getattr(self, a)) for a in self.attrnames)
s += ")"
return s
def __bool__(self):
return self.returncode == 0
def __len__(self):
return len(self.procs)
def __iter__(self):
"""Iterates through stdout and returns the lines, converting to
strings and universal newlines if needed.
"""
if self.ended:
yield from iter(self.lines)
else:
yield from self.tee_stdout()
def iterraw(self):
"""Iterates through the last stdout, and returns the lines
exactly as found.
"""
# get appropriate handles
spec = self.spec
proc = self.proc
if proc is None:
return
timeout = builtins.__xonsh__.env.get("XONSH_PROC_FREQUENCY")
# get the correct stdout
stdout = proc.stdout
if (
stdout is None or spec.stdout is None or not safe_readable(stdout)
) and spec.captured_stdout is not None:
stdout = spec.captured_stdout
if hasattr(stdout, "buffer"):
stdout = stdout.buffer
if stdout is not None and not isinstance(stdout, self.nonblocking):
stdout = NonBlockingFDReader(stdout.fileno(), timeout=timeout)
if (
not stdout
or self.captured == "stdout"
or not safe_readable(stdout)
or not spec.threadable
):
# we get here if the process is not threadable or the
# class is the real Popen
PrevProcCloser(pipeline=self)
task = wait_for_active_job()
if task is None or task["status"] != "stopped":
proc.wait()
self._endtime()
if self.captured == "object":
self.end(tee_output=False)
elif self.captured == "hiddenobject" and stdout:
b = stdout.read()
lines = b.splitlines(keepends=True)
yield from lines
self.end(tee_output=False)
elif self.captured == "stdout":
b = stdout.read()
s = self._decode_uninew(b, universal_newlines=True)
self.lines = s.splitlines(keepends=True)
return
# get the correct stderr
stderr = proc.stderr
if (
stderr is None or spec.stderr is None or not safe_readable(stderr)
) and spec.captured_stderr is not None:
stderr = spec.captured_stderr
if hasattr(stderr, "buffer"):
stderr = stderr.buffer
if stderr is not None and not isinstance(stderr, self.nonblocking):
stderr = NonBlockingFDReader(stderr.fileno(), timeout=timeout)
# read from process while it is running
check_prev_done = len(self.procs) == 1
prev_end_time = None
i = j = cnt = 1
while proc.poll() is None:
if getattr(proc, "suspended", False):
return
elif getattr(proc, "in_alt_mode", False):
time.sleep(0.1) # probably not leaving any time soon
continue
elif not check_prev_done:
# In the case of pipelines with more than one command
# we should give the commands a little time
# to start up fully. This is particularly true for
# GNU Parallel, which has a long startup time.
pass
elif self._prev_procs_done():
self._close_prev_procs()
proc.prevs_are_closed = True
break
stdout_lines = safe_readlines(stdout, 1024)
i = len(stdout_lines)
if i != 0:
yield from stdout_lines
stderr_lines = safe_readlines(stderr, 1024)
j = len(stderr_lines)
if j != 0:
self.stream_stderr(stderr_lines)
if not check_prev_done:
# if we are piping...
if stdout_lines or stderr_lines:
# see if we have some output.
check_prev_done = True
elif prev_end_time is None:
# or see if we already know that the next-to-last
# proc in the pipeline has ended.
if self._prev_procs_done():
# if it has, record the time
prev_end_time = time.time()
elif time.time() - prev_end_time >= 0.1:
# if we still don't have any output, even though the
# next-to-last proc has finished, wait a bit to make
# sure we have fully started up, etc.
check_prev_done = True
# this is for CPU usage
if i + j == 0:
cnt = min(cnt + 1, 1000)
else:
cnt = 1
time.sleep(timeout * cnt)
# read from process now that it is over
yield from safe_readlines(stdout)
self.stream_stderr(safe_readlines(stderr))
proc.wait()
self._endtime()
yield from safe_readlines(stdout)
self.stream_stderr(safe_readlines(stderr))
if self.captured == "object":
self.end(tee_output=False)
def itercheck(self):
"""Iterates through the command lines and throws an error if the
returncode is non-zero.
"""
yield from self
if self.returncode:
            # self is included to give access to stderr and other details,
            # which is useful when the instance isn't assigned to a variable in the shell.
raise XonshCalledProcessError(
self.returncode, self.executed_cmd, self.stdout, self.stderr, self
)
def tee_stdout(self):
"""Writes the process stdout to the output variable, line-by-line, and
yields each line. This may optionally accept lines (in bytes) to iterate
over, in which case it does not call iterraw().
"""
env = builtins.__xonsh__.env
enc = env.get("XONSH_ENCODING")
err = env.get("XONSH_ENCODING_ERRORS")
lines = self.lines
stream = self.captured not in STDOUT_CAPTURE_KINDS
if stream and not self.spec.stdout:
stream = False
stdout_has_buffer = hasattr(sys.stdout, "buffer")
nl = b"\n"
cr = b"\r"
crnl = b"\r\n"
for line in self.iterraw():
# write to stdout line ASAP, if needed
if stream:
if stdout_has_buffer:
sys.stdout.buffer.write(line)
else:
sys.stdout.write(line.decode(encoding=enc, errors=err))
sys.stdout.flush()
# do some munging of the line before we return it
if line.endswith(crnl):
line = line[:-2] + nl
elif line.endswith(cr):
line = line[:-1] + nl
line = RE_HIDE_ESCAPE.sub(b"", line)
line = line.decode(encoding=enc, errors=err)
# tee it up!
lines.append(line)
yield line
def stream_stderr(self, lines):
"""Streams lines to sys.stderr and the errors attribute."""
if not lines:
return
env = builtins.__xonsh__.env
enc = env.get("XONSH_ENCODING")
err = env.get("XONSH_ENCODING_ERRORS")
b = b"".join(lines)
if self.stderr_prefix:
b = self.stderr_prefix + b
if self.stderr_postfix:
b += self.stderr_postfix
stderr_has_buffer = hasattr(sys.stderr, "buffer")
# write bytes to std stream
if stderr_has_buffer:
sys.stderr.buffer.write(b)
else:
sys.stderr.write(b.decode(encoding=enc, errors=err))
sys.stderr.flush()
# do some munging of the line before we save it to the attr
b = b.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
b = RE_HIDE_ESCAPE.sub(b"", b)
env = builtins.__xonsh__.env
s = b.decode(
encoding=env.get("XONSH_ENCODING"), errors=env.get("XONSH_ENCODING_ERRORS")
)
# set the errors
if self.errors is None:
self.errors = s
else:
self.errors += s
def _decode_uninew(self, b, universal_newlines=None):
"""Decode bytes into a str and apply universal newlines as needed."""
if not b:
return ""
if isinstance(b, bytes):
env = builtins.__xonsh__.env
s = b.decode(
encoding=env.get("XONSH_ENCODING"),
errors=env.get("XONSH_ENCODING_ERRORS"),
)
else:
s = b
if universal_newlines or self.spec.universal_newlines:
s = s.replace("\r\n", "\n").replace("\r", "\n")
return s
#
# Ending methods
#
def end(self, tee_output=True):
"""
End the pipeline, return the controlling terminal if needed.
        The main work is done in self._end().
"""
if self.ended:
return
self._end(tee_output=tee_output)
self._return_terminal()
def _end(self, tee_output):
"""Waits for the command to complete and then runs any closing and
cleanup procedures that need to be run.
"""
if tee_output:
for _ in self.tee_stdout():
pass
self._endtime()
# since we are driven by getting output, input may not be available
# until the command has completed.
self._set_input()
self._close_prev_procs()
self._close_proc()
self._check_signal()
self._apply_to_history()
self.ended = True
self._raise_subproc_error()
def _return_terminal(self):
if ON_WINDOWS or not ON_POSIX:
return
pgid = os.getpgid(0)
if self.term_pgid is None or pgid == self.term_pgid:
return
if give_terminal_to(pgid): # if gave term succeed
self.term_pgid = pgid
if builtins.__xonsh__.shell is not None:
# restoring sanity could probably be called whenever we return
# control to the shell. But it only seems to matter after a
# ^Z event. This *has* to be called after we give the terminal
# back to the shell.
builtins.__xonsh__.shell.shell.restore_tty_sanity()
def resume(self, job, tee_output=True):
self.ended = False
if give_terminal_to(job["pgrp"]):
self.term_pgid = job["pgrp"]
_continue(job)
self.end(tee_output=tee_output)
def _endtime(self):
"""Sets the closing timestamp if it hasn't been already."""
if self.endtime is None:
self.endtime = time.time()
def _safe_close(self, handle):
safe_fdclose(handle, cache=self._closed_handle_cache)
def _prev_procs_done(self):
"""Boolean for if all previous processes have completed. If there
is only a single process in the pipeline, this returns False.
"""
any_running = False
for s, p in zip(self.specs[:-1], self.procs[:-1]):
if p.poll() is None:
any_running = True
continue
self._safe_close(s.stdin)
self._safe_close(s.stdout)
self._safe_close(s.stderr)
if p is None:
continue
self._safe_close(p.stdin)
self._safe_close(p.stdout)
self._safe_close(p.stderr)
return False if any_running else (len(self) > 1)
def _close_prev_procs(self):
"""Closes all but the last proc's stdout."""
for s, p in zip(self.specs[:-1], self.procs[:-1]):
self._safe_close(s.stdin)
self._safe_close(s.stdout)
self._safe_close(s.stderr)
if p is None:
continue
self._safe_close(p.stdin)
self._safe_close(p.stdout)
self._safe_close(p.stderr)
def _close_proc(self):
"""Closes last proc's stdout."""
s = self.spec
p = self.proc
self._safe_close(s.stdin)
self._safe_close(s.stdout)
self._safe_close(s.stderr)
self._safe_close(s.captured_stdout)
self._safe_close(s.captured_stderr)
if p is None:
return
self._safe_close(p.stdin)
self._safe_close(p.stdout)
self._safe_close(p.stderr)
def _set_input(self):
"""Sets the input variable."""
if self.proc is None:
return
stdin = self.proc.stdin
if (
stdin is None
or isinstance(stdin, int)
or stdin.closed
or not stdin.seekable()
or not safe_readable(stdin)
):
input = b""
else:
stdin.seek(0)
input = stdin.read()
self.input = self._decode_uninew(input)
def _check_signal(self):
"""Checks if a signal was received and issues a message."""
proc_signal = getattr(self.proc, "signal", None)
if proc_signal is None:
return
sig, core = proc_signal
sig_str = SIGNAL_MESSAGES.get(sig)
if sig_str:
if core:
sig_str += " (core dumped)"
print(sig_str, file=sys.stderr)
if self.errors is not None:
self.errors += sig_str + "\n"
def _apply_to_history(self):
"""Applies the results to the current history object."""
hist = builtins.__xonsh__.history
if hist is not None:
hist.last_cmd_rtn = 1 if self.proc is None else self.proc.returncode
def _raise_subproc_error(self):
"""Raises a subprocess error, if we are supposed to."""
spec = self.spec
rtn = self.returncode
if (
not spec.is_proxy
and rtn is not None
and rtn > 0
and builtins.__xonsh__.env.get("RAISE_SUBPROC_ERROR")
):
try:
raise subprocess.CalledProcessError(rtn, spec.cmd, output=self.output)
finally:
                # this is needed to get a working terminal in interactive mode
self._return_terminal()
#
# Properties
#
@property
def stdin(self):
"""Process stdin."""
return self.proc.stdin
@property
def stdout(self):
"""Process stdout."""
return self.proc.stdout
@property
def stderr(self):
"""Process stderr."""
return self.proc.stderr
@property
def inp(self):
"""Creates normalized input string from args."""
return " ".join(self.args)
@property
def output(self):
"""Non-blocking, lazy access to output"""
if self.ended:
if self._output is None:
self._output = "".join(self.lines)
return self._output
else:
return "".join(self.lines)
@property
def out(self):
"""Output value as a str."""
self.end()
return self.output
@property
def err(self):
"""Error messages as a string."""
self.end()
return self.errors
@property
def pid(self):
"""Process identifier."""
return self.proc.pid
@property
def returncode(self):
"""Process return code, waits until command is completed."""
self.end()
if self.proc is None:
return 1
return self.proc.returncode
rtn = returncode
@property
def args(self):
"""Arguments to the process."""
return self.spec.args
@property
def rtn(self):
"""Alias to return code."""
return self.returncode
@property
def alias(self):
"""Alias the process used."""
return self.spec.alias
@property
def stdin_redirect(self):
"""Redirection used for stdin."""
stdin = self.spec.stdin
name = getattr(stdin, "name", "<stdin>")
mode = getattr(stdin, "mode", "r")
return [name, mode]
@property
def stdout_redirect(self):
"""Redirection used for stdout."""
stdout = self.spec.stdout
name = getattr(stdout, "name", "<stdout>")
mode = getattr(stdout, "mode", "a")
return [name, mode]
@property
def stderr_redirect(self):
"""Redirection used for stderr."""
stderr = self.spec.stderr
name = getattr(stderr, "name", "<stderr>")
mode = getattr(stderr, "mode", "r")
return [name, mode]
@property
def timestamps(self):
"""The start and end time stamps."""
return [self.starttime, self.endtime]
@property
def executed_cmd(self):
"""The resolve and executed command."""
return self.spec.cmd
@property
def stderr_prefix(self):
"""Prefix to print in front of stderr, as bytes."""
p = self._stderr_prefix
if p is None:
env = builtins.__xonsh__.env
t = env.get("XONSH_STDERR_PREFIX")
s = format_std_prepost(t, env=env)
p = s.encode(
encoding=env.get("XONSH_ENCODING"),
errors=env.get("XONSH_ENCODING_ERRORS"),
)
self._stderr_prefix = p
return p
@property
def stderr_postfix(self):
"""Postfix to print after stderr, as bytes."""
p = self._stderr_postfix
if p is None:
env = builtins.__xonsh__.env
t = env.get("XONSH_STDERR_POSTFIX")
s = format_std_prepost(t, env=env)
p = s.encode(
encoding=env.get("XONSH_ENCODING"),
errors=env.get("XONSH_ENCODING_ERRORS"),
)
self._stderr_postfix = p
return p
class HiddenCommandPipeline(CommandPipeline):
def __repr__(self):
return ""
def pause_call_resume(p, f, *args, **kwargs):
"""For a process p, this will call a function f with the remaining args and
and kwargs. If the process cannot accept signals, the function will be called.
Parameters
----------
p : Popen object or similar
f : callable
args : remaining arguments
kwargs : keyword arguments
"""
can_send_signal = (
hasattr(p, "send_signal") and ON_POSIX and not ON_MSYS and not ON_CYGWIN
)
if can_send_signal:
try:
p.send_signal(signal.SIGSTOP)
except PermissionError:
pass
try:
f(*args, **kwargs)
except Exception:
pass
if can_send_signal:
p.send_signal(signal.SIGCONT)
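# Illustrative sketch (not part of the original module, POSIX-only): pause a
# child process, do some bookkeeping while it is stopped, then let it continue.
def _example_pause_call_resume():
    proc = subprocess.Popen(["sleep", "5"])
    pause_call_resume(proc, print, "bookkeeping while the child is stopped")
    proc.terminate()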
class PrevProcCloser(threading.Thread):
"""Previous process closer thread for pipelines whose last command
is itself unthreadable. This makes sure that the pipeline is
driven forward and does not deadlock.
"""
def __init__(self, pipeline):
"""
Parameters
----------
pipeline : CommandPipeline
The pipeline whose prev procs we should close.
"""
self.pipeline = pipeline
super().__init__()
self.daemon = True
self.start()
def run(self):
"""Runs the closing algorithm."""
pipeline = self.pipeline
check_prev_done = len(pipeline.procs) == 1
if check_prev_done:
return
proc = pipeline.proc
prev_end_time = None
timeout = builtins.__xonsh__.env.get("XONSH_PROC_FREQUENCY")
sleeptime = min(timeout * 1000, 0.1)
while proc.poll() is None:
if not check_prev_done:
# In the case of pipelines with more than one command
# we should give the commands a little time
# to start up fully. This is particularly true for
# GNU Parallel, which has a long startup time.
pass
elif pipeline._prev_procs_done():
pipeline._close_prev_procs()
proc.prevs_are_closed = True
break
if not check_prev_done:
# if we are piping...
if prev_end_time is None:
# or see if we already know that the next-to-last
# proc in the pipeline has ended.
if pipeline._prev_procs_done():
# if it has, record the time
prev_end_time = time.time()
elif time.time() - prev_end_time >= 0.1:
# if we still don't have any output, even though the
# next-to-last proc has finished, wait a bit to make
# sure we have fully started up, etc.
check_prev_done = True
# this is for CPU usage
time.sleep(sleeptime)
|
the-stack_0_12780 | #!/usr/bin/python27
#coding:utf-8
#pylab inline
from __future__ import division
import matplotlib
matplotlib.use('TkAgg')  # the 'agg' backend does not display figures; 'TkAgg' does
import os
import numpy as np
import PIL.Image as pil
import tensorflow as tf
from SfMLearner import SfMLearner
from utils import normalize_depth_for_display
import matplotlib.pyplot as plt
import operator
img_height=128
img_width=416
# ckpt_file = 'models/model-190532' #depth
ckpt_file='checkpoints/model-117270'
fh = open('misc/sample.png', 'rb')  # open the image file in binary mode for PIL
#
# fh=open('raw_data_KITTI/2011_09_28/2011_09_28_drive_0001_sync/image_02/data/0000000012.png','r') # used for my own testing
I = pil.open(fh)  # read the image
I = I.resize((img_width, img_height), pil.ANTIALIAS)  # resize with the ANTIALIAS filter
I = np.array(I)
print(I.shape) #(128, 416, 3)
print(I[None,:,:,:].shape) #(1,128, 416, 3)
sfm = SfMLearner() #initialize
sfm.setup_inference(img_height,
img_width,
mode='depth')
# this runs build_depth_test_graph()
saver = tf.train.Saver([var for
                        var in tf.model_variables()])  # saves and restores model variables to/from checkpoints
with tf.Session() as sess:
saver.restore(sess, ckpt_file)
    pred = sfm.inference(I[None,:,:,:], sess, mode='depth')  # indexing with None adds a batch axis
print(pred) #is a dictionary
print(pred['depth'][0,:,:,0])
print(pred['depth'][0,:,:,0].shape)
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.imshow(I)
plt.subplot(1,2,2); plt.imshow(normalize_depth_for_display(pred['depth'][0,:,:,0]))
plt.show()
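# Illustrative follow-up (not part of the original script; reuses the `pred`
# dict computed above): the colorized depth map could also be written to disk
# instead of only being displayed, e.g.
#
#     depth_vis = normalize_depth_for_display(pred['depth'][0, :, :, 0])
#     plt.imsave('depth_vis.png', depth_vis)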
|
the-stack_0_12781 | import numpy as np
import pytest
import unyt as u
from unyt.testing import assert_allclose_units
from gmso import Topology
from gmso.formats.mol2 import from_mol2
from gmso.tests.base_test import BaseTest
from gmso.utils.io import get_fn
class TestMol2(BaseTest):
def test_read_mol2(self):
top = Topology.load(get_fn("parmed.mol2"))
assert top.name == "parmed"
assert top.n_sites == 8
assert_allclose_units(
top.box.lengths,
([8.2693, 7.9100, 6.6460] * u.Å).to("nm"),
rtol=1e-5,
atol=1e-8,
)
assert list(top.sites)[0].element.name == "carbon"
assert_allclose_units(
list(top.sites)[0].element.mass,
np.array(1.9944733e-26) * u.kg,
rtol=1e-5,
atol=1e-8,
)
top = Topology.load(get_fn("tip3p.mol2"))
assert top.name == "tip3p"
assert top.n_sites == 3
assert_allclose_units(
top.box.lengths, 3.0130 * np.ones(3) * u.Å, rtol=1e-5, atol=1e-8
)
positions_check = [
[0.061, 0.1, 0.1],
[0.017, 0.09, 0.177],
[0.011, 0.154, 0.04],
]
for check, site in zip(positions_check, top.sites):
assert_allclose_units(
site.position,
check * u.nm,
rtol=1e-5,
atol=1e-8,
)
top = Topology.load(get_fn("vmd.mol2"))
assert top.name == "vmd"
assert top.n_sites == 6
assert len(top.bonds) == 5
assert top.bonds[0].connection_members[0] == top.sites[0]
assert top.box == None
with pytest.warns(
UserWarning,
match=r"No charges were detected for site C with index 1",
):
top = Topology.load(get_fn("ethane.mol2"))
assert list(top.sites)[0].charge is None
with pytest.warns(
UserWarning,
match=r"No element detected for site C with index1\, consider manually adding the element to the topology",
):
Topology.load(get_fn("benzene.mol2"))
def test_residue(self):
top = Topology.load(get_fn("ethanol_aa.mol2"))
assert np.all([site.residue_name == "ETO" for site in top.sites])
assert np.all([site.residue_number == 1 for site in top.sites])
top = Topology.load(get_fn("benzene_ua.mol2"), site_type="lj")
assert np.all(
[
site.residue_name == "BEN1"
for site in top.iter_sites("residue_name", "BEN1")
]
)
assert np.all(
[
site.residue_number == 1
for site in top.iter_sites("residue_name", "BEN1")
]
)
assert np.all(
[
site.residue_name == "BEN2"
for site in top.iter_sites("residue_name", "BEN2")
]
)
assert np.all(
[
site.residue_number == 2
for site in top.iter_sites("residue_name", "BEN2")
]
)
def test_lj_system(self):
top = Topology.load(get_fn("methane.mol2"), site_type="lj")
assert np.all([site.element == None for site in top.sites])
def test_wrong_path(self):
with pytest.raises(
OSError, match=r"Provided path to file that does not exist"
):
Topology.load("not_a_file.mol2")
top = Topology.load(get_fn("ethanegro.mol2"))
assert len(top.sites) == 0
assert len(top.bonds) == 0
def test_broken_files(self):
with pytest.warns(
UserWarning,
match=r"The record type indicator @<TRIPOS>MOLECULE_extra_text\n is not supported. Skipping current section and moving to the next RTI header.",
):
Topology.load(get_fn("broken.mol2"))
with pytest.warns(
UserWarning,
match=r"This mol2 file has two boxes to be read in, only reading in one with dimensions Box\(a=0.72",
):
Topology.load(get_fn("broken.mol2"))
|
the-stack_0_12782 | from . import common
import pandas as pd
import os
FILENAME_ATTR = 'Filename'
VOLUME_ATTR = 'Volume'
URL_ATTR = 'Mirror'
class NoiseDownloader:
def __init__(
self,
output_files_key,
output_volumes_key,
data,
download_directory):
self.output_files_key = output_files_key
self.output_volumes_key = output_volumes_key
self.data = data
self.download_directory = download_directory
def execute(self, context):
output_files = context[self.output_files_key] = []
output_volumes = context[self.output_volumes_key] = []
common.create_directory(self.download_directory)
data = pd.read_csv(self.data)
for index, row in data.iterrows():
output_volumes.append(float(row[VOLUME_ATTR]))
output_file = os.path.join(
self.download_directory, row[FILENAME_ATTR])
output_files.append(output_file)
common.fetch(row[URL_ATTR], output_file)
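# A minimal usage sketch (hypothetical context keys, CSV path, and download directory;
# assumes the CSV provides the Filename/Volume/Mirror columns referenced above):
#   downloader = NoiseDownloader('noise_files', 'noise_volumes', 'noise.csv', 'downloads')
#   context = {}
#   downloader.execute(context)
#   # context['noise_files']   -> list of downloaded file paths
#   # context['noise_volumes'] -> list of per-file volumes as floats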
|
the-stack_0_12783 | """
cwpair2.py
Takes a list of called peaks on both strands and produces a list of matched pairs and a list
of unmatched orphans using a specified method for finding matched pairs. Methods for finding
matched pairs are mode, closest, largest or all, where the analysis is run for each method
Input: list of one or more gff format files
Output: files produced for each input/mode combination:
MP (matched_pair), D (details), O (orphans), P (frequency preview plot), F (frequency final plot),
C (statistics graph), statistics.tabular
"""
import argparse
import csv
import cwpair2_util
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='inputs', action='append', nargs=2, help="Input datasets")
parser.add_argument('--method', dest='method', default='mode', help='Method of finding match.')
parser.add_argument('--up_distance', dest='up_distance', type=int, default=50, help='Distance upstream from a pair.')
parser.add_argument('--down_distance', dest='down_distance', type=int, default=100, help='Distance downstream of a pair.')
parser.add_argument('--binsize', dest='binsize', type=int, default=1, help='Width of bins for plots and mode.')
parser.add_argument('--threshold_format', dest='threshold_format', help='Percentage to filter the 95th percentile.')
parser.add_argument('--relative_threshold', dest='relative_threshold', type=float, default=0.0, help='Percentage to filter the 95th percentile.')
parser.add_argument('--absolute_threshold', dest='absolute_threshold', type=float, default=0.0, help='Absolute value to filter.')
parser.add_argument('--output_files', dest='output_files', default='matched_pair', help='Restrict output dataset collections.')
parser.add_argument('--statistics_output', dest='statistics_output', help='Statistics output file.')
args = parser.parse_args()
cwpair2_util.create_directories()
statistics = []
if args.absolute_threshold > 0:
threshold = args.absolute_threshold
elif args.relative_threshold > 0:
threshold = args.relative_threshold / 100.0
else:
threshold = 0
for (dataset_path, hid) in args.inputs:
stats = cwpair2_util.process_file(dataset_path,
hid,
args.method,
threshold,
args.up_distance,
args.down_distance,
args.binsize,
args.output_files)
statistics.extend(stats)
# Accumulate statistics.
by_file = {}
for stats in statistics:
# Skip "None" statistics from failed files
if not stats:
continue
path = stats['stats_path']
if path not in by_file:
by_file[path] = []
by_file[path].append(stats)
# Write tabular statistics file.
keys = ['fname', 'final_mode', 'preview_mode', 'perc95', 'paired', 'orphans']
statistics_out = csv.writer(open(args.statistics_output, 'wt'), delimiter='\t', lineterminator="\n")
statistics_out.writerow(keys)
for file_path, statistics in by_file.items():
for stats in statistics:
statistics_out.writerow([stats[key] for key in keys])
|
the-stack_0_12787 | ################################################################################
# Copyright (C) 2016 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
# This script only gets called by CMake
from Common import globalParameters, HR, print1, print2, printExit, ensurePath, CHeader, CMakeHeader, assignGlobalParameters, ProgressBar
from SolutionStructs import Solution
import YAMLIO
from SolutionWriter import SolutionWriter
from KernelWriterSource import KernelWriterSource
from KernelWriterAssembly import KernelWriterAssembly
import os
import os.path
import argparse
import sys
from shutil import copy as shutil_copy
################################################################################
# Write Solutions and Kernels for BenchmarkClient or LibraryClient
################################################################################
def writeSolutionsAndKernels(outputPath, solutions, kernels, kernelsBetaOnly, \
solutionWriter, kernelWriterSource, kernelWriterAssembly):
print1("# Writing Solutions and Kernels")
if not globalParameters["MergeFiles"]:
ensurePath(os.path.join(outputPath, "Solutions"))
ensurePath(os.path.join(outputPath, "Kernels"))
progressBar = ProgressBar(len(solutions)+len(kernels))
##############################################################################
# Write Solutions
##############################################################################
if globalParameters["MergeFiles"]:
solutionSourceFile = open(os.path.join(outputPath, \
"Solutions.cpp"), "w")
solutionHeaderFile = open(os.path.join(outputPath, \
"Solutions.h"), "w")
if globalParameters["MergeFiles"]:
solutionSourceFile.write(CHeader)
solutionHeaderFile.write(CHeader)
solutionSourceFile.write("#include \"Solutions.h\"\n")
solutionHeaderFile.write("#include \"TensileTypes.h\"\n")
solutionHeaderFile.write("#include \"Kernels.h\"\n")
solutionHeaderFile.write("#include \"SolutionHelper.h\"\n")
solutionHeaderFile.write("#include \"Tools.h\"\n")
for solution in solutions:
# get solution name
if not globalParameters["MergeFiles"]:
solutionFileName = solutionWriter.getSolutionName(solution)
# write solution.cpp
if not globalParameters["MergeFiles"]:
solutionSourceFile = open(os.path.join(outputPath, \
"Solutions", solutionFileName+".cpp"), "w")
solutionSourceFile.write(CHeader)
solutionSourceFile.write( \
solutionWriter.getSourceFileString(solution))
if not globalParameters["MergeFiles"]:
solutionSourceFile.close()
# write solution.h
if not globalParameters["MergeFiles"]:
solutionHeaderFile = open(os.path.join(outputPath, \
"Solutions", solutionFileName+".h"), "w")
solutionHeaderFile.write(CHeader)
solutionHeaderFile.write( \
solutionWriter.getHeaderFileString(solution))
if not globalParameters["MergeFiles"]:
solutionHeaderFile.close()
progressBar.increment()
# close merged
if globalParameters["MergeFiles"]:
solutionHeaderFile.close()
##############################################################################
# Write Kernels
##############################################################################
if globalParameters["MergeFiles"]:
kernelSourceFile = open(os.path.join(outputPath, \
"Kernels.cpp"), "w")
kernelHeaderFile = open(os.path.join(outputPath, \
"Kernels.h"), "w")
kernelSourceFile.write(CHeader)
kernelHeaderFile.write(CHeader)
kernelSourceFile.write("#include \"Kernels.h\"\n")
kernelHeaderFile.write("#pragma once\n")
if globalParameters["RuntimeLanguage"] == "HIP":
kernelHeaderFile.write("#define HCC_ENABLE_ACCELERATOR_PRINTF\n\n")
kernelHeaderFile.write("#include <hip/hip_runtime.h>\n")
kernelHeaderFile.write("#include \"TensileTypes.h\"\n")
kernelHeaderFile.write("#include \"KernelHeader.h\"\n")
else:
kernelHeaderFile.write("#include <string>\n")
# tensor contraction kernels
for kernel in kernels:
kernelWriter = kernelWriterSource if kernel["KernelLanguage"] == "Source" else kernelWriterAssembly
# get kernel name
if not globalParameters["MergeFiles"]:
kernelName = kernelWriter.getKernelName(kernel)
# write kernel.cpp
if not globalParameters["MergeFiles"]:
kernelSourceFile = open(os.path.join(outputPath, \
"Kernels", kernelName+".cpp"), "w")
kernelSourceFile.write(CHeader)
kernelSourceFile.write( kernelWriter.getSourceFileString(kernel))
if not globalParameters["MergeFiles"]:
kernelSourceFile.close()
# write kernel.h
if not globalParameters["MergeFiles"]:
kernelHeaderFile = open(os.path.join(outputPath, \
"Kernels", kernelName+".h"), "w")
kernelHeaderFile.write(CHeader)
kernelHeaderFile.write( kernelWriter.getHeaderFileString(kernel))
if not globalParameters["MergeFiles"]:
kernelHeaderFile.close()
progressBar.increment()
# beta-only kernels
for kernel in kernelsBetaOnly:
kernelWriter = kernelWriterSource
kernelName = kernelWriter.getKernelNameBetaOnly(kernel)
# write kernel.cpp
if not globalParameters["MergeFiles"]:
kernelSourceFile = open(os.path.join(outputPath, \
"Kernels", kernelName+".cpp"), "w")
kernelSourceFile.write(CHeader)
kernelSourceFile.write( kernelWriter.getSourceFileStringBetaOnly(kernel))
if not globalParameters["MergeFiles"]:
kernelSourceFile.close()
# write kernel.h
if not globalParameters["MergeFiles"]:
kernelHeaderFile = open(os.path.join(outputPath, \
"Kernels", kernelName + ".h"), "w")
kernelHeaderFile.write(CHeader)
kernelHeaderFile.write( kernelWriter.getHeaderFileStringBetaOnly(kernel))
if not globalParameters["MergeFiles"]:
kernelHeaderFile.close()
# close merged
if globalParameters["MergeFiles"]:
kernelHeaderFile.close()
################################################################################
# Write Logic
################################################################################
def writeLogic(outputPath, logicData, solutionWriter ):
print1("# Writing Library Logic")
if not globalParameters["MergeFiles"]:
ensurePath(os.path.join(outputPath, "Logic"))
# Tensile.h
h = ""
h += "#pragma once\n"
h += "#include \"TensileTypes.h\"\n"
# TensileInternal.h
ih = ""
ih += "#include \"Tensile.h\"\n"
ih += "#include \"SolutionHelper.h\"\n"
if globalParameters["SolutionMapHash"]:
ih += "#include <unordered_map>\n"
else:
ih += "#include <map>\n"
ih += "#include <tuple>\n"
# problem type Key
problemSizeTemplate = "unsigned int, unsigned int, unsigned int"
if globalParameters["RuntimeLanguage"] == "OCL":
problemSizeTemplate += ", cl_command_queue"
ih += "typedef std::tuple<%s> ProblemSizeKey;\n" \
% (problemSizeTemplate)
# hash function
ih += "\n"
ih += "size_t tensileProblemSizeHasher( const ProblemSizeKey & problemSize ) {\n"
ih += " size_t hash = 0;\n"
ih += " // ignore lowest 4 bits; keep next 21 bits\n"
ih += " size_t hash0 = (std::get<0>(problemSize) >> 4) & ((1<<22)-1); // 21 bits of size0\n"
ih += " size_t hash1 = (std::get<1>(problemSize) >> 4) & ((1<<22)-1); // 21 bits of size1\n"
ih += " size_t hashU = (std::get<2>(problemSize) >> 4) & ((1<<22)-1); // 21 bits of sizeU\n"
ih += " // 21+21+21 = 63 bit hash\n"
ih += " hash |= hash0;\n"
ih += " hash |= hash1<<21;\n"
ih += " hash |= hashU<<42;\n"
ih += " return hash;\n"
ih += "}\n"
ih += "\n"
# Tensile.cpp
s = ""
s += "#include \"Tensile.h\"\n"
s += "#include \"TensileInternal.h\"\n"
s += "#include \"Solutions.h\"\n"
########################################
# problemType
for problemType in logicData:
# function argument list
argListSizes = solutionWriter.getArgList(problemType, False, False, False)
argListStream = solutionWriter.getArgList(problemType, False, False, True)
argListData = solutionWriter.getArgList(problemType, True, True, True)
# declare tensile_ProblemType
h += "\n// enqueue solution\n"
h += "TensileStatus tensile_%s(\n" % problemType
for i in range(0, len(argListData)):
h += " %s %s%s" \
% (argListData[i][0], argListData[i][1], \
",\n" if i < len(argListData)-1 else ");\n\n")
# declare TensileSolutionPointer_ProblemType
h += "\n// solution pointer\n"
h += "typedef TensileStatus (*TensileSolutionPointer_%s)(\n" \
% problemType
for i in range(0, len(argListData)):
h += " %s %s%s" % (argListData[i][0], argListData[i][1], ",\n" \
if i < len(argListData)-1 else ");\n\n")
# declare tensileGetSolutionPointer_ProblemType
h += "\n// get solution pointer\n"
h += "TensileSolutionPointer_%s tensileGetSolutionPointer_%s(\n" \
% (problemType, problemType)
for i in range(0, len(argListStream)):
h += " %s %s%s" \
% (argListStream[i][0], argListStream[i][1], \
",\n" if i < len(argListStream)-1 else ");\n\n")
# declare tensileName_
h += "// get solution name\n"
h += "const char * tensileGetSolutionName_%s(\n" \
% (problemType)
for i in range(0, len(argListStream)):
h += " %s %s%s" \
% (argListStream[i][0], argListStream[i][1], \
",\n" if i < len(argListStream)-1 else ");\n\n")
# get solution naming for problem type
solutionsForProblemType = []
for scheduleTuple in logicData[problemType]:
solutionsForSchedule = scheduleTuple[2]
for solution in solutionsForSchedule:
if solution not in solutionsForProblemType:
solutionsForProblemType.append(solution)
# solution names for problem type
solutionNamesForProblemType = []
for solution in solutionsForProblemType:
solutionName = solutionWriter.getSolutionName(solution)
solutionNamesForProblemType.append(solutionName)
# reset problemType source
if not globalParameters["MergeFiles"]:
filePrefix = "Tensile_%s" % (problemType)
s = "#include \"Tensile.h\"\n"
s += "#include \"TensileInternal.h\"\n"
for solutionName in solutionNamesForProblemType:
s += "#include \"%s.h\"\n" % solutionName
########################################
# implement per-Schedule functions in source
s += "/*******************************************************************************\n * Per-Schedule Functions\n *******************************************************************************/"
for scheduleTuple in logicData[problemType]:
# get logic parameters for problem type
scheduleName = scheduleTuple[0]
deviceNames = scheduleTuple[1]
solutionsForSchedule = scheduleTuple[2]
indexOrder = scheduleTuple[3]
exactLogic = scheduleTuple[4]
rangeLogic = scheduleTuple[5]
# solution names for schedule
solutionNamesForSchedule = []
for solution in solutionsForSchedule:
solutionName = solutionWriter.getSolutionName(solution)
solutionNamesForSchedule.append(solutionName)
# function tensileGetSolutionPointerUncached_Schedule_ProblemType
s += "\n// problem size -> solution logic\n"
s += "TensileSolutionPointer_%s tensileGetSolutionPointerUncached_%s_%s(\n" \
% (problemType, scheduleName, problemType)
for i in range(0, len(argListSizes)):
s += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ") {\n\n")
exactLogicStr = writeExactLogic(exactLogic, \
solutionNamesForSchedule, True)
if rangeLogic != None:
rangeLogicStr = writeRangeLogicRec(0, indexOrder, rangeLogic, \
solutionNamesForSchedule, problemType, True)
else:
rangeLogicStr = " return NULL; // none\n"
s += " /* exact mappings */\n"
s += exactLogicStr
s += "\n /* range mappings */\n"
s += rangeLogicStr
s += "\n}\n"
# function tensileGetSolutionName_Schedule_ProblemType
s += "\n// get solution name for problem size\n"
s += "const char * tensileGetSolutionName_%s_%s(\n" \
% (scheduleName, problemType)
for i in range(0, len(argListSizes)):
s += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ") {\n\n")
exactLogicStr = writeExactLogic(exactLogic, \
solutionNamesForSchedule, False)
if rangeLogic != None:
rangeLogicStr = writeRangeLogicRec(0, indexOrder, rangeLogic, \
solutionNamesForSchedule, problemType, False)
else:
rangeLogicStr = " return NULL; // none\n"
s += " /* exact mappings */\n"
s += exactLogicStr
s += "\n /* range mappings */\n"
s += rangeLogicStr
s += "\n}\n"
########################################
# implement problem-type functions in source
s += "/*******************************************************************************\n * Per-ProblemType Functions\n *******************************************************************************/"
if globalParameters["SolutionMapHash"]:
ih += "typedef std::unordered_map<ProblemSizeKey, TensileSolutionPointer_%s, std::function<size_t (ProblemSizeKey)>> Map_%s;\n" \
% (problemType, problemType )
else:
ih += "typedef std::map<ProblemSizeKey, TensileSolutionPointer_%s> Map_%s;\n" \
% (problemType, problemType)
ih += "extern Map_%s solutionMap_%s;\n" % (problemType, problemType)
# implement tensileGetSolutionPointerUncached_ProblemType
for ptr in [True, False]:
returnType = "PointerUncached" if ptr else "Name"
s += "\n// return solution %s\n" % returnType
s += ("TensileSolutionPointer_%s "%problemType) if ptr else "const char *"
s += "tensileGetSolution%s_%s(\n" \
% (returnType, problemType)
for i in range(0, len(argListStream)):
s += " %s %s%s" \
% (argListStream[i][0], argListStream[i][1], \
",\n" if i < len(argListStream)-1 else ") {\n")
# choose from schedules based on device name
# print logicData
schedules = logicData[problemType]
numSchedules = len(schedules)
if numSchedules > 1:
reordered_schedules = []
for scheduleIdx in range(0, numSchedules):
schedule = schedules[scheduleIdx]
deviceNames = schedule[1]
if deviceNames != ["fallback"]:
reordered_schedules.append(schedule)
for scheduleIdx in range(0, numSchedules):
schedule = schedules[scheduleIdx]
deviceNames = schedule[1]
if deviceNames == ["fallback"]:
reordered_schedules.append(schedule)
# get device name
if globalParameters["RuntimeLanguage"] == "OCL":
s += "get device name opencl;\n"
else:
s += "\n// get device name hip;\n"
s += " int deviceId;\n"
s += " hipCtxGetDevice(&deviceId);\n"
s += " hipDeviceProp_t deviceProperties;\n"
s += " hipGetDeviceProperties(&deviceProperties, deviceId);\n"
s += " std::string name = deviceProperties.name;\n"
s += "\n "
for scheduleIdx in range(0, numSchedules):
schedule = reordered_schedules[scheduleIdx]
scheduleName = schedule[0]
deviceNames = schedule[1]
if scheduleIdx > 0:
s += " else "
if scheduleIdx < numSchedules-1:
s += "if ("
for deviceNameIdx in range(0, len(deviceNames)):
deviceName = deviceNames[deviceNameIdx]
if deviceNameIdx > 0:
s += " || "
s += "name == \"%s\"" % deviceName
s += ")"
s += "{\n"
s += " return tensileGetSolution%s_%s_%s(" \
% ( returnType, scheduleName, problemType)
for i in range(0, len(argListSizes)):
s += "%s%s" \
% (argListSizes[i][1],
", " if i < len(argListSizes)-1 else ");\n")
s += " }\n"
else: # == 1
schedule = schedules[0]
scheduleName = schedule[0]
s += " return tensileGetSolution%s_%s_%s(" \
% ( returnType, scheduleName, problemType)
for i in range(0, len(argListSizes)):
s += "%s%s" \
% (argListSizes[i][1],
", " if i < len(argListSizes)-1 else ");\n")
s += "\n}\n"
# implement tensileGetSolutionPointer_ProblemType
s += "\n// return solution pointer; user calls it\n"
s += "Map_%s solutionMap_%s%s;\n" % (problemType, problemType, "(1024, tensileProblemSizeHasher)" if globalParameters["SolutionMapHash"] else "")
s += "TensileSolutionPointer_%s tensileGetSolutionPointer_%s(\n" \
% (problemType, problemType)
for i in range(0, len(argListStream)):
s += " %s %s%s" \
% (argListStream[i][0], argListStream[i][1], \
",\n" if i < len(argListStream)-1 else ") {\n")
# create key
s += " ProblemSizeKey key = std::make_tuple( size%s, size%s, size%s%s );\n" \
% ( \
globalParameters["IndexChars"][problemType["Index0"]], \
globalParameters["IndexChars"][problemType["Index1"]], \
globalParameters["IndexChars"][problemType["IndexUnroll"]], \
", stream" if globalParameters["RuntimeLanguage"] == "OCL" else "")
# check for key in map
s += " static std::mutex findKernelMutex;\n"
s += " std::lock_guard<std::mutex> findKernelLock(findKernelMutex);\n"
s += " Map_%s::iterator iter = solutionMap_%s.find(key);\n" \
% (problemType, problemType)
s += " if (iter != solutionMap_%s.end()) {\n" % problemType
s += " return iter->second;\n"
s += " } else {\n"
s += " TensileSolutionPointer_%s ptr = tensileGetSolutionPointerUncached_%s(\n" \
% (problemType, problemType)
for i in range(0, len(argListStream)):
s += " %s%s" \
% (argListStream[i][1], "," if i < len(argListStream)-1 else ");")
s += "\n"
s += " solutionMap_%s[key] = ptr;\n" % problemType
s += " return ptr;\n"
s += " }\n"
s += "}\n"
# declare tensile_ProblemType
s += "\n// main call to solution; enqueues a kernel\n"
s += "TensileStatus tensile_%s(\n" % problemType
for i in range(0, len(argListData)):
s += " %s %s%s" \
% (argListData[i][0], argListData[i][1], \
",\n" if i < len(argListData)-1 else ") {\n")
s += " TensileSolutionPointer_%s ptr = tensileGetSolutionPointer_%s(\n" \
% (problemType, problemType)
for i in range(0, len(argListStream)):
s += " %s%s" \
% (argListStream[i][1], ", " if i < len(argListStream)-1 else ");")
s += "\n"
s += " if ( ptr ) {\n"
s += " return ptr("
for i in range(0, len(argListData)):
s += "%s%s" \
% (argListData[i][1], ", " if i < len(argListData)-1 else ");\n")
s += " } else {\n"
s += " return tensileStatusFailure; // no solution found\n"
s += " }\n"
s += "}\n"
# open and close problemType files
if not globalParameters["MergeFiles"]:
logicSourceFile = open(os.path.join(outputPath, "Logic", \
"%s.cpp" % filePrefix), "w")
logicSourceFile.write(s)
logicSourceFile.close()
# close merged files
if globalParameters["MergeFiles"]:
logicSourceFile = open(os.path.join(outputPath, \
"Tensile.cpp"), "w")
logicSourceFile.write(s)
logicSourceFile.close()
logicHeaderFile = open(os.path.join(outputPath, \
"Tensile.h"), "w")
logicHeaderFile.write(h)
logicHeaderFile.close()
internalHeaderFile = open(os.path.join(outputPath, \
"TensileInternal.h"), "w")
internalHeaderFile.write(ih)
internalHeaderFile.close()
################################################################################
# Write Exact Logic
################################################################################
def writeExactLogic(exactLogic, solutionNames, ptr):
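# Emits a C++ if/else-if chain that maps exact problem sizes to either a solution
# function pointer (ptr=True) or the solution's name string (ptr=False), with the
# measured GFlop/s appended as a comment.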
s = ""
indent = " "
for ruleIdx in range(0, len(exactLogic)):
rule = exactLogic[ruleIdx]
problemSize = rule[0]
solutionIdx = rule[1][0]
solutionGFlops = rule[1][1]
s += indent
if ruleIdx > 0:
s += "else "
s += "if ("
s += " size%s == %u " % (globalParameters["IndexChars"][0], problemSize[0])
for i in range(1, len(problemSize)):
s += "&& size%s == %u " % (globalParameters["IndexChars"][i], \
problemSize[i])
solutionName = solutionNames[solutionIdx]
if ptr:
returnValue = solutionName
else:
returnValue = "\"%s~\"" % solutionName
s += ") return %s; // %.0f GFlop/s\n" % (returnValue, solutionGFlops)
return s
################################################################################
# Write Range Logic Recursive
################################################################################
def writeRangeLogicRec(depth, indexOrder, rangeLogic, solutionNames, \
problemType, ptr):
indexChars = globalParameters["IndexChars"]
indent = " "
indent += " "*depth
s = ""
lowestLevel = depth == len(indexOrder)-1
numRules = len(rangeLogic)
for ruleIdx in range(0, numRules):
rule = rangeLogic[ruleIdx]
threshold = rule[0]
if lowestLevel:
solutionIdx = rule[1]
solutionName = solutionNames[solutionIdx]
if ptr:
returnValue = solutionName
else:
returnValue = "\"%s\"" % solutionName
if threshold > 0:
s += "%sif (size%s <= %u) return %s;\n" \
% (indent, indexChars[indexOrder[depth]], threshold, returnValue)
else:
s += "%sreturn %s;\n" % (indent, returnValue)
else:
if threshold > 0:
s += "%sif (size%s <= %u) {\n" \
% (indent, indexChars[indexOrder[depth]], threshold)
else:
s += "%s{\n" % (indent)
s += writeRangeLogicRec(depth+1, indexOrder, rule[1], solutionNames, \
problemType, ptr)
s += "%s}\n" % (indent)
return s
################################################################################
# Write Solution Call
################################################################################
def writeSolutionCall(solutionName, problemType):
indexChars = globalParameters["IndexChars"]
s = ""
s += "%s(" % solutionName
# solution parameters
s += " dataC, dataA, dataB, alpha"
if problemType["UseBeta"]:
s += ", beta"
s += ", offsetC, offsetA, offsetB"
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
for i in range(firstStride,lastStrideC):
s += ", strideC%u%s" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
s += ", strideA%u%s" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
s += ", strideB%u%s" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
s += ", size%s" % indexChars[i]
s += ", stream, numInputEvents, inputEvents, outputEvent )"
return s
################################################################################
# Write CMake
################################################################################
def writeCMake(outputPath, solutions, kernels, libraryStaticFiles, clientName ):
print1("# Writing Custom CMake")
##############################################################################
# Min Naming
##############################################################################
if globalParameters["ShortNames"] and not globalParameters["MergeFiles"] :
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
else:
solutionSerialNaming = None
kernelSerialNaming = None
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
kernelWriterSource = KernelWriterSource( \
kernelMinNaming, kernelSerialNaming)
kernelWriterAssembly = KernelWriterAssembly( \
kernelMinNaming, kernelSerialNaming)
generatedFile = open(os.path.join(outputPath, "Generated.cmake"), "w")
generatedFile.write(CMakeHeader)
generatedFile.write("set( TensileClient_SOLUTIONS\n")
# write solution names
if globalParameters["MergeFiles"]:
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions.h\n")
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions.cpp\n")
else:
for solution in solutions:
solutionName = solutionWriter.getSolutionName(solution)
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions/%s.h\n" \
% (solutionName) )
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions/%s.cpp\n" \
% (solutionName) )
generatedFile.write(" )\n")
# write kernel names
generatedFile.write("set( TensileClient_KERNELS\n")
if globalParameters["MergeFiles"]:
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels.h\n")
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels.cpp\n")
else:
for kernel in kernels:
kernelName = kernelWriterSource.getKernelName(kernel) if kernel["KernelLanguage"] == "Source" else kernelWriterAssembly.getKernelName(kernel)
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels/%s.h\n" % (kernelName))
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels/%s.cpp\n" % kernelName)
generatedFile.write(" )\n")
generatedFile.write("set( TensileClient_SOURCE\n")
for fileName in libraryStaticFiles:
# copy file
shutil_copy( os.path.join(globalParameters["SourcePath"], fileName), \
outputPath )
# add file to cmake
generatedFile.write(" ${CMAKE_SOURCE_DIR}/%s\n" % fileName)
generatedFile.write(" )\n\n")
# close generated cmake
generatedFile.close()
################################################################################
# Tensile Create Library
################################################################################
def TensileCreateLibrary():
print1("")
print1(HR)
print1("# Tensile Create Library")
print2(HR)
print2("")
##############################################################################
# Parse Command Line Arguments
##############################################################################
print2("Arguments: %s" % sys.argv)
argParser = argparse.ArgumentParser()
argParser.add_argument("LogicPath", help="Path to LibraryLogic.yaml files.")
argParser.add_argument("OutputPath", help="Where to write library files?")
argParser.add_argument("RuntimeLanguage", help="Which runtime language?", \
choices=["OCL", "HIP", "HSA"])
argParser.add_argument("--merge-files", dest="MergeFiles", \
action="store_true")
argParser.add_argument("--no-merge-files", dest="MergeFiles", \
action="store_false")
argParser.add_argument("--short-file-names", dest="ShortNames", \
action="store_true")
argParser.add_argument("--no-short-file-names", dest="ShortNames", \
action="store_false")
argParser.add_argument("--library-print-debug", dest="LibraryPrintDebug", \
action="store_true")
argParser.add_argument("--no-library-print-debug", dest="LibraryPrintDebug", \
action="store_false")
args = argParser.parse_args()
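# A hypothetical invocation sketch (paths are illustrative; the positional arguments
# and flags correspond to the argparse options declared above):
#   python TensileCreateLibrary.py ./LibraryLogic ./build HIP --merge-files --short-file-names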
logicPath = args.LogicPath
outputPath = args.OutputPath
print2("OutputPath: %s" % outputPath)
ensurePath(outputPath)
arguments = {}
arguments["RuntimeLanguage"] = args.RuntimeLanguage
arguments["MergeFiles"] = args.MergeFiles
arguments["ShortNames"] = args.ShortNames
arguments["LibraryPrintDebug"] = args.LibraryPrintDebug
assignGlobalParameters(arguments)
if not os.path.exists(logicPath):
printExit("LogicPath %s doesn't exist" % logicPath)
logicFiles = [os.path.join(logicPath, f) for f in os.listdir(logicPath) \
if (os.path.isfile(os.path.join(logicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
print1("# LibraryLogicFiles:" % logicFiles)
for logicFile in logicFiles:
print1("# %s" % logicFile)
##############################################################################
# Parse config files
##############################################################################
solutions = []
logicData = {} # keys are problemTypes, values are schedules
for logicFileName in logicFiles:
(scheduleName, deviceNames, problemType, solutionsForSchedule, \
indexOrder, exactLogic, rangeLogic) \
= YAMLIO.readLibraryLogicForSchedule(logicFileName)
if problemType not in logicData:
logicData[problemType] = []
logicData[problemType].append((scheduleName, deviceNames, \
solutionsForSchedule, indexOrder, exactLogic, rangeLogic ))
for solution in solutionsForSchedule:
if solution not in solutions:
solutions.append(solution)
# create solution writer and kernel writer
kernels = []
kernelsBetaOnly = []
for solution in solutions:
solutionKernels = solution.getKernels()
for kernel in solutionKernels:
if kernel not in kernels:
kernels.append(kernel)
solutionKernelsBetaOnly = solution.getKernelsBetaOnly()
for kernel in solutionKernelsBetaOnly:
if kernel not in kernelsBetaOnly:
kernelsBetaOnly.append(kernel)
# if any kernels are assembly, append every ISA supported
if globalParameters["ShortNames"] and not globalParameters["MergeFiles"]:
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
else:
solutionSerialNaming = None
kernelSerialNaming = None
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
kernelWriterSource = KernelWriterSource( \
kernelMinNaming, kernelSerialNaming)
kernelWriterAssembly = KernelWriterAssembly( \
kernelMinNaming, kernelSerialNaming)
# write solutions and kernels
writeSolutionsAndKernels(outputPath, solutions, kernels, kernelsBetaOnly, \
solutionWriter, kernelWriterSource, kernelWriterAssembly)
libraryStaticFiles = [
"TensileTypes.h",
"KernelHeader.h",
"SolutionHelper.cpp",
"SolutionHelper.h",
"Tools.cpp",
"Tools.h" ]
# write cmake
clientName = "LibraryClient"
writeCMake(outputPath, solutions, kernels, libraryStaticFiles, clientName )
# write logic
writeLogic(outputPath, logicData, solutionWriter)
print1("# Tensile Library Writer DONE")
print1(HR)
print1("")
################################################################################
# Main
################################################################################
if __name__ == "__main__":
TensileCreateLibrary()
|
the-stack_0_12788 | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--ntest', type=int, default=10)
parser.add_argument('--shrink_std', type=float, default=0.1)
parser.add_argument('--shrink_proportion', type=float, default=0.5)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
args = parser.parse_args()
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
from torchdiffeq import odeint_adjoint_stochastic_end_v2 as odeint_stochastic_end_v2
else:
from torchdiffeq import odeint_stochastic_end_v2
from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
true_y0 = torch.tensor([0.])
t = torch.linspace(0., 25., args.data_size)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]])
class Lambda(nn.Module):
def forward(self, t, y):
t = t.unsqueeze(0)
#equation = -1000*y + 3000 - 2000 * torch.exp(-t) + 1000 * torch.sin(t)
equation = -1000*y + 3000 - 2000 * torch.exp(-t)
#equation = -1000*y + 3000 - 2000 * torch.exp(-1000*t)
#equation = 10 * torch.sin(t)
return equation
#return torch.mm(y**3, true_A)
#return torch.mm(y**3, true_A)
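# Note: the -1000*y term gives this ODE a fast transient relative to the 25-second
# time span (a stiff problem); the reference trajectory below is generated with dopri5.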
with torch.no_grad():
true_y = odeint(Lambda(), true_y0, t, method='dopri5')
#true_y = odeint(Lambda(), true_y0, t, method='adams')
def get_batch():
s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
return batch_y0, batch_t, batch_y
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
if args.viz:
makedirs('png_alternate_stochastic_end_v2')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 4), facecolor='white')
ax_traj = fig.add_subplot(131, frameon=False)
ax_phase = fig.add_subplot(132, frameon=False)
#ax_multiple = fig.add_subplot(133, frameon=False)
plt.show(block=False)
def visualize(true_y, pred_y, odefunc, itr):
if args.viz:
ax_traj.cla()
ax_traj.set_title('True vs Predicted')
ax_traj.set_xlabel('t')
ax_traj.set_ylabel('y')
ax_traj.plot(t.numpy(), true_y.numpy()[:, 0], 'g-')
ax_traj.plot(t.numpy(), pred_y.numpy()[:, 0], 'b--')
ax_traj.set_xlim(t.min(), t.max())
ax_traj.set_ylim(-100, 100)
ax_traj.legend()
ax_phase.cla()
ax_phase.set_title('Predicted')
ax_phase.set_xlabel('t')
ax_phase.set_ylabel('y')
ax_phase.plot(t.numpy(), pred_y.numpy()[:, 0], 'b--')
ax_phase.set_xlim(t.min(), t.max())
ax_phase.set_ylim(-100, 100)
ax_phase.legend()
#ax_multiple.cla()
#ax_multiple.set_title('Variations')
#ax_multiple.set_xlabel('t')
#ax_multiple.set_ylabel('y')
#for component in pred_ys:
# ax_multiple.plot(t.numpy(), component.numpy()[:, 0], '--')
#ax_multiple.set_xlim(t.min(), t.max())
#ax_multiple.set_ylim(-100, 100)
#ax_multiple.legend()
fig.tight_layout()
plt.savefig('png_alternate_stochastic_end_v2/{:04d}'.format(itr))
plt.draw()
plt.pause(0.001)
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(2, 500),
nn.Tanh(),
nn.Linear(500, 500),
nn.Tanh(),
nn.Linear(500, 1),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
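# Reshape t so it can be broadcast to y's batch dimension and feed [t, y] pairs
# to the MLP, letting the learned dynamics depend explicitly on time.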
t=t.unsqueeze(0)
t = t.view(1,1)
y = y.view(y.size(0),1)
t = t.expand_as(y)
equation = torch.cat([t,y],1)
result = self.net(equation)
if y.size(0)==1:
result = result.squeeze()
return result
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
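# Worked example of the exponential moving average above (values are illustrative):
# meter = RunningAverageMeter(0.99); meter.update(1.0) sets avg to 1.0 (first value),
# then meter.update(0.0) gives avg = 1.0 * 0.99 + 0.0 * 0.01 = 0.99.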
if __name__ == '__main__':
ii = 0
func = ODEFunc()
optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
end = time.time()
time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
batch_y0, batch_t, batch_y = get_batch()
pred_y = odeint_stochastic_end_v2(func, batch_y0, batch_t,shrink_proportion=args.shrink_proportion,shrink_std=args.shrink_std,mode='train')
#pred_y = odeint_stochastic_end_v2(func, batch_y0, batch_t)
loss = torch.mean(torch.abs(pred_y - batch_y))
loss.backward()
optimizer.step()
time_meter.update(time.time() - end)
loss_meter.update(loss.item())
if itr % args.test_freq == 0:
with torch.no_grad():
pred_y = odeint(func, true_y0, t)
loss = torch.mean(torch.abs(pred_y - true_y))
print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
#pred_ys = []
#for i in range(args.ntest):
# pred_ys.append( odeint(func, batch_y0, batch_t))
#visualize(true_y, pred_y,pred_ys, func, ii)
visualize(true_y, pred_y, func, ii)
ii += 1
end = time.time()
|
the-stack_0_12790 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import HorizontalLocation, MarkerType, VerticalLocation
from ..core.properties import (
Any,
Auto,
Either,
Enum,
Instance,
Int,
List,
Null,
Nullable,
Seq,
String,
TextLike,
Tuple,
)
from ..models import (
ColumnDataSource,
CoordinateMapping,
GraphRenderer,
Plot,
Range,
Scale,
Tool,
)
from ..models.dom import Template
from ..models.tools import (
Drag,
GestureTool,
InspectTool,
Scroll,
Tap,
)
from ..transform import linear_cmap
from ..util.options import Options
from ._graph import get_graph_kwargs
from ._plot import get_range, get_scale, process_axis_and_grid
from ._stack import double_stack, single_stack
from ._tools import process_active_tools, process_tools_arg
from .glyph_api import _MARKER_SHORTCUTS, GlyphAPI
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#: A default set of tools configured if no configuration is provided
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,save,reset,help"
__all__ = (
'figure',
'markers',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class figure(Plot, GlyphAPI):
''' Create a new figure for plotting.
A subclass of |Plot| that simplifies plot creation with default axes, grids,
tools, etc.
Figure objects have many glyph methods that can be used to draw
vectorized graphical glyphs:
.. hlist::
:columns: 3
* :func:`~bokeh.plotting.figure.annular_wedge`
* :func:`~bokeh.plotting.figure.annulus`
* :func:`~bokeh.plotting.figure.arc`
* :func:`~bokeh.plotting.figure.asterisk`
* :func:`~bokeh.plotting.figure.bezier`
* :func:`~bokeh.plotting.figure.circle`
* :func:`~bokeh.plotting.figure.circle_cross`
* :func:`~bokeh.plotting.figure.circle_dot`
* :func:`~bokeh.plotting.figure.circle_x`
* :func:`~bokeh.plotting.figure.circle_y`
* :func:`~bokeh.plotting.figure.cross`
* :func:`~bokeh.plotting.figure.dash`
* :func:`~bokeh.plotting.figure.diamond`
* :func:`~bokeh.plotting.figure.diamond_cross`
* :func:`~bokeh.plotting.figure.diamond_dot`
* :func:`~bokeh.plotting.figure.dot`
* :func:`~bokeh.plotting.figure.ellipse`
* :func:`~bokeh.plotting.figure.harea`
* :func:`~bokeh.plotting.figure.hbar`
* :func:`~bokeh.plotting.figure.hex`
* :func:`~bokeh.plotting.figure.hex_tile`
* :func:`~bokeh.plotting.figure.image`
* :func:`~bokeh.plotting.figure.image_rgba`
* :func:`~bokeh.plotting.figure.image_url`
* :func:`~bokeh.plotting.figure.inverted_triangle`
* :func:`~bokeh.plotting.figure.line`
* :func:`~bokeh.plotting.figure.multi_line`
* :func:`~bokeh.plotting.figure.multi_polygons`
* :func:`~bokeh.plotting.figure.oval`
* :func:`~bokeh.plotting.figure.patch`
* :func:`~bokeh.plotting.figure.patches`
* :func:`~bokeh.plotting.figure.plus`
* :func:`~bokeh.plotting.figure.quad`
* :func:`~bokeh.plotting.figure.quadratic`
* :func:`~bokeh.plotting.figure.ray`
* :func:`~bokeh.plotting.figure.rect`
* :func:`~bokeh.plotting.figure.segment`
* :func:`~bokeh.plotting.figure.square`
* :func:`~bokeh.plotting.figure.square_cross`
* :func:`~bokeh.plotting.figure.square_dot`
* :func:`~bokeh.plotting.figure.square_pin`
* :func:`~bokeh.plotting.figure.square_x`
* :func:`~bokeh.plotting.figure.star`
* :func:`~bokeh.plotting.figure.star_dot`
* :func:`~bokeh.plotting.figure.step`
* :func:`~bokeh.plotting.figure.text`
* :func:`~bokeh.plotting.figure.triangle`
* :func:`~bokeh.plotting.figure.triangle_dot`
* :func:`~bokeh.plotting.figure.triangle_pin`
* :func:`~bokeh.plotting.figure.varea`
* :func:`~bokeh.plotting.figure.vbar`
* :func:`~bokeh.plotting.figure.wedge`
* :func:`~bokeh.plotting.figure.x`
* :func:`~bokeh.plotting.figure.y`
There is a scatter function that can be parameterized by marker type:
* :func:`~bokeh.plotting.figure.scatter`
There are also specialized methods for stacking bars:
* bars: :func:`~bokeh.plotting.figure.hbar_stack`, :func:`~bokeh.plotting.figure.vbar_stack`
* lines: :func:`~bokeh.plotting.figure.hline_stack`, :func:`~bokeh.plotting.figure.vline_stack`
* areas: :func:`~bokeh.plotting.figure.harea_stack`, :func:`~bokeh.plotting.figure.varea_stack`
As well as one specialized method for making simple hexbin plots:
* :func:`~bokeh.plotting.figure.hexbin`
In addition to all the ``figure`` property attributes, the following
options are also accepted:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting._figure
'''
__view_model__ = "Figure"
def __init__(self, *arg, **kw) -> None:
opts = FigureOptions(kw)
super().__init__(*arg, **kw)
self.x_range = get_range(opts.x_range)
self.y_range = get_range(opts.y_range)
self.x_scale = get_scale(self.x_range, opts.x_axis_type)
self.y_scale = get_scale(self.y_range, opts.y_axis_type)
process_axis_and_grid(self, opts.x_axis_type, opts.x_axis_location, opts.x_minor_ticks, opts.x_axis_label, self.x_range, 0)
process_axis_and_grid(self, opts.y_axis_type, opts.y_axis_location, opts.y_minor_ticks, opts.y_axis_label, self.y_range, 1)
tool_objs, tool_map = process_tools_arg(self, opts.tools, opts.tooltips)
self.add_tools(*tool_objs)
process_active_tools(
self.toolbar,
tool_map,
opts.active_drag,
opts.active_inspect,
opts.active_scroll,
opts.active_tap,
opts.active_multi,
)
@property
def plot(self):
return self
@property
def coordinates(self):
return None
def subplot(self,
*,
x_source: Range | None = None, y_source: Range | None = None,
x_scale: Scale | None = None, y_scale: Scale | None = None,
x_target: Range, y_target: Range,
) -> GlyphAPI:
""" Create a new sub-coordinate system and expose a plotting API. """
coordinates = CoordinateMapping(x_source=x_source, y_source=y_source, x_target=x_target, y_target=y_target)
return GlyphAPI(self, coordinates)
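# A usage sketch for subplot() (hypothetical ranges and data; Range1d would need to
# be imported from bokeh.models):
#   sub = fig.subplot(x_source=Range1d(0, 10), y_source=Range1d(0, 10),
#                     x_target=Range1d(0, 1), y_target=Range1d(0, 1))
#   sub.line(x=[0, 1, 2], y=[3, 1, 2])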
def hexbin(self, x, y, size, orientation="pointytop", palette="Viridis256", line_color=None, fill_color=None, aspect_scale=1, **kwargs):
''' Perform a simple equal-weight hexagonal binning.
A :class:`~bokeh.models.glyphs.HexTile` glyph will be added to display
the binning. The :class:`~bokeh.models.sources.ColumnDataSource` for
the glyph will have columns ``q``, ``r``, and ``count``, where ``q``
and ``r`` are `axial coordinates`_ for a tile, and ``count`` is the
associated bin count.
It is often useful to set ``match_aspect=True`` on the associated plot,
so that hexagonal tiles are all regular (i.e. not "stretched") in
screen space.
For more sophisticated use-cases, e.g. weighted binning or individually
scaling hex tiles, use :func:`hex_tile` directly, or consider a higher
level library such as HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates to bin into hexagonal tiles.
y (array[float]) :
A NumPy array of y-coordinates to bin into hexagonal tiles
size (float) :
The size of the hexagonal tiling to use. The size is defined as
distance from the center of a hexagon to a corner.
In case the aspect scaling is not 1-1, then specifically `size`
is the distance from the center to the "top" corner with the
`"pointytop"` orientation, and the distance from the center to
a "side" corner with the "flattop" orientation.
orientation ("pointytop" or "flattop", optional) :
Whether the hexagonal tiles should be oriented with a pointed
corner on top, or a flat side on top. (default: "pointytop")
palette (str or seq[color], optional) :
A palette (or palette name) to use to colormap the bins according
to count. (default: 'Viridis256')
If ``fill_color`` is supplied, it overrides this value.
line_color (color, optional) :
The outline color for hex tiles, or None (default: None)
fill_color (color, optional) :
An optional fill color for hex tiles, or None. If None, then
the ``palette`` will be used to color map the tiles by
count. (default: None)
aspect_scale (float) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Any additional keyword arguments are passed to :func:`hex_tile`.
Returns
(Glyphrender, DataFrame)
A tuple with the ``HexTile`` renderer generated to display the
binning, and a Pandas ``DataFrame`` with columns ``q``, ``r``,
and ``count``, where ``q`` and ``r`` are `axial coordinates`_
for a tile, and ``count`` is the associated bin count.
Example:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import figure, show
x = 2 + 2*np.random.standard_normal(500)
y = 2 + 2*np.random.standard_normal(500)
p = figure(match_aspect=True, tools="wheel_zoom,reset")
p.background_fill_color = '#440154'
p.grid.visible = False
p.hexbin(x, y, size=0.5, hover_color="pink", hover_alpha=0.8)
hover = HoverTool(tooltips=[("count", "@c"), ("(q,r)", "(@q, @r)")])
p.add_tools(hover)
show(p)
.. _axial coordinates: https://www.redblobgames.com/grids/hexagons/#coordinates-axial
'''
from ..util.hex import hexbin
bins = hexbin(x, y, size, orientation, aspect_scale=aspect_scale)
if fill_color is None:
fill_color = linear_cmap('c', palette, 0, max(bins.counts))
source = ColumnDataSource(data=dict(q=bins.q, r=bins.r, c=bins.counts))
r = self.hex_tile(q="q", r="r", size=size, orientation=orientation, aspect_scale=aspect_scale,
source=source, line_color=line_color, fill_color=fill_color, **kwargs)
return (r, bins)
def harea_stack(self, stackers, **kw):
''' Generate multiple ``HArea`` renderers for levels stacked left
to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x1`` and ``x2`` harea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``harea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``harea_stack`` will
create two ``HArea`` renderers that stack:
.. code-block:: python
p.harea_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.harea(x1=stack(), x2=stack('2016'), y='y', color='blue', source=source, name='2016')
p.harea(x1=stack('2016'), x2=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "x1", "x2", **kw):
result.append(self.harea(**kw))
return result
def hbar_stack(self, stackers, **kw):
''' Generate multiple ``HBar`` renderers for levels stacked left to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``left`` and ``right`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hbar_stack`` will
create two ``HBar`` renderers that stack:
.. code-block:: python
p.hbar_stack(['2016', '2017'], y=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.hbar(bottom=stack(), top=stack('2016'), y=10, width=0.9, color='blue', source=source, name='2016')
p.hbar(bottom=stack('2016'), top=stack('2016', '2017'), y=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "left", "right", **kw):
result.append(self.hbar(**kw))
return result
def _line_stack(self, x, y, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically
or horizontally.
Args:
x (seq[str]) :
y (seq[str]) :
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``line_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.line_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
if all(isinstance(val, (list, tuple)) for val in (x,y)):
raise ValueError("Only one of x or y may be a list of stackers")
result = []
if isinstance(y, (list, tuple)):
kw['x'] = x
for kw in single_stack(y, "y", **kw):
result.append(self.line(**kw))
return result
if isinstance(x, (list, tuple)):
kw['y'] = y
for kw in single_stack(x, "x", **kw):
result.append(self.line(**kw))
return result
return [self.line(x, y, **kw)]
def hline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked horizontally.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hline_stack`` with
stackers for the x-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.hline_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(x=stack('2016'), y='y', color='blue', source=source, name='2016')
p.line(x=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
return self._line_stack(x=stackers, **kw)
def varea_stack(self, stackers, **kw):
''' Generate multiple ``VArea`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y1`` and ``y2`` varea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``varea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``varea_stack`` will
create two ``VArea`` renderers that stack:
.. code-block:: python
p.varea_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.varea(y1=stack(), y2=stack('2016'), x='x', color='blue', source=source, name='2016')
p.varea(y1=stack('2016'), y2=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "y1", "y2", **kw):
result.append(self.varea(**kw))
return result
def vbar_stack(self, stackers, **kw):
''' Generate multiple ``VBar`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``bottom`` and ``top`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``vbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vbar_stack`` will
create two ``VBar`` renderers that stack:
.. code-block:: python
p.vbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.vbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
p.vbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "bottom", "top", **kw):
result.append(self.vbar(**kw))
return result
def vline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vline_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.vline_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
return self._line_stack(y=stackers, **kw)
def graph(self, node_source, edge_source, layout_provider, **kwargs):
''' Creates a network graph using the given node, edge and layout provider.
Args:
node_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph nodes. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
edge_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph edges. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
layout_provider (:class:`~bokeh.models.graphs.LayoutProvider`) : a ``LayoutProvider`` instance to
provide the graph coordinates in Cartesian space.
**kwargs: |line properties| and |fill properties|
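Examples:
A minimal, illustrative sketch (not taken from an existing example); it
assumes Bokeh's ``ColumnDataSource`` and ``StaticLayoutProvider`` models
and an existing figure ``p``:
.. code-block:: python
from bokeh.models import ColumnDataSource, StaticLayoutProvider
nodes = ColumnDataSource(data=dict(index=[0, 1, 2]))
edges = ColumnDataSource(data=dict(start=[0, 0], end=[1, 2]))
layout = StaticLayoutProvider(graph_layout={0: (0, 0), 1: (1, 1), 2: (2, 0)})
graph_renderer = p.graph(nodes, edges, layout)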
'''
kw = get_graph_kwargs(node_source, edge_source, **kwargs)
graph_renderer = GraphRenderer(layout_provider=layout_provider, **kw)
self.renderers.append(graph_renderer)
return graph_renderer
def markers():
''' Prints a list of valid marker types for scatter()
Returns:
None
'''
print("Available markers: \n\n - " + "\n - ".join(list(MarkerType)))
print()
print("Shortcuts: \n\n" + "\n".join(" %r: %s" % item for item in _MARKER_SHORTCUTS.items()))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
class BaseFigureOptions(Options):
tools = Either(String, Seq(Either(String, Instance(Tool))), default=DEFAULT_TOOLS, help="""
Tools the plot should start with.
""")
x_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent x-axis major ticks.
""")
y_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent y-axis major ticks.
""")
x_axis_location = Nullable(Enum(VerticalLocation), default="below", help="""
Where the x-axis should be located.
""")
y_axis_location = Nullable(Enum(HorizontalLocation), default="left", help="""
Where the y-axis should be located.
""")
x_axis_label = Nullable(TextLike, default="", help="""
A label for the x-axis.
""")
y_axis_label = Nullable(TextLike, default="", help="""
A label for the y-axis.
""")
active_drag = Either(Null, Auto, String, Instance(Drag), default="auto", help="""
Which drag tool should initially be active.
""")
active_inspect = Either(Null, Auto, String, Instance(InspectTool), Seq(Instance(InspectTool)), default="auto", help="""
Which inspect tool should initially be active.
""")
active_scroll = Either(Null, Auto, String, Instance(Scroll), default="auto", help="""
Which scroll tool should initially be active.
""")
active_tap = Either(Null, Auto, String, Instance(Tap), default="auto", help="""
Which tap tool should initially be active.
""")
active_multi = Either(Null, Auto, String, Instance(GestureTool), default="auto", help="""
Specify an active multi-gesture tool, for instance an edit tool or a range tool.
""")
tooltips = Either(Null, Instance(Template), String, List(Tuple(String, String)), help="""
An optional argument to configure tooltips for the Figure. This argument
accepts the same values as the ``HoverTool.tooltips`` property. If a hover
tool is specified in the ``tools`` argument, this value will override that
hover tool's ``tooltips`` value. If no hover tool is specified in the
``tools`` argument, then passing tooltips here will cause one to be created
and added.
""")
class FigureOptions(BaseFigureOptions):
x_range = Any(help="""
Customize the x-range of the plot.
""")
y_range = Any(help="""
Customize the y-range of the plot.
""")
x_axis_type = Either(Null, Auto, Enum("linear", "log", "datetime", "mercator"), default="auto", help="""
The type of the x-axis.
""")
y_axis_type = Either(Null, Auto, Enum("linear", "log", "datetime", "mercator"), default="auto", help="""
The type of the y-axis.
""")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_color_fields = {"color", "fill_color", "line_color"}
_alpha_fields = {"alpha", "fill_alpha", "line_alpha"}
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
the-stack_0_12793 | import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import sysconfig
import warnings
import select
import shutil
import gc
try:
import resource
except ImportError:
resource = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
try:
mkstemp = tempfile.mkstemp
except AttributeError:
# tempfile.mkstemp is not available
def mkstemp():
"""Replacement for mkstemp, calling mktemp."""
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(OSError, subprocess.Popen,
[rel_python])
self.assertRaises(OSError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(OSError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(OSError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with script_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(OSError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd('', "somethingyoudonthave", executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
p.stdin.write("line1\n")
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + '''\nif True:
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
'''],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + '''\nif True:
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
'''],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug builds push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
old_getpreferredencoding = locale.getpreferredencoding
# Indirectly via io.TextIOWrapper, Popen() defaults to
# locale.getpreferredencoding(False), and earlier (in Python 3.2) to
# locale.getpreferredencoding().
def getpreferredencoding(do_setlocale=True):
return encoding
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
try:
locale.getpreferredencoding = getpreferredencoding
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = popen.communicate(input='')
finally:
locale.getpreferredencoding = old_getpreferredencoding
self.assertEqual(stdout, '1\n2\n3\n4')
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(1)"])
count = 0
while p.poll() is None:
time.sleep(0.1)
count += 1
# We expect that the poll loop probably went around about 10 times,
# but, based on system scheduling we can't control, it's possible
# poll() never returned None. It "should be" very rare that it
# didn't go around at least twice.
self.assertGreaterEqual(count, 2)
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(2)"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
# Windows raises IOError. Others raise OSError.
with self.assertRaises(EnvironmentError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
time.sleep(2)
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'),
"Requires signal.SIGALRM")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, old_handler)
# the process is running for 2 seconds
args = [sys.executable, "-c", 'import time; time.sleep(2)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
# communicate() will be interrupted by SIGALRM
process.communicate()
# This test is Linux-ish specific, for simplicity, to at least have
# some coverage. It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
old_limit = None
def __enter__(self):
"""Try to save previous ulimit, then set it to (0, 0)."""
if resource is not None:
try:
self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except (ValueError, resource.error):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the Crash Reporter.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print("this tests triggers the Crash Reporter, "
"that is intentional", end='')
sys.stdout.flush()
def __exit__(self, *args):
"""Return core file behavior to default."""
if self.old_limit is None:
return
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
except (ValueError, resource.error):
pass
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistant directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment; that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with _SuppressCoreFiles():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except RuntimeError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds: os.close(fd)  # close eagerly; map() is lazy in Python 3
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise RuntimeError("force the _execute_child() errpipe_data path.")
with self.assertRaises(RuntimeError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
def test_args_string(self):
# args is a string
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
newfds = []
for a in fds:
b = os.dup(a)
newfds.append(b)
if a == 0:
stdin = b
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
for b, a in zip(newfds, fds):
os.dup2(b, a)
for b in newfds:
os.close(b)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_remapping_std_fds(self):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
# restore the original fd's underneath sys.stdin, etc.
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = [os.dup(fd) for fd in range(3)]
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
for std, saved in enumerate(saved_fds):
os.dup2(saved, std)
os.close(saved)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
# Pure Python implementations keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except RuntimeError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or RuntimeError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ascii for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(value))
# test bytes
key = key.encode("ascii", "surrogateescape")
value = value.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open("/dev/null", os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbot raises EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
# The module says:
# "NB This only works (and is only relevant) for UNIX."
#
# Actually, getoutput should work on any platform with an os.popen, but
# I'll take the comment as given, and skip this suite.
@unittest.skipUnless(os.name == 'posix', "only relevant for UNIX")
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput('cat ' + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
"poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
subprocess._has_poll = False
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._has_poll = True
ProcessTestCase.tearDown(self)
@unittest.skipUnless(getattr(subprocess, '_posixsubprocess', False),
"_posixsubprocess extension module not found.")
class ProcessTestCasePOSIXPurePython(ProcessTestCase, POSIXProcessTestCase):
@classmethod
def setUpClass(cls):
global subprocess
assert subprocess._posixsubprocess
# Reimport subprocess while forcing _posixsubprocess to not exist.
with support.check_warnings(('.*_posixsubprocess .* not being used.*',
RuntimeWarning)):
subprocess = support.import_fresh_module(
'subprocess', blocked=['_posixsubprocess'])
assert not subprocess._posixsubprocess
@classmethod
def tearDownClass(cls):
global subprocess
# Reimport subprocess as it should be, restoring order to the universe.
subprocess = support.import_fresh_module('subprocess')
assert subprocess._posixsubprocess
class HelperFunctionTests(unittest.TestCase):
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(EnvironmentError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
self.assertEqual(c.exception.errno, errno.ENOENT)
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
ProcessTestCasePOSIXPurePython,
CommandTests,
ProcessTestCaseNoPoll,
HelperFunctionTests,
CommandsWithSpaces,
ContextManagerTests,
)
support.run_unittest(*unit_tests)
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
the-stack_0_12796 | from stix_shifter_utils.modules.base.stix_transmission.base_ping_connector import BasePingConnector
from stix_shifter_utils.utils import logger
from stix_shifter_utils.utils.error_response import ErrorResponder
class PingConnector(BasePingConnector):
def __init__(self, api_client):
self.api_client = api_client
self.logger = logger.set_logger(__name__)
self.connector = __name__.split('.')[1]
def ping_connection(self):
try:
response_dict = self.api_client.ping_data_source()
response_code = response_dict["code"]
# Construct a response object
return_obj = dict()
if response_code == 200:
return_obj['success'] = True
else:
ErrorResponder.fill_error(return_obj, response_dict, ['message'], connector=self.connector)
return return_obj
except Exception as err:
self.logger.error('error when pinging datasource: %s', err, exc_info=True)
raise
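# Usage sketch (illustrative only): the surrounding stix-shifter transmission
# layer is expected to construct this connector with an api_client whose
# ping_data_source() returns a dict shaped roughly like {"code": 200, ...} on
# success or {"code": <error>, "message": "<reason>"} on failure. The exact
# payload shape is an assumption drawn from how response_dict is used above.
#
#     connector = PingConnector(api_client)
#     result = connector.ping_connection()  # -> {"success": True} or an error object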
|
the-stack_0_12797 | import os
import numpy as np
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.datasets import mnist
from tensorflow.keras.initializers import Constant
from tensorflow.keras.initializers import TruncatedNormal
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tf_utils.callbacks import ConfusionMatrix
MODEL_DIR = os.path.abspath("C:/Users/jan/Dropbox/_Coding/UdemyTF/models")
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
MODEL_FILE_PATH = os.path.join(MODEL_DIR, "mnist_model.h5")
LOGS_DIR = os.path.abspath("C:/Users/jan/Dropbox/_Coding/UdemyTF/logs/")
if not os.path.exists(LOGS_DIR):
os.mkdir(LOGS_DIR)
MODEL_LOG_DIR = os.path.join(LOGS_DIR, "mnist_cm")
def prepare_dataset(num_features: int, num_classes: int) -> tuple:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train = to_categorical(y_train, num_classes=num_classes, dtype=np.float32)
y_test = to_categorical(y_test, num_classes=num_classes, dtype=np.float32)
x_train = x_train.reshape(-1, num_features).astype(np.float32)
x_test = x_test.reshape(-1, num_features).astype(np.float32)
return (x_train, y_train), (x_test, y_test)
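# Note: with the standard Keras MNIST split this yields x_train of shape
# (60000, 784) and x_test of shape (10000, 784), both float32, with labels
# one-hot encoded into 10 classes.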
def build_model(num_features: int, num_classes: int) -> Sequential:
init_w = TruncatedNormal(mean=0.0, stddev=0.01)
init_b = Constant(value=0.0)
model = Sequential()
model.add(
Dense(
units=500,
kernel_initializer=init_w,
bias_initializer=init_b,
input_shape=(num_features,),
)
)
model.add(Activation("relu"))
model.add(Dense(units=300, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=100, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=50, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(
Dense(
units=num_classes,
kernel_initializer=init_w,
bias_initializer=init_b,
)
)
model.add(Activation("softmax"))
model.summary()
return model
if __name__ == "__main__":
num_features = 784
num_classes = 10
(x_train, y_train), (x_test, y_test) = prepare_dataset(num_features, num_classes)
optimizer = Adam(learning_rate=0.001)
epochs = 2
batch_size = 256
model = build_model(num_features, num_classes)
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"],
)
tb_callback = TensorBoard(log_dir=MODEL_LOG_DIR, histogram_freq=1, write_graph=True)
classes_list = [class_idx for class_idx in range(num_classes)]
cm_callback = ConfusionMatrix(
model, x_test, y_test, classes_list=classes_list, log_dir=MODEL_LOG_DIR
)
model.fit(
x=x_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, y_test),
callbacks=[tb_callback, cm_callback],
)
scores = model.evaluate(x=x_test, y=y_test, verbose=0)
print("Scores: ", scores)
|
the-stack_0_12799 | """
Problem 53: Combinatoric selections
https://projecteuler.net/problem=53
There are exactly ten ways of selecting three from five, 12345:
123, 124, 125, 134, 135, 145, 234, 235, 245, and 345
In combinatorics, we use the notation, (5 over 3) = 10.
In general, (n over r) = n! / (r! * (n−r)!), where r <= n, n! = n * (n−1) * ... * 3 * 2 * 1,
and 0! = 1.
It is not until n = 23, that a value exceeds one-million: (23 over 10) = 1144066.
How many, not necessarily distinct, values of (n over r) for 1 <= n <= 100,
are greater than one-million?
"""
from typing import Iterable, Tuple
from src.common.calculations import calculate_binomial_coefficient
# pylint: disable=invalid-name
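# The imported calculate_binomial_coefficient is project-specific and not shown
# here; a minimal stand-in consistent with the formula in the module docstring
# would be (assumption, shown for illustration only, not part of this module):
#
#     from math import factorial
#     def calculate_binomial_coefficient(n: int, r: int) -> int:
#         return factorial(n) // (factorial(r) * factorial(n - r))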
def get_large_binomial_coefficients(max_n: int, threshold: int) -> Iterable[Tuple[int, int, int]]:
"""
Get binomial coefficients (n over r) for `1 <= n <= max_n` that are greater than `threshold`.
Returns tuples `(n, r, (n over r))`.
"""
for n in range(1, max_n + 1):
for r in range(n + 1):
binomial_coefficient = calculate_binomial_coefficient(n, r)
if binomial_coefficient > threshold:
yield n, r, binomial_coefficient
def main() -> None:
"""Main function."""
max_n = 100
threshold = int(1e6)
count = len(list(get_large_binomial_coefficients(max_n, threshold)))
print(f'The number of values of (n over r) for 1 <= n <= {max_n} ' \
f'that are greater than {threshold:,} is {count}.')
if __name__ == '__main__':
main()
|
the-stack_0_12803 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from oslo_concurrency import lockutils
from oslo_log import log as logging
import yaml
from neutron.tests.tempest.common import cred_provider
from neutron.tests.tempest import config
from neutron.tests.tempest import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
def read_accounts_yaml(path):
yaml_file = open(path, 'r')
accounts = yaml.safe_load(yaml_file)
return accounts
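# Example of the expected accounts YAML layout (illustrative values only; the
# real file is located via CONF.auth.test_accounts_file). Each entry is a
# credentials dict, optionally carrying 'roles' and/or 'types' lists that
# get_hash_dict() pops off before hashing:
#
#   - username: user_1
#     tenant_name: tenant_1
#     password: secret
#     roles: [Member]
#   - username: admin_1
#     tenant_name: tenant_admin
#     password: secret
#     types: [admin]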
class Accounts(cred_provider.CredentialProvider):
def __init__(self, name):
super(Accounts, self).__init__(name)
self.name = name
if os.path.isfile(CONF.auth.test_accounts_file):
accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
self.use_default_creds = False
else:
accounts = {}
self.use_default_creds = True
self.hash_dict = self.get_hash_dict(accounts)
# FIXME(dhellmann): The configuration option is not part of
# the API of the library, because if we change the option name
# or group it will break this use. Tempest needs to set this
# value somewhere that it owns, and then use
# lockutils.set_defaults() to tell oslo.concurrency what value
# to use.
self.accounts_dir = os.path.join(CONF.oslo_concurrency.lock_path,
'test_accounts')
self.isolated_creds = {}
@classmethod
def _append_role(cls, role, account_hash, hash_dict):
if role in hash_dict['roles']:
hash_dict['roles'][role].append(account_hash)
else:
hash_dict['roles'][role] = [account_hash]
return hash_dict
@classmethod
def get_hash_dict(cls, accounts):
hash_dict = {'roles': {}, 'creds': {}}
# Loop over the accounts read from the yaml file
for account in accounts:
roles = []
types = []
if 'roles' in account:
roles = account.pop('roles')
if 'types' in account:
types = account.pop('types')
temp_hash = hashlib.md5()
temp_hash.update(str(account))
temp_hash_key = temp_hash.hexdigest()
hash_dict['creds'][temp_hash_key] = account
for role in roles:
hash_dict = cls._append_role(role, temp_hash_key,
hash_dict)
# If types are set for the account append the matching role
# subdict with the hash
for type in types:
if type == 'admin':
hash_dict = cls._append_role(CONF.identity.admin_role,
temp_hash_key, hash_dict)
elif type == 'operator':
hash_dict = cls._append_role(
CONF.object_storage.operator_role, temp_hash_key,
hash_dict)
elif type == 'reseller_admin':
hash_dict = cls._append_role(
CONF.object_storage.reseller_admin_role,
temp_hash_key,
hash_dict)
return hash_dict
def is_multi_user(self):
# Default credentials are not a valid option with the locking Accounts provider
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
else:
return len(self.hash_dict['creds']) > 1
def is_multi_tenant(self):
return self.is_multi_user()
def _create_hash_file(self, hash_string):
path = os.path.join(os.path.join(self.accounts_dir, hash_string))
if not os.path.isfile(path):
with open(path, 'w') as fd:
fd.write(self.name)
return True
return False
@lockutils.synchronized('test_accounts_io', external=True)
def _get_free_hash(self, hashes):
# Cast as a list because in some edge cases a set will be passed in
hashes = list(hashes)
if not os.path.isdir(self.accounts_dir):
os.mkdir(self.accounts_dir)
# Create File from first hash (since none are in use)
self._create_hash_file(hashes[0])
return hashes[0]
names = []
for _hash in hashes:
res = self._create_hash_file(_hash)
if res:
return _hash
else:
path = os.path.join(os.path.join(self.accounts_dir,
_hash))
with open(path, 'r') as fd:
names.append(fd.read())
msg = ('Insufficient number of users provided. %s have allocated all '
'the credentials for this allocation request' % ','.join(names))
raise exceptions.InvalidConfiguration(msg)
def _get_match_hash_list(self, roles=None):
hashes = []
if roles:
# Loop over all the creds for each role in the subdict and generate
# a list of cred lists for each role
for role in roles:
temp_hashes = self.hash_dict['roles'].get(role, None)
if not temp_hashes:
raise exceptions.InvalidConfiguration(
"No credentials with role: %s specified in the "
"accounts ""file" % role)
hashes.append(temp_hashes)
# Take the list of lists and do a boolean and between each list to
# find the creds which fall under all the specified roles
temp_list = set(hashes[0])
for hash_list in hashes[1:]:
temp_list = temp_list & set(hash_list)
hashes = temp_list
else:
hashes = self.hash_dict['creds'].keys()
# NOTE(mtreinish): admin is a special case because of the increased
# privilege set which could potentially cause issues on tests where that
# is not expected. So unless the admin role is explicitly requested, do not
# allocate admin.
admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
None)
if ((not roles or CONF.identity.admin_role not in roles) and
admin_hashes):
useable_hashes = [x for x in hashes if x not in admin_hashes]
else:
useable_hashes = hashes
return useable_hashes
def _get_creds(self, roles=None):
if self.use_default_creds:
raise exceptions.InvalidConfiguration(
"Account file %s doesn't exist" % CONF.auth.test_accounts_file)
useable_hashes = self._get_match_hash_list(roles)
free_hash = self._get_free_hash(useable_hashes)
return self.hash_dict['creds'][free_hash]
@lockutils.synchronized('test_accounts_io', external=True)
def remove_hash(self, hash_string):
hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
LOG.warning('Expected an account lock file %s to remove, but '
'one did not exist' % hash_path)
else:
os.remove(hash_path)
if not os.listdir(self.accounts_dir):
os.rmdir(self.accounts_dir)
def get_hash(self, creds):
for _hash in self.hash_dict['creds']:
# Comparing on the attributes that are expected in the YAML
if all([getattr(creds, k) == self.hash_dict['creds'][_hash][k] for
k in creds.get_init_attributes()]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
def remove_credentials(self, creds):
_hash = self.get_hash(creds)
self.remove_hash(_hash)
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
creds = self._get_creds()
primary_credential = cred_provider.get_credentials(**creds)
self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
creds = self._get_creds()
alt_credential = cred_provider.get_credentials(**creds)
self.isolated_creds['alt'] = alt_credential
return alt_credential
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
exist_creds = self.isolated_creds.get(str(roles), None)
# The force kwarg is used to allocate an additional set of creds with
# the same role list. The index used for the previously allocation
# in the isolated_creds dict will be moved.
if exist_creds and not force_new:
return exist_creds
elif exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self.isolated_creds))
self.isolated_creds[new_index] = exist_creds
creds = self._get_creds(roles=roles)
role_credential = cred_provider.get_credentials(**creds)
self.isolated_creds[str(roles)] = role_credential
return role_credential
def clear_isolated_creds(self):
for creds in self.isolated_creds.values():
self.remove_credentials(creds)
def get_admin_creds(self):
return self.get_creds_by_roles([CONF.identity.admin_role])
def is_role_available(self, role):
if self.use_default_creds:
return False
else:
if self.hash_dict['roles'].get(role):
return True
return False
def admin_available(self):
return self.is_role_available(CONF.identity.admin_role)
class NotLockingAccounts(Accounts):
"""Credentials provider which always returns the first and second
configured accounts as primary and alt users.
This credential provider can be used in case of serial test execution
to preserve the current behaviour of the serial tempest run.
"""
def _unique_creds(self, cred_arg=None):
"""Verify that the configured credentials are valid and distinct """
if self.use_default_creds:
try:
user = self.get_primary_creds()
alt_user = self.get_alt_creds()
return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
except exceptions.InvalidCredentials as ic:
msg = "At least one of the configured credentials is " \
"not valid: %s" % ic
raise exceptions.InvalidConfiguration(msg)
else:
# TODO(andreaf) Add a uniqueness check here
return len(self.hash_dict['creds']) > 1
def is_multi_user(self):
return self._unique_creds('username')
def is_multi_tenant(self):
return self._unique_creds('tenant_id')
def get_creds(self, id, roles=None):
try:
hashes = self._get_match_hash_list(roles)
# No need to sort the dict as within the same python process
# the HASH seed won't change, so subsequent calls to keys()
# will return the same result
_hash = hashes[id]
except IndexError:
msg = 'Insufficient number of users provided'
raise exceptions.InvalidConfiguration(msg)
return self.hash_dict['creds'][_hash]
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
if not self.use_default_creds:
creds = self.get_creds(0)
primary_credential = cred_provider.get_credentials(**creds)
else:
primary_credential = cred_provider.get_configured_credentials(
'user')
self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
if not self.use_default_creds:
creds = self.get_creds(1)
alt_credential = cred_provider.get_credentials(**creds)
else:
alt_credential = cred_provider.get_configured_credentials(
'alt_user')
self.isolated_creds['alt'] = alt_credential
return alt_credential
def clear_isolated_creds(self):
self.isolated_creds = {}
def get_admin_creds(self):
if not self.use_default_creds:
return self.get_creds_by_roles([CONF.identity.admin_role])
else:
creds = cred_provider.get_configured_credentials(
"identity_admin", fill_in=False)
self.isolated_creds['admin'] = creds
return creds
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
exist_creds = self.isolated_creds.get(str(roles), None)
index = 0
if exist_creds and not force_new:
return exist_creds
elif exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self.isolated_creds))
self.isolated_creds[new_index] = exist_creds
# Figure out how many existing creds for this roles set are present
# use this as the index the returning hash list to ensure separate
# creds are returned with force_new being True
for creds_names in self.isolated_creds:
if str(roles) in creds_names:
index = index + 1
if not self.use_default_creds:
creds = self.get_creds(index, roles=roles)
role_credential = cred_provider.get_credentials(**creds)
self.isolated_creds[str(roles)] = role_credential
else:
msg = "Default credentials can not be used with specifying "\
"credentials by roles"
raise exceptions.InvalidConfiguration(msg)
return role_credential
|
the-stack_0_12804 | from .models import Agent
from model_bakery.recipe import Recipe, seq
from model_bakery import baker
from itertools import cycle
from django.utils import timezone as djangotime
agent = Recipe(
Agent,
client="Default",
site="Default",
hostname=seq("TestHostname"),
monitoring_type=cycle(["workstation", "server"]),
)
server_agent = agent.extend(
monitoring_type="server",
)
workstation_agent = agent.extend(
monitoring_type="workstation",
)
online_agent = agent.extend(last_seen=djangotime.now())
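# Usage sketch (assumes this module lives in an app registered under the
# "agents" label so the recipes resolve as "agents.<recipe_name>" -- adjust the
# prefix to your project layout):
#
#     agent = baker.make_recipe("agents.server_agent")
#     agents = baker.make_recipe("agents.online_agent", _quantity=3)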
overdue_agent = agent.extend(
last_seen=djangotime.now() - djangotime.timedelta(minutes=6)
) |
the-stack_0_12808 | """Grafico con los valores obtenidos en la implementacion serial en CPU"""
import matplotlib.pyplot as plt
import numpy as np
import csv
path = "Data/"
if __name__ == "__main__":
size = []
time = []
with open(path + 'serial_CPU.csv', mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
size += [ int(row['width']) * int(row['height']) ]
tmp_time = float(row['time']) * 0.001
time += [tmp_time / float(row['iter']) ]
print(size[-1], time[-1])
n_size = np.array(size)
n_time = np.array(time)
print(n_size)
print(n_time)
n_eval = n_size / n_time / 1000000
print(n_eval)
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xscale('log')
ax.plot(n_size, n_eval, 'r-o')
ax.set(xlabel='World size [cells]', ylabel='Cells evaluated per second [millions]',
title='Cells evaluated per second for different world sizes\nwith a sequential CPU implementation')
ax.grid()
fig.savefig("images/serial_CPU.png")
plt.show() |
the-stack_0_12809 | """
Functions to write atomic coordinates in commmon chemical formats.
"""
import os
def write_pdb(file_name, atoms, coordinates, header='mol'):
""" Write given atomic coordinates to file in pdb format """
with open(file_name, 'w') as pdb_file:
pdb_file.write('HEADER ' + header + '\n')
format = 'HETATM%5d%3s MOL 1 %8.3f%8.3f%8.3f 1.00 0.00 %2s\n'
for atom_index, (atom_name, atom_coor) in enumerate(zip(atoms, coordinates), start=1):
x, y, z = atom_coor
pdb_file.write(format % (atom_index, atom_name, x, y, z, atom_name.rjust(2)))
pdb_file.write('END\n')
def write_xyz(file_name, atoms, coordinates, header='mol'):
""" Write given atomic coordinates to file in xyz format """
with open(file_name, 'w') as xyz_file:
xyz_file.write(str(len(coordinates)) + '\n')
xyz_file.write(header + '\n')
format = '%s %.4f %.4f %.4f\n'
for atom, coor in zip(atoms, coordinates):
xyz_file.write(format % (atom, coor[0], coor[1], coor[2]))
def write_cif(file_name, atoms, coordinates, header='mol', cell=[1, 1, 1, 90, 90, 90]):
""" Write given atomic coordinates to file in cif format """
with open(file_name, 'w') as cif_file:
cif_file.write('data_%s\n' % header)
cif_file.write('_cell_length_a %7.4f\n' % cell[0])
cif_file.write('_cell_length_b %7.4f\n' % cell[1])
cif_file.write('_cell_length_c %7.4f\n' % cell[2])
cif_file.write('_cell_angle_alpha %7.4f\n' % cell[3])
cif_file.write('_cell_angle_beta %7.4f\n' % cell[4])
cif_file.write('_cell_angle_gamma %7.4f\n' % cell[5])
cif_file.write('loop_\n')
cif_file.write('_atom_site_label\n')
cif_file.write('_atom_site_type_symbol\n')
cif_file.write('_atom_site_fract_x\n')
cif_file.write('_atom_site_fract_y\n')
cif_file.write('_atom_site_fract_z\n')
cif_format = '%s%-4i %2s %7.4f %7.4f %7.4f\n'
for i, (atom, coor) in enumerate(zip(atoms, coordinates)):
cif_file.write(cif_format % (atom, i, atom, coor[0], coor[1], coor[2]))
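# Usage sketch (hypothetical water molecule; xyz/pdb coordinates are Cartesian
# in Angstroms, while write_cif expects fractional coordinates). Runs only when
# the module is executed directly.
if __name__ == "__main__":
    atoms = ['O', 'H', 'H']
    coordinates = [[0.000, 0.000, 0.000],
                   [0.757, 0.586, 0.000],
                   [-0.757, 0.586, 0.000]]
    write_xyz('water.xyz', atoms, coordinates, header='water')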
|
the-stack_0_12813 | import random
import discord
import json
import requests
import io
from random import randint
from discord.ext import commands
from utils import lists, http, default, eapi, sfapi
processapi = eapi.processapi
processshowapi = eapi.processshowapi
search = sfapi.search
class ResultNotFound(Exception):
"""Used if ResultNotFound is triggered by e* API."""
pass
class InvalidHTTPResponse(Exception):
"""Used if non-200 HTTP Response got from server."""
pass
class Fun:
def __init__(self, bot):
self.bot = bot
self.config = default.get("config.json")
@commands.command(aliases=['8ball'])
async def eightball(self, ctx, *, question: commands.clean_content):
""" Consult 8ball to receive an answer """
answer = random.choice(lists.ballresponse)
await ctx.send(f"🎱 **Question:** {question}\n**Answer:** {answer}")
@staticmethod
async def randomimageapi(ctx, url, endpoint):
try:
r = await http.get(url, res_method="json", no_cache=True)
except json.JSONDecodeError:
return await ctx.send("Couldn't find anything from the API")
embed = discord.Embed(colour=249742)
embed.set_image(url=r[endpoint])
await ctx.send(embed=embed)
@staticmethod
async def textapi(ctx, url, endpoint):
try:
r = await http.get(url, res_method="json", no_cache=True)
except json.JSONDecodeError:
return await ctx.send("Couldn't find anything from the API")
await ctx.send(f"{r[endpoint]}")
@staticmethod
async def factapi(ctx, url, endpoint):
try:
r = await http.get(url, res_method="json", no_cache=True)
except json.JSONDecodeError:
return await ctx.send("Couldn't find anything from the API")
await ctx.send(f'**Did you know?** 🤔\n\n{r[endpoint]}')
@staticmethod
async def asciitext(ctx, url):
try:
with requests.get(url) as f:
html = f.text
await ctx.send(f"```\n{html}\n```")
except InvalidHTTPResponse as e:
print(e)
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def cat(self, ctx):
""" Posts a random cat """
await self.randomimageapi(ctx, 'https://nekos.life/api/v2/img/meow', 'url')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def dog(self, ctx):
""" Posts a random dog """ # https://dog.ceo/api/breeds/image/random Fetch!
await self.randomimageapi(ctx, 'https://random.dog/woof.json', 'url')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def doggo(self, ctx):
""" Posts a random dog """
await self.randomimageapi(ctx, 'https://dog.ceo/api/breeds/image/random', 'message')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def neko(self, ctx):
""" Posts a random neko """
await self.randomimageapi(ctx, 'https://nekos.life/api/v2/img/neko', 'url')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def duck(self, ctx):
""" Posts a random duck """
await self.randomimageapi(ctx, 'https://random-d.uk/api/v1/random', 'url')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def fox(self, ctx):
""" Posts a random fox girl """
await self.randomimageapi(ctx, 'https://nekos.life/api/v2/img/fox_girl', 'url')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def rabbit(self, ctx):
""" Posts a random rabbit """
await self.randomimageapi(ctx, f'https://api.chewey-bot.ga/rabbit?auth={self.config.cheweyauth}', 'data')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def snek(self, ctx):
""" Does a heckin snek image """
await self.randomimageapi(ctx, f'https://api.chewey-bot.ga/snake?auth={self.config.cheweyauth}', 'data')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def otter(self, ctx):
""" Posts a random otter """
await self.randomimageapi(ctx, f'https://api.chewey-bot.ga/otter?auth={self.config.cheweyauth}', 'data')
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def birb(self, ctx):
""" Posts a random birb """
await self.randomimageapi(ctx, f'https://api.chewey-bot.ga/birb?auth={self.config.cheweyauth}', 'data')
@commands.command(aliases=['flip', 'coin'])
async def coinflip(self, ctx):
""" Coinflip! """
coinsides = ['Heads', 'Tails']
await ctx.send(f"**{ctx.author.name}** flipped a coin and got **{random.choice(coinsides)}**!")
@commands.command()
async def reverse(self, ctx, *, text: str):
""" !poow ,ffuts esreveR
Everything you type after reverse will of course, be reversed
"""
t_rev = text[::-1].replace("@", "@\u200B").replace("&", "&\u200B")
await ctx.send(f"🔁 {t_rev}")
@commands.command()
async def rate(self, ctx, *, thing: commands.clean_content):
""" Rates what you desire """
numbers = random.randint(0, 100)
decimals = random.randint(0, 9)
if numbers == 100:
decimals = 0
await ctx.send(f"I'd rate {thing} a **{numbers}.{decimals} / 100**")
@commands.command(aliases=['howhot', 'hot'])
async def hotcalc(self, ctx, user: discord.Member = None):
""" Returns a random percent for how hot is a discord user """
if user is None:
user = ctx.author
random.seed(user.id)
r = random.randint(1, 100)
hot = r / 1.17
emoji = "💔"
if hot > 25:
emoji = "❤"
if hot > 50:
emoji = "💖"
if hot > 75:
emoji = "💞"
await ctx.send(f"**{user.name}** is **{hot:.2f}%** hot {emoji}")
@commands.command()
async def e926(self, ctx, *args):
"""Searches e926 with given queries.
Arguments:
`*args` : list
The quer(y/ies)"""
msgtoedit = await ctx.send("Searching...")
args = ' '.join(args)
args = str(args)
netloc = "e926"
print("------")
print("Got command with args: " + args)
if "order:score_asc" in args:
await ctx.send("I'm not going to fall into that one, silly~")
return
if "score:" in args:
apilink = 'https://e926.net/post/index.json?tags=' + args + '&limit=320'
else:
apilink = 'https://e926.net/post/index.json?tags=' + args + ' score:>25&limit=320'
try:
await eapi.processapi(apilink)
except ResultNotFound:
await ctx.send("Result not found!")
return
except InvalidHTTPResponse:
await ctx.send("We're getting invalid response from the API, please try again later!")
return
msgtoedit = await ctx.channel.get_message(msgtoedit.id)
msgtosend = "Post link: `https://""" + netloc + """.net/post/show/""" + eapi.processapi.imgid + """/`\r\nArtist: `""" + eapi.processapi.imgartist + """`\r\nSource: `""" + eapi.processapi.imgsource + """`\r\nRating: """ + eapi.processapi.imgrating + """\r\nTags: `""" + eapi.processapi.imgtags + """` ...and more\r\nImage link: """ + eapi.processapi.file_link
await msgtoedit.edit(content=msgtosend)
@commands.command()
async def yell(self, ctx, *, text: str):
""" AAAAAAAAA!
Everything you type after yell will of course, be yelled
"""
t_upper = text.upper().replace("@", "@\u200B").replace("&", "&\u200B")
await ctx.send(f"⬆️ {t_upper}")
@commands.command()
async def whisper(self, ctx, *, text: str):
""" Shh
Be quiet..
"""
t_lower = text.lower().replace("@", "@\u200B").replace("&", "&\u200B")
await ctx.send(f"⬇️ {t_lower}")
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def headpat(self, ctx):
"""Posts a random headpat from headp.at"""
def url_to_bytes(url):
data = requests.get(url)
content = io.BytesIO(data.content)
filename = url.rsplit("/", 1)[-1]
return {"content": content, "filename": filename}
pats = requests.get("http://headp.at/js/pats.json").json()
pat = random.choice(pats)
file = url_to_bytes("http://headp.at/pats/{}".format(pat))
await ctx.send(file=discord.File(file["content"], file["filename"]))
@commands.command()
async def hug(self, ctx, user: discord.Member = None):
""" Hug a user! """
if user is None:
user = ctx.author
await ctx.send(f"💖 | **{ctx.author.name}** hugs **{user.name}**")
@commands.command()
async def cookie(self, ctx, user: discord.Member = None):
""" Hug a user! """
if user is None:
user = ctx.author
await ctx.send(f"🍪 | **{ctx.author.name}** gives **{user.name}** a cookie!")
@commands.command()
async def stab(self, ctx, user: discord.Member = None):
""" Ssstab a perssson! """
if user is None:
user = ctx.author
await ctx.send(f"🔪 | **{ctx.author.name}** stabbed **{user.name}** in the hand (How rude)!")
@commands.command()
async def pat(self, ctx, user: discord.Member = None):
""" Headpats for all! """
if user is None:
user = ctx.author
await ctx.send(f"<a:patkyutie:444890889513598986> | **{ctx.author.name}** pats **{user.name}** on the head!")
@commands.command()
async def nom(self, ctx, user: discord.Member = None):
""" Nom a user! """
if user is None:
user = ctx.author
await ctx.send(f"<a:WanTriggered:437201280918618112> | **{ctx.author.name}** nommed **{user.name}**'s arm!")
@commands.command()
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def fact(self, ctx):
""" sends a random fact """
await self.factapi(ctx, 'https://nekos.life/api/v2/fact', 'fact')
@commands.command()
async def bamboozle(self, ctx):
""" You just got bamboozled! """
await ctx.send(f"**{ctx.author.name}** just got heckin' bamboozled!")
@commands.command(hidden=True)
async def highcontrastphotooffruitfloatingthreateninglyinthedark(self, ctx):
""" .. """
await ctx.send("https://i.imgur.com/gtm1VKQ.jpg")
@commands.command(hidden=True)
async def lighttheme(self, ctx):
""" E """
await ctx.send("Ew https://i.imgur.com/fbIE97N.png")
@commands.command()
@commands.guild_only()
async def ship(self, ctx, user: discord.User, *, user2: discord.User=None):
"""Checks the shiprate for 2 users"""
author = ctx.message.author
if not user2:
user2 = author
if not user:
await ctx.send("can't ship nothing y'know..")
elif user.id == user2.id:
await ctx.send("i-i can't ship the same person..")
elif user.id == author.id and user2.id == author.id:
await ctx.send(f"wow, you're in love with yourself, huh {ctx.author.name}?")
elif user == self.bot.user and user2 == author or user2 == self.bot.user and user == author:
blushes = ["m-me..? 0////0", "m-me..? >////<"]
return await ctx.send(random.choice(blushes))
else:
n = randint(1, 100)
if n == 100:
bar = "██████████"
heart = '💞'
elif n >= 90:
bar = "█████████."
heart = '💕'
elif n >= 80:
bar = "████████.."
heart = '😍'
elif n >= 70:
bar = "███████..."
heart = '💗'
elif n >= 60:
bar = "██████...."
heart = '❤'
elif n >= 50:
bar = '█████.....'
heart = '❤'
elif n >= 40:
bar = "████......"
heart = '💔'
elif n >= 30:
bar = "███......."
heart = '💔'
elif n >= 20:
bar = "██........"
heart = '💔'
elif n >= 10:
bar = "█........."
heart = '💔'
elif n < 10:
bar = ".........."
heart = '🖤'
else:
bar = ".........."
heart = '🖤'
name1 = user.name.replace(" ", "")
name1 = name1[:int(len(name1) / 2):]
name2 = user2.name.replace(" ", "")
name2 = name2[int(len(name2) / 2)::]
ship = discord.Embed(description=f"**{n}%** **`{bar}`** {heart}", color=ctx.me.colour)
ship.title = f"{user.name} x {user2.name}"
ship.set_footer(text=f"Shipname: {str(name1 + name2).lower()}")
await ctx.send(embed=ship)
@commands.command(aliases=['👏'])
@commands.guild_only()
async def emojify(self, ctx, emote, *, text_to_clap: str):
""" 👏bottom👏text👏 """
clapped_text = text_to_clap.replace("@everyone", f"{emote}everyone").replace("@here", f"{emote}here").replace(" ", f"{emote}")
clapped_text = f"{emote}{clapped_text}{emote}"
await ctx.send(clapped_text)
@commands.command()
async def owo(self, ctx):
"""Sends a random owo face"""
owo = random.choice(lists.owos)
await ctx.send(f"{owo} whats this~?")
@commands.command()
async def choose(self, ctx, *args):
"""Choose one of a lot arguments (Split with |) """
args = ' '.join(args)
args = str(args)
choices = args.split('|')
if len(choices) < 2:
await ctx.send("You need to send at least 2 argument!")
return
await ctx.send(random.choice(choices))
@commands.command()
async def jpeg(self, ctx, urltojpeg: str):
""" Does what it says on the can """
if "http" not in urltojpeg:
return ctx.send("Include a url you donk!")
await self.randomimageapi(ctx, f'https://nekobot.xyz/api/imagegen?type=jpeg&url={urltojpeg}', 'message')
@commands.command()
async def deepfry(self, ctx, urltojpeg: str):
""" Deepfries an image """
if "http" not in urltojpeg:
return ctx.send("Include a url you donk!")
await self.randomimageapi(ctx, f'https://nekobot.xyz/api/imagegen?type=deepfry&image={urltojpeg}', 'message')
@commands.command()
async def clyde(self, ctx, clydetext: str):
""" Makes Clyde say something """
if clydetext is None:
return ctx.send("Include some text you donk!")
await self.randomimageapi(ctx, f'https://nekobot.xyz/api/imagegen?type=clyde&text={clydetext}', 'message')
@commands.command()
async def magik(self, ctx, intensity: str, imgtomagik: str):
""" why don'T WE JUST RELAX AND TURn on THe rADIO? wOuLd You LIKE AM OR FM """
if imgtomagik is None:
return ctx.send("Include some text you donk!")
if intensity not in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']:
return ctx.send("Include an intensity to magik (1-10)")
await self.randomimageapi(ctx, f'https://nekobot.xyz/api/imagegen?type=magik&image={imgtomagik}&intensity={intensity}', 'message')
@commands.command(aliases=['ascii'])
async def asciify(self, ctx, *, text: str):
""" Test """
texttoascii = text.replace(" ", "%20")
await self.asciitext(ctx, f"http://artii.herokuapp.com/make?text={texttoascii}")
def setup(bot):
bot.add_cog(Fun(bot))
|
the-stack_0_12814 | import os
import json
import pandas
from flask import Flask, jsonify, redirect, render_template, request
from google.cloud import secretmanager
from alpha_vantage.timeseries import TimeSeries
app = Flask(__name__)
PROJECT_ID = os.environ.get("PROJECTID")
secrets = secretmanager.SecretManagerServiceClient()
ALPHA_VANTAGE_KEY = secrets.access_secret_version(request={"name": "projects/"+PROJECT_ID+"/secrets/alpha-vantage-key/versions/1"}).payload.data.decode("utf-8")
ts = TimeSeries(key=ALPHA_VANTAGE_KEY)
@app.route("/")
def hello():
return "Hello World!!!"
@app.route('/api/v1/symbol', methods=['POST'])
def get_time_series():
if request.method == 'POST':
symbol = request.args['symbol']
data, metadata = ts.get_intraday(
symbol, interval='15min', outputsize="25")
return jsonify(data=data)
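# Example request (assumes the Flask dev server on its default port 5000; the
# symbol is passed as a query parameter, matching request.args above):
#
#     curl -X POST "http://localhost:5000/api/v1/symbol?symbol=MSFT"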
if __name__ == "__main__":
app.debug=True
app.run() |
the-stack_0_12815 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import torch
import torch.distributed as dist
from monai.data import (
CacheDataset,
DataLoader,
load_decathlon_datalist,
partition_dataset,
)
from monai.engines import SupervisedEvaluator, SupervisedTrainer
from monai.handlers import (
CheckpointSaver,
LrScheduleHandler,
MeanDice,
StatsHandler,
TensorBoardStatsHandler,
ValidationHandler,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.losses import DiceLoss
from monai.networks.layers import Norm
from monai.networks.nets import UNet
from monai.transforms import (
Activationsd,
AsDiscreted,
Compose,
CropForegroundd,
EnsureChannelFirstd,
LoadImaged,
Orientationd,
RandCropByPosNegLabeld,
ScaleIntensityRanged,
Spacingd,
ToTensord,
)
from torch.nn.parallel import DistributedDataParallel
from monai.handlers import from_engine
class TrainConfiger:
"""
This class is used to config the necessary components of train and evaluate engines
for MONAI trainer.
Please check the implementation of `SupervisedEvaluator` and `SupervisedTrainer`
from `monai.engines` and determine which components can be used.
Args:
config_root: root folder path of config files.
wf_config_file_name: json file name of the workflow config file.
"""
def __init__(
self,
config_root: str,
wf_config_file_name: str,
local_rank: int = 0,
):
with open(os.path.join(config_root, wf_config_file_name)) as file:
wf_config = json.load(file)
self.wf_config = wf_config
"""
config Args:
max_epochs: the total epoch number for trainer to run.
learning_rate: the learning rate for optimizer.
data_list_base_dir: the directory containing the data list json file.
data_list_json_file: the data list json file.
val_interval: the interval (number of epochs) to do validation.
ckpt_dir: the directory to save the checkpoint.
amp: whether to enable auto-mixed-precision training.
use_gpu: whether to use GPU in training.
multi_gpu: whether to use multiple GPUs for distributed training.
"""
self.max_epochs = wf_config["max_epochs"]
self.learning_rate = wf_config["learning_rate"]
self.data_list_base_dir = wf_config["data_list_base_dir"]
self.data_list_json_file = wf_config["data_list_json_file"]
self.val_interval = wf_config["val_interval"]
self.ckpt_dir = wf_config["ckpt_dir"]
self.amp = wf_config["amp"]
self.use_gpu = wf_config["use_gpu"]
self.multi_gpu = wf_config["multi_gpu"]
self.local_rank = local_rank
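# A minimal example of the workflow config JSON consumed above (field values
# are placeholders, not the settings of any particular experiment):
#
# {
#   "max_epochs": 100,
#   "learning_rate": 1e-4,
#   "data_list_base_dir": "/workspace/data/Task09_Spleen",
#   "data_list_json_file": "dataset_0.json",
#   "val_interval": 5,
#   "ckpt_dir": "/workspace/ckpts",
#   "amp": true,
#   "use_gpu": true,
#   "multi_gpu": false
# }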
def set_device(self):
if self.multi_gpu:
# initialize distributed training
dist.init_process_group(backend="nccl", init_method="env://")
device = torch.device(f"cuda:{self.local_rank}")
torch.cuda.set_device(device)
else:
device = torch.device("cuda" if self.use_gpu else "cpu")
self.device = device
def configure(self):
self.set_device()
network = UNet(
dimensions=3,
in_channels=1,
out_channels=2,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
norm=Norm.BATCH,
).to(self.device)
if self.multi_gpu:
network = DistributedDataParallel(
module=network,
device_ids=[self.device],
find_unused_parameters=False,
)
train_transforms = Compose(
[
LoadImaged(keys=("image", "label")),
EnsureChannelFirstd(keys=("image", "label")),
Spacingd(
keys=["image", "label"],
pixdim=(1.5, 1.5, 2.0),
mode=("bilinear", "nearest"),
),
Orientationd(keys=["image", "label"], axcodes="RAS"),
ScaleIntensityRanged(
keys="image",
a_min=-57,
a_max=164,
b_min=0.0,
b_max=1.0,
clip=True,
),
CropForegroundd(keys=("image", "label"), source_key="image"),
RandCropByPosNegLabeld(
keys=("image", "label"),
label_key="label",
spatial_size=(64, 64, 64),
pos=1,
neg=1,
num_samples=4,
image_key="image",
image_threshold=0,
),
ToTensord(keys=("image", "label")),
]
)
# set datalist
train_datalist = load_decathlon_datalist(
os.path.join(self.data_list_base_dir, self.data_list_json_file),
is_segmentation=True,
data_list_key="training",
base_dir=self.data_list_base_dir,
)
val_datalist = load_decathlon_datalist(
os.path.join(self.data_list_base_dir, self.data_list_json_file),
is_segmentation=True,
data_list_key="validation",
base_dir=self.data_list_base_dir,
)
if self.multi_gpu:
train_datalist = partition_dataset(
data=train_datalist,
shuffle=True,
num_partitions=dist.get_world_size(),
even_divisible=True,
)[dist.get_rank()]
train_ds = CacheDataset(
data=train_datalist,
transform=train_transforms,
cache_rate=1.0,
num_workers=4,
)
train_data_loader = DataLoader(
train_ds,
batch_size=2,
shuffle=True,
num_workers=4,
)
val_transforms = Compose(
[
LoadImaged(keys=("image", "label")),
EnsureChannelFirstd(keys=("image", "label")),
Spacingd(
keys=["image", "label"],
pixdim=(1.5, 1.5, 2.0),
mode=("bilinear", "nearest"),
),
Orientationd(keys=["image", "label"], axcodes="RAS"),
ScaleIntensityRanged(
keys="image",
a_min=-57,
a_max=164,
b_min=0.0,
b_max=1.0,
clip=True,
),
CropForegroundd(keys=("image", "label"), source_key="image"),
ToTensord(keys=("image", "label")),
]
)
val_ds = CacheDataset(
data=val_datalist, transform=val_transforms, cache_rate=0.0, num_workers=4
)
val_data_loader = DataLoader(
val_ds,
batch_size=1,
shuffle=False,
num_workers=4,
)
post_transform = Compose(
[
Activationsd(keys="pred", softmax=True),
AsDiscreted(
keys=["pred", "label"],
argmax=[True, False],
to_onehot=True,
num_classes=2,
),
]
)
# metric
key_val_metric = {
"val_mean_dice": MeanDice(
include_background=False,
output_transform=from_engine(["pred", "label"]),
#device=self.device,
)
}
val_handlers = [
StatsHandler(output_transform=lambda x: None),
CheckpointSaver(
save_dir=self.ckpt_dir,
save_dict={"model": network},
save_key_metric=True,
),
TensorBoardStatsHandler(
log_dir=self.ckpt_dir, output_transform=lambda x: None
),
]
self.eval_engine = SupervisedEvaluator(
device=self.device,
val_data_loader=val_data_loader,
network=network,
inferer=SlidingWindowInferer(
roi_size=[160, 160, 160],
sw_batch_size=4,
overlap=0.5,
),
postprocessing=post_transform,
key_val_metric=key_val_metric,
val_handlers=val_handlers,
amp=self.amp,
)
optimizer = torch.optim.Adam(network.parameters(), self.learning_rate)
loss_function = DiceLoss(to_onehot_y=True, softmax=True)
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=5000, gamma=0.1
)
train_handlers = [
LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
ValidationHandler(
validator=self.eval_engine, interval=self.val_interval, epoch_level=True
),
StatsHandler(tag_name="train_loss", output_transform=from_engine("loss", first=True)),
TensorBoardStatsHandler(
log_dir=self.ckpt_dir,
tag_name="train_loss",
output_transform=from_engine("loss", first=True),
),
]
self.train_engine = SupervisedTrainer(
device=self.device,
max_epochs=self.max_epochs,
train_data_loader=train_data_loader,
network=network,
optimizer=optimizer,
loss_function=loss_function,
inferer=SimpleInferer(),
postprocessing=post_transform,
key_train_metric=None,
train_handlers=train_handlers,
amp=self.amp,
)
if self.local_rank > 0:
self.train_engine.logger.setLevel(logging.WARNING)
self.eval_engine.logger.setLevel(logging.WARNING)
|
the-stack_0_12816 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#-----------------------------------------------------------------------
# Author: delimitry
#-----------------------------------------------------------------------
import os
import time
import math
import datetime
from asciicanvas import AsciiCanvas
x_scale_ratio = 1.75
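# Geometry note: each second/minute step is 6 degrees (360/60) and each hour
# step is 30 degrees (360/12). The "+ 45" in the hand/mark formulas adds
# 45 * 6 = 270 degrees so that a value of 0 points straight up to 12 o'clock
# on a canvas whose y axis grows downwards, and x offsets are stretched by
# x_scale_ratio because terminal character cells are taller than they are wide.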
def draw_second_hand(ascii_canvas, seconds, length, fill_char):
"""
Draw second hand
"""
x0 = int(math.ceil(ascii_canvas.cols / 2.0))
y0 = int(math.ceil(ascii_canvas.lines / 2.0))
x1 = x0 + int(math.cos((seconds + 45) * 6 * math.pi / 180) * length * x_scale_ratio)
y1 = y0 + int(math.sin((seconds + 45) * 6 * math.pi / 180) * length)
ascii_canvas.add_line(int(x0), int(y0), int(x1), int(y1), fill_char=fill_char)
def draw_minute_hand(ascii_canvas, minutes, length, fill_char):
"""
Draw minute hand
"""
x0 = int(math.ceil(ascii_canvas.cols / 2.0))
y0 = int(math.ceil(ascii_canvas.lines / 2.0))
x1 = x0 + int(math.cos((minutes + 45) * 6 * math.pi / 180) * length * x_scale_ratio)
y1 = y0 + int(math.sin((minutes + 45) * 6 * math.pi / 180) * length)
ascii_canvas.add_line(int(x0), int(y0), int(x1), int(y1), fill_char=fill_char)
def draw_hour_hand(ascii_canvas, hours, minutes, length, fill_char):
"""
Draw hour hand
"""
x0 = int(math.ceil(ascii_canvas.cols / 2.0))
y0 = int(math.ceil(ascii_canvas.lines / 2.0))
total_hours = hours + minutes / 60.0
x1 = x0 + int(math.cos((total_hours + 45) * 30 * math.pi / 180) * length * x_scale_ratio)
y1 = y0 + int(math.sin((total_hours + 45) * 30 * math.pi / 180) * length)
ascii_canvas.add_line(int(x0), int(y0), int(x1), int(y1), fill_char=fill_char)
def draw_clock_face(ascii_canvas, radius, mark_char):
"""
Draw clock face with hour and minute marks
"""
x0 = ascii_canvas.cols // 2
y0 = ascii_canvas.lines // 2
# draw marks first
for mark in range(1, 12 * 5 + 1):
x1 = x0 + int(math.cos((mark + 45) * 6 * math.pi / 180) * radius * x_scale_ratio)
y1 = y0 + int(math.sin((mark + 45) * 6 * math.pi / 180) * radius)
if mark % 5 != 0:
ascii_canvas.add_text(x1, y1, mark_char)
    # start from 1 because index 0 corresponds to 12 o'clock
for mark in range(1, 12 + 1):
x1 = x0 + int(math.cos((mark + 45) * 30 * math.pi / 180) * radius * x_scale_ratio)
y1 = y0 + int(math.sin((mark + 45) * 30 * math.pi / 180) * radius)
ascii_canvas.add_text(x1, y1, '%s' % mark)
def draw_clock(cols, lines):
"""
Draw clock
"""
if cols < 25 or lines < 25:
        print('Too few columns/lines to print out the clock!')
exit()
# prepare chars
single_line_border_chars = ('.', '-', '.', '|', ' ', '|', '`', '-', "'")
second_hand_char = '.'
minute_hand_char = 'o'
hour_hand_char = 'O'
mark_char = '`'
if os.name == 'nt':
single_line_border_chars = ('.', '-', '.', '|', ' ', '|', '`', '-', "'") # ('\xDA', '\xC4', '\xBF', '\xB3', '\x20', '\xB3', '\xC0', '\xC4', '\xD9')
second_hand_char = '.' # '\xFA'
minute_hand_char = 'o' # '\xF9'
hour_hand_char = 'O' # 'o'
mark_char = '`' # '\xF9'
# create ascii canvas for clock and eval vars
ascii_canvas = AsciiCanvas(cols, lines)
center_x = int(math.ceil(cols / 2.0))
center_y = int(math.ceil(lines / 2.0))
radius = center_y - 5
second_hand_length = int(radius / 1.17)
minute_hand_length = int(radius / 1.25)
hour_hand_length = int(radius / 1.95)
# add clock region and clock face
ascii_canvas.add_rect(5, 3, int(math.floor(cols / 2.0)) * 2 - 9, int(math.floor(lines / 2.0)) * 2 - 5)
draw_clock_face(ascii_canvas, radius, mark_char)
now = datetime.datetime.now()
# add regions with weekday and day if possible
if center_x > 25:
left_pos = int(radius * x_scale_ratio) / 2 - 4
ascii_canvas.add_nine_patch_rect(int(center_x + left_pos), int(center_y - 1), 5, 3, single_line_border_chars)
ascii_canvas.add_text(int(center_x + left_pos + 1), int(center_y), now.strftime('%a'))
ascii_canvas.add_nine_patch_rect(int(center_x + left_pos + 5), int(center_y - 1), 4, 3, single_line_border_chars)
ascii_canvas.add_text(int(center_x + left_pos + 1 + 5), int(center_y), now.strftime('%d'))
# add clock hands
draw_second_hand(ascii_canvas, now.second, second_hand_length, fill_char=second_hand_char)
draw_minute_hand(ascii_canvas, now.minute, minute_hand_length, fill_char=minute_hand_char)
draw_hour_hand(ascii_canvas, now.hour, now.minute, hour_hand_length, fill_char=hour_hand_char)
# print out canvas
ascii_canvas.print_out()
def main():
lines = 40
cols = int(lines * x_scale_ratio)
# set console window size and screen buffer size
if os.name == 'nt':
os.system('mode con: cols=%s lines=%s' % (cols + 1, lines + 1))
while True:
os.system('cls' if os.name == 'nt' else 'clear')
draw_clock(cols, lines)
time.sleep(0.2)
if __name__ == '__main__':
main()
|
the-stack_0_12817 | # coding: utf-8
import socketserver
import os
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
def getFileContents(path):
fileText = ""
with open(path, "r") as fin:
fileText = fin.read()
return fileText
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
requestParams = self.data.decode().split(' ')
requestedFile = requestParams[1]
print("Got a request of: %s\n" % self.data)
if requestParams[0] == "GET":
if "../" not in requestedFile and os.path.exists("./www"+requestedFile):
self.index("./www"+requestedFile)
else:
self.pageNotFound()
else:
self.methodNotAllowed()
def pageNotFound(self):
self.request.sendall(bytearray("HTTP/1.1 404 Not Found\r\n", "utf-8"))
def methodNotAllowed(self):
self.request.sendall(
bytearray("HTTP/1.1 405 Method Not Allowed\r\n", "utf-8"))
    def movedPermanently(self, location):
host = self.server.server_address[0]
port = self.server.server_address[1]
baseUrl = "http://%s:%s" % (host, port)
self.request.sendall(
bytearray("HTTP/1.1 301 Moved Permanently\n", "utf-8"))
self.request.sendall(
bytearray("Location:" + baseUrl + location, "utf-8"))
def serveFile(self, fileText, fileType, httpHeader):
self.request.sendall(bytearray(httpHeader, "utf-8"))
self.request.sendall(
bytearray("Content-Type:" + fileType + "\n\n", "utf-8"))
self.request.sendall(bytearray(fileText, "utf-8"))
def index(self, path):
httpHeader = "HTTP/1.1 200 OK\n"
if os.path.isdir(path):
if path[-1] != "/":
path += "/"
location = path[5:]
            self.movedPermanently(location)
return
# httpHeader = "HTTP/1.1 302 Found\n"
path += "index.html"
fileText = getFileContents(path)
fileType = "text/html"
if path[-3:] == "css":
fileType = "text/css"
self.serveFile(fileText, fileType, httpHeader)
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
the-stack_0_12819 | # Copyright 2018 Xu Chen All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import os
from input_data.cifar10 import load_cifar10_data
def _dream_cropping(image, label, specs, cropped_size):
if cropped_size < specs['image_size']:
image = tf.image.resize_image_with_crop_or_pad(
image, cropped_size, cropped_size)
# convert from 0 ~ 255 to 0. ~ 1.
image = tf.cast(image, tf.float32) * (1. / 255.)
# transpose image into (CHW)
image = tf.transpose(image, [2, 0, 1]) # (CHW)
feature = {
'image': image,
'label': tf.one_hot(label, 10)
}
return feature
def _dream_process(feature):
batched_features = {
'images': feature['image'],
'labels': feature['label']
}
return batched_features
def _dream_sample_pairs(split, data_dir, max_epochs, n_repeats,
total_batch_size=1):
"""
We do the following steps to produce the dataset:
1. sample one (image, label) pair in one class;
2. repeat pair in 1. {n_repeats} times;
    3. go back to 1. until one full iteration over all {num_classes}
        classes is finished; we consider this one epoch.
    4. go back to 1. again until {max_epochs} epochs are finished.
    So {max_epochs} unique pairs will be selected for each class.
Args:
split: 'train' or 'test', which split of dataset to read from;
data_dir: path to the mnist data directory;
max_epochs: maximum epochs to go through the model;
n_repeats: number of computed gradients;
        total_batch_size: total number of images per batch.
Returns:
processed images, labels and specs
"""
"""Dataset specs"""
specs = {
'split': split,
'max_epochs': max_epochs,
'steps_per_epoch': n_repeats,
'batch_size': total_batch_size,
'image_size': 32,
'depth': 3,
'num_classes': 10
}
"""Load data from mat files"""
images, labels = load_cifar10_data.load_cifar10(data_dir, split)
assert images.shape[0] == labels.shape[0]
specs['total_size'] = int(images.shape[0])
"""Process np array"""
# sort by labels to get the index permutations
# classes: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
indices = [specs['total_size'] // specs['num_classes'] * i
for i in range(specs['num_classes'])]
indices.append(specs['total_size'])
perm = labels.argsort()
images = images[perm]
labels = labels[perm]
sampled_idc_lists = []
for start in indices[:-1]:
sampled_idc_lists.append(
np.arange(start, start + max_epochs).tolist())
sampled_idc_mat = np.array(sampled_idc_lists)
sampled_idc_mat = np.transpose(sampled_idc_mat, [1, 0])
sampled_idc_lists = sampled_idc_mat.flatten().tolist()
assert len(sampled_idc_lists) == max_epochs * specs['num_classes']
# we let n_repeats = steps_per_epoch = number of computed gradients
list_of_images = []
list_of_labels = []
for idx in sampled_idc_lists:
for _ in range(n_repeats):
list_of_images.append(images[idx])
list_of_labels.append(labels[idx])
res_images = np.stack(list_of_images, axis=0)
res_labels = np.array(list_of_labels)
assert res_images.shape == (max_epochs*specs['num_classes']*n_repeats, specs['image_size'], specs['image_size'], specs['depth'])
assert res_labels.shape == (max_epochs*specs['num_classes']*n_repeats,)
specs['total_size'] = res_labels.shape[0]
return (res_images, res_labels), specs
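# Worked example of the sampling above (small numbers chosen for illustration):
# with max_epochs=2 and n_repeats=3, one unique (image, label) pair is drawn per
# class and per epoch, and each selected pair is repeated 3 times back to back,
# giving 2 * 10 * 3 = 60 rows ordered as classes 0..9 of epoch 1 followed by
# classes 0..9 of epoch 2.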
def inputs(split, data_dir, max_epochs, n_repeats, cropped_size,
total_batch_size=1):
"""Construct fashion mnist inputs for dream experiment.
Args:
split: 'train' or 'test' split to read from dataset;
        data_dir: path to cifar10 data directory;
max_epochs: maximum epochs to go through the model;
n_repeats: number of computed gradients / number of the same input to repeat;
cropped_size: image size after cropping;
total_batch_size: total number of images per batch.
Returns:
batched_features: a dictionary of the input data features.
"""
assert split == 'train' or split == 'test'
"""Load sampled images and labels"""
(images, labels), specs = _dream_sample_pairs(
split, data_dir, max_epochs, n_repeats, total_batch_size)
if cropped_size == None:
cropped_size = specs['image_size']
assert cropped_size <= specs['image_size']
"""Process dataset object"""
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
dataset = dataset.prefetch(1)
dataset = dataset.map(
lambda image, label: _dream_cropping(image, label, specs, cropped_size),
num_parallel_calls=3)
specs['image_size'] = cropped_size
batched_dataset = dataset.batch(specs['batch_size'])
batched_dataset = batched_dataset.map(_dream_process, num_parallel_calls=3)
batched_dataset = batched_dataset.prefetch(1)
return batched_dataset, specs |
the-stack_0_12821 | # Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The IBMQ device class for PennyLane-Orquestra.
"""
import os
import warnings
from pennylane_orquestra.orquestra_device import OrquestraDevice
class QeIBMQDevice(OrquestraDevice):
"""The Orquestra IBMQ device.
Args:
wires (int, Iterable[Number, str]]): Number of subsystems represented
by the device, or iterable that contains unique labels for the
subsystems as numbers (i.e., ``[-1, 0, 2]``) or strings (``['ancilla',
'q1', 'q2']``). Default 1 if not specified.
shots (int or list[int]): Number of circuit evaluations/random samples used to estimate
expectation values of observables. If ``None``, the device calculates
probability, expectation values, and variances analytically. If an integer,
it specifies the number of samples to estimate these quantities.
If a list of integers is passed, the circuit evaluations are batched over the list of shots.
backend (str): the name of the Qiskit backend to use supported by
Orquestra, e.g., ``"ibmq_qasm_simulator"`` or the name of real hardware
devices
Keyword Args:
ibmqx_token=None (str): the authentication token needed to run a job on
IBMQ
"""
short_name = "orquestra.ibmq"
qe_component = "qe-qiskit"
qe_module_name = "qeqiskit.backend"
qe_function_name = "QiskitBackend"
def __init__(self, wires, shots=8192, backend="ibmq_qasm_simulator", **kwargs):
self._token = kwargs.get("ibmqx_token", None) or os.getenv("IBMQX_TOKEN")
if self._token is None:
raise ValueError(
"Please pass a valid IBMQX token to the device using the "
"'ibmqx_token' argument or by specifying the IBMQX_TOKEN "
"environment variable."
)
if shots is None:
# Raise a warning if the analytic attribute was set to True
warnings.warn(
f"The {self.short_name} device cannot be used in analytic "
"mode. Setting shots to 8192. Results are based on "
"sampling."
)
shots = 8192
super().__init__(wires, backend=backend, shots=shots, **kwargs)
def create_backend_specs(self):
backend_dict = super().create_backend_specs()
# Plug in the IBMQ token
backend_dict["api_token"] = self._token
return backend_dict
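# Hedged usage sketch (kept as a comment so importing this module stays
# side-effect free; the token value and circuit are placeholders, and the
# device name follows the plugin's short_name declared above):
#
#     import pennylane as qml
#
#     dev = qml.device("orquestra.ibmq", wires=2, shots=8192,
#                      backend="ibmq_qasm_simulator",
#                      ibmqx_token="<IBMQ_TOKEN>")
#
#     @qml.qnode(dev)
#     def circuit():
#         qml.Hadamard(wires=0)
#         qml.CNOT(wires=[0, 1])
#         return qml.expval(qml.PauliZ(0))
#
#     print(circuit())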
|
the-stack_0_12822 | # -*- coding: utf-8 -*-
from __future__ import print_function
# tag::mcts_go_cnn_preprocessing[]
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
np.random.seed(123)
X = np.load('../generated_games/features-200.npy')
Y = np.load('../generated_games/labels-200.npy')
samples = X.shape[0]
size = 9
input_shape = (size, size, 1)
X = X.reshape(samples, size, size, 1)
train_samples = 10000
X_train, X_test = X[:train_samples], X[train_samples:]
Y_train, Y_test = Y[:train_samples], Y[train_samples:]
# end::mcts_go_cnn_preprocessing[]
# tag::mcts_go_cnn_model[]
model = Sequential()
"""
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
"""
# filter = 48
# Select a 3x3 convolution kernel.
# Normally the output of a convolution is smaller than its input.
# Adding padding='same' makes Keras zero-pad the matrix around the edges, so the output keeps the same dimensions as the input.
model.add(Conv2D(filters=48, # <1>
kernel_size=(3, 3), # <2>
activation='sigmoid',
padding='same',
input_shape=input_shape))
model.add(Dropout(rate=0.6))
model.add(Conv2D(64, (3, 3), activation='relu'))
# Max pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.6))
# Flatten
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(rate=0.6))
# Softmax
model.add(Dense(size * size, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# end::mcts_go_cnn_model[]
# tag::mcts_go_cnn_eval[]
model.fit(X_train, Y_train,
batch_size=64,
epochs=5,
verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# end::mcts_go_cnn_eval[]
|
the-stack_0_12825 | from pyunity import Behaviour, SceneManager, GameObject, Vector3, MeshRenderer, Mesh, Material, RGB, ShowInInspector
class Rotator(Behaviour):
def Update(self, dt):
self.transform.eulerAngles += Vector3(0, 90, 135) * dt
def main():
scene = SceneManager.AddScene("Scene")
scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)
cube = GameObject("Cube")
renderer = cube.AddComponent(MeshRenderer)
renderer.mesh = Mesh.cube(2)
renderer.mat = Material(RGB(255, 0, 0))
cube.AddComponent(Rotator)
scene.Add(cube)
scene.List()
SceneManager.LoadScene(scene)
if __name__ == "__main__":
main()
|
the-stack_0_12829 | import os, sys
# pylint: disable-msg=F0401
from setuptools import setup, find_packages
here = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(here,
'openmdao',
'examples',
'metamodel_tutorial')))
import releaseinfo
version = releaseinfo.__version__
setup(name='openmdao.examples.metamodel_tutorial',
version=version,
description="OpenMDAO examples - Metamodel Tutorial",
long_description="""\
""",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
],
keywords='optimization multidisciplinary multi-disciplinary analysis',
author='',
author_email='',
url='http://openmdao.org',
license='Apache License, Version 2.0',
namespace_packages=["openmdao", "openmdao.examples"],
packages=find_packages(), #['openmdao','openmdao.examples'],
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
install_requires=[
'setuptools',
'openmdao.lib',
],
entry_points="""
# -*- Entry points: -*-
"""
)
|
the-stack_0_12830 | # -*- coding: utf-8 -*-
"""
GoPro Encoding
==============
Defines the *GoPro* *Protune* encoding:
- :func:`colour.models.log_encoding_Protune`
- :func:`colour.models.log_decoding_Protune`
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
- :cite:`GoPro2016a` : GoPro, Duiker, H.-P., & Mansencal, T. (2016).
gopro.py. Retrieved April 12, 2017, from
https://github.com/hpd/OpenColorIO-Configs/blob/master/aces_1.0.3/python/\
aces_ocio/colorspaces/gopro.py
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.utilities import from_range_1, to_domain_1
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['log_encoding_Protune', 'log_decoding_Protune']
def log_encoding_Protune(x):
"""
Defines the *Protune* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`GoPro2016a`
Examples
--------
>>> log_encoding_Protune(0.18) # doctest: +ELLIPSIS
0.6456234...
"""
x = to_domain_1(x)
y = np.log(x * 112 + 1) / np.log(113)
return from_range_1(y)
def log_decoding_Protune(y):
"""
Defines the *Protune* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``y`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``x`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`GoPro2016a`
Examples
--------
>>> log_decoding_Protune(0.645623486803636) # doctest: +ELLIPSIS
0.1...
"""
y = to_domain_1(y)
x = (113 ** y - 1) / 112
return from_range_1(x)
|
the-stack_0_12832 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ServiceFabricManagementClientConfiguration
from .operations import ClustersOperations
from .operations import ClusterVersionsOperations
from .operations import Operations
from .operations import ApplicationTypesOperations
from .operations import ApplicationTypeVersionsOperations
from .operations import ApplicationsOperations
from .operations import ServicesOperations
from . import models
class ServiceFabricManagementClient(object):
"""Service Fabric Management Client.
:ivar clusters: ClustersOperations operations
:vartype clusters: azure.mgmt.servicefabric.operations.ClustersOperations
:ivar cluster_versions: ClusterVersionsOperations operations
:vartype cluster_versions: azure.mgmt.servicefabric.operations.ClusterVersionsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.servicefabric.operations.Operations
:ivar application_types: ApplicationTypesOperations operations
:vartype application_types: azure.mgmt.servicefabric.operations.ApplicationTypesOperations
:ivar application_type_versions: ApplicationTypeVersionsOperations operations
:vartype application_type_versions: azure.mgmt.servicefabric.operations.ApplicationTypeVersionsOperations
:ivar applications: ApplicationsOperations operations
:vartype applications: azure.mgmt.servicefabric.operations.ApplicationsOperations
:ivar services: ServicesOperations operations
:vartype services: azure.mgmt.servicefabric.operations.ServicesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The customer subscription identifier.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = ServiceFabricManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.clusters = ClustersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.cluster_versions = ClusterVersionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.application_types = ApplicationTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_type_versions = ApplicationTypeVersionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.applications = ApplicationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.services = ServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ServiceFabricManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
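# Hedged construction sketch (kept as a comment; the credential class comes from
# the separate azure-identity package and the subscription id is a placeholder):
#
#     from azure.identity import DefaultAzureCredential
#
#     client = ServiceFabricManagementClient(
#         credential=DefaultAzureCredential(),
#         subscription_id="<subscription-id>")
#
#     # operation groups are then available as attributes,
#     # e.g. client.clusters, client.applications, client.services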
|
the-stack_0_12833 | import pandas as pd
from jnius import autoclass
from awesome_data import DataSet
from marspy.convert.molecule import *
class Archive:
def __init__(self, filepath):
self.filepath = filepath
self.name = self.filepath.split('/')[-1]
self.File = autoclass('java.io.File')
self.yamaFile = self.File(self.filepath)
def get_molecule_by_uid(self, uid):
raise NotImplementedError
def get_molecules_by_tags(self, tags):
raise NotImplementedError
def validate_params(self):
pass
class SingleMoleculeArchive(Archive):
instances = []
def __init__(self, filepath, accept_tag, label=dict()):
Archive.__init__(self, filepath)
self.instances.append(self)
self.Archive = autoclass('de.mpg.biochem.mars.molecule.SingleMoleculeArchive')
self.archive_link = self.Archive(self.yamaFile)
self.metadata_uids = list(self.archive_link.getMetadataUIDs())
self.label = label
# nucleotide
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getStringParameter('nucleotide')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if StringParameter is not set, getStringParameter returns empty string ''
if len(self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('nucleotide')) == 0:
# default n/a
self.nucleotide = 'n/a'
print(f'nucleotide not found. Setting default to {self.nucleotide}')
# parameter properly set
else:
self.nucleotide = self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('nucleotide')
# highsalt_wash
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getParameter('highsalt_wash')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if Parameter is not set, getParameter returns np.nan
if np.isnan(self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('highsalt_wash')):
# default False
self.highsalt_wash = False
print(f'highsalt_wash not found. Setting default to {self.highsalt_wash}')
else:
self.highsalt_wash = \
self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('highsalt_wash') == 1
# cdc6
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getStringParameter('cdc6')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if StringParameter is not set, getStringParameter returns empty string ''
if len(self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('cdc6')) == 0:
# default n/a
self.cdc6 = 'n/a'
print(f'cdc6 not found. Setting default to {self.cdc6}')
# parameter properly set
else:
self.cdc6 = self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('cdc6')
self.protein = list(self.label.keys())[0]
# instantiate a new SingleMolecule for each uid and store instances as list
self.molecules = [SingleMolecule(uid, self.protein, archive=self.archive_link) for uid in
self.archive_link.getMoleculeUIDs() if self.archive_link.get(uid).hasTag(accept_tag)]
self.tags = set()
for molecule in self.molecules:
self.tags.update(molecule.tags)
def get_molecule_by_uid(self, uid):
"""
Returns molecule object with provided UID.
"""
return list(filter(lambda molecule: molecule.uid == uid, self.molecules))[0]
def get_molecules_by_tags(self, tags):
"""
Provide tags as list.
Returns filter of all molecules which have all the specified tags
"""
return filter(lambda molecule: set(tags).issubset(set(molecule.tags)), self.molecules)
def __len__(self):
return len(self.molecules)
class DnaMoleculeArchive(Archive):
instances = []
def __init__(self, filepath, accept_tag, labels=dict()):
Archive.__init__(self, filepath)
self.instances.append(self)
self.Archive = autoclass('de.mpg.biochem.mars.molecule.DnaMoleculeArchive')
self.archive_link = self.Archive(self.yamaFile)
self.metadata_uids = list(self.archive_link.getMetadataUIDs())
self.dna_molecule_count = 0
for metadata in self.metadata_uids:
self.dna_molecule_count += dict(sc.to_python(self.archive_link.getMetadata(metadata).getParameters()))[
'DnaMoleculeCount']
# subtract # of reject_dna tags
self.dna_molecule_count -= len(list(filter(lambda uid:
self.archive_link.get(uid).hasTag('reject_dna'),
self.archive_link.moleculeUIDs)))
self.labels = labels
# nucleotide
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getStringParameter('nucleotide')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if StringParameter is not set, getStringParameter returns empty string ''
if len(self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('nucleotide')) == 0:
# default n/a
self.nucleotide = 'n/a'
print(f'nucleotide not found. Setting default to {self.nucleotide}')
# parameter properly set
else:
self.nucleotide = self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('nucleotide')
# highsalt_wash
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getParameter('highsalt_wash')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if Parameter is not set, getParameter returns np.nan
if np.isnan(self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('highsalt_wash')):
# default False
self.highsalt_wash = False
print(f'highsalt_wash not found. Setting default to {self.highsalt_wash}')
else:
self.highsalt_wash = \
self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('highsalt_wash') == 1
# dna_count_valid: data was fully analyzed - ALL DNA molecules fitted
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getParameter('dna_count_valid')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if Parameter is not set, getParameter returns np.nan
if np.isnan(self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('dna_count_valid')):
# default True
self.dna_count_valid = True
print(f'dna_count_valid not found. Setting default to {self.dna_count_valid}')
else:
self.dna_count_valid = \
self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('dna_count_valid') == 1
# t7_terminator
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getParameter('t7_terminator')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if Parameter is not set, getParameter returns np.nan
if np.isnan(self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('t7_terminator')):
# default False
self.t7_terminator = False
print(f't7_terminator not found. Setting default to {self.t7_terminator}')
else:
self.t7_terminator = \
self.archive_link.getMetadata(self.metadata_uids[0]).getParameter('t7_terminator') == 1
# chromatin
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getStringParameter('chromatin')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if StringParameter is not set, getStringParameter returns empty string ''
if len(self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('chromatin')) == 0:
# default n/a
self.chromatin = 'n/a'
print(f'chromatin not found. Setting default to {self.chromatin}')
# parameter properly set
else:
self.chromatin = self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('chromatin')
# cdc6
# check if all metadata parameters match & raise warning if conditions to in one archive are not identical
if len({self.archive_link.getMetadata(metadata_uid).getStringParameter('cdc6')
for metadata_uid in self.metadata_uids}) > 1:
raise MarsPyWarning()
# if StringParameter is not set, getStringParameter returns empty string ''
if len(self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('cdc6')) == 0:
# default n/a
self.cdc6 = 'n/a'
print(f'cdc6 not found. Setting default to {self.cdc6}')
# parameter properly set
else:
self.cdc6 = self.archive_link.getMetadata(self.metadata_uids[0]).getStringParameter('cdc6')
self.proteins = set()
# will get all columns in DataTable with 'Protein_n_Position_on_Dna'
for match in re.findall('\w+_Position_on_DNA', '$'.join(set(sc.to_python(
self.archive_link.properties().getColumnSet())))):
self.proteins.add(match.split('_')[0])
# instantiate a new DnaMolecule for each uid and store instances as list
self.molecules = [DnaMolecule(uid, self.proteins, archive=self.archive_link) for uid in
self.archive_link.getMoleculeUIDs()
if self.archive_link.get(uid).hasTag(accept_tag)]
# define archive tags union of all molecule tags
# define archive prefixes as union of all molecule prefixes (will be used for top level columns in big df later)
self.tags = set()
self.prefixes = set()
for molecule in self.molecules:
self.tags.update(molecule.tags)
self.prefixes.update(molecule.prefixes)
def validate_params(self):
"""
Integrity check of passed Archive.
"""
# compare number protein in params vs actual one (retrieved from metadata)
for molecule in self.molecules:
# take global protein to confirm dict was pasted correctly
for protein in self.proteins:
if not (molecule.proteins[protein] == molecule.params['Number_' + protein]):
err_message = f"Conflict in molecule {molecule.uid}!\n\
Number of {protein} retrieved from metadata: {molecule.proteins[protein]}\n\
Number of {protein} based on Parameter: {molecule.params['Number_' + protein]}"
raise MarsPyException(err_message)
return 'passed'
def add_segments_tables(self):
"""
Attach all segment tables to molecule records (stored as dict)
"""
# Do we have collisions in the archive?
coll_exp = False
for tag in self.tags:
if re.match('coll', tag):
coll_exp = True
for molecule in self.molecules:
molecule.seg_dfs = list()
# all segmentTableNames
for x, y, region in (sc.to_python(self.archive_link.get(molecule.uid).getSegmentsTableNames())):
# internal control that all seg_dfs are valid
_assigned = False
# all proteins on molecule
for prefix in molecule.prefixes:
if re.match(prefix, x) and re.match(prefix, y):
molecule.seg_dfs.append(SegmentsTable(molecule=molecule, prefix=prefix,
col_x=x, col_y=y, region=region, coll_exp=coll_exp))
_assigned = True
break
if not _assigned:
err_message = f"Conflict in molecule {molecule.uid}!\nSegmentTable {x} {y} {region} not assigned!"
raise MarsPyException(err_message)
def filter_segments(self, b_min=0, sigma_b_max=0):
"""
Filter all segments for all molecules in archive based on SegmentsTable type.
Also see filter_segments() in SegmentsTable object:
Mode 1: SegmentTable type: 'bleaching' -
Reject all steps with increase in fluorescence intensity (initial quenching).
If increase in fluorescence is detected, remove segments with same intensity (double counts).
Mode 2: SegmentsTable type: 'rate' -
Reject all segments with B value (velocity) < b_min and sigma_B > sigma_b_max (poor fits)
"""
for molecule in self.molecules:
for prefix in molecule.prefixes:
# check if one protein molecule has only one seg_df with type 'bleaching'
if len(list(filter(lambda df:
prefix == df.prefix and df.type == 'bleaching', molecule.seg_dfs))) > 1:
err_message = f"Conflict in molecule {molecule.uid}!\nMore than one SegmentTable for {prefix}."
raise MarsPyException(err_message)
# apply filter to all seg_dfs
for seg_df in molecule.seg_dfs:
seg_df.filter_segments(b_min=b_min, sigma_b_max=sigma_b_max)
# in case seg_df is empty after filtering, delete object
remove_seg_dfs = set()
for seg_df in molecule.seg_dfs:
if len(seg_df.df) == 0:
remove_seg_dfs.add(seg_df)
for seg_df in remove_seg_dfs:
molecule.seg_dfs.remove(seg_df)
def calc_bleaching_steps(self):
"""
Calculate bleaching steps for all proteins of all molecules in archive.
Also see calc_bleaching_steps() in SegmentsTable object:
Calculate number of bleaching steps based off of segment table rows.
Returns: number of bleaching steps (integer value)
No return value here; bleaching steps are stored as attribute dict with prefixes as key.
"""
for molecule in self.molecules:
molecule.bleaching_steps = dict()
for prefix in molecule.prefixes:
# check if one protein molecule has only one seg_df with type 'bleaching'
if len(list(filter(lambda seg_df:
prefix == seg_df.prefix and seg_df.type == 'bleaching', molecule.seg_dfs))) > 1:
err_message = f"Conflict in molecule {molecule.uid}!\nMore than one SegmentTable for {prefix}."
raise MarsPyException(err_message)
# only molecules with proper bleaching (not rejected)
if 'reject_bleach_' + prefix in molecule.tags:
continue
molecule.bleaching_steps[prefix] = \
list(filter(lambda seg_df: prefix == seg_df.prefix and seg_df.type == 'bleaching',
molecule.seg_dfs))[0].calc_bleaching_steps()
def detect_pauses(self, thresh=3, global_thresh=False, col='B'):
"""
Detect pauses in translocation for all SegmentTables of all molecules in archive.
Also see detect_pauses() in SegmentsTable object:
        Detects pauses in the SegmentsTable (only for type = 'rate'; others are skipped).
global_thresh: Set to True if a fixed threshold for all molecules should be used
thresh: threshold to detect pauses.
If global_thresh is False, a molecule-specific threshold is calculated with thresh^-1 * np.mean(col)
col: column evaluated for pauses
"""
for molecule in self.molecules:
for seg_df in molecule.seg_dfs:
seg_df.detect_pauses(thresh=thresh, global_thresh=global_thresh, col=col)
def get_molecule_by_uid(self, uid):
"""
Returns molecule object with provided UID.
"""
return list(filter(lambda molecule: molecule.uid == uid, self.molecules))[0]
def get_molecules_by_tags(self, tags):
"""
Provide tags as list.
Returns filter of all molecules which have all the specified tags
"""
return filter(lambda molecule: set(tags).issubset(set(molecule.tags)), self.molecules)
def __len__(self):
return len(self.molecules)
def instantiate_archive(name, datasets):
"""
Instantiates passed archive from underlying dataset
"""
# check if we have the right data type
for data in datasets:
if not isinstance(data, DataSet):
raise MarsPyException('Dataset contains non-compatible data type.')
data = list(filter(lambda dataset: dataset.name == name, datasets))[0]
if data.archive_type == 'DnaMoleculeArchive':
DnaMoleculeArchive(filepath=data.filepath + data.name, accept_tag=data.accept_tag, labels=data.labels)
elif data.archive_type == 'SingleMoleculeArchive':
SingleMoleculeArchive(filepath=data.filepath + data.name, accept_tag=data.accept_tag, label=data.labels)
else:
raise MarsPyException(f'Failed to instantiate Archive {data.name}.')
def describe_archives(archives):
"""
Describes passed archives by returning a pandsa DataFrame. Pass archives as iterable object
"""
df = pd.DataFrame(columns=['# of datasets', '# of molecules', 'labeled proteins', 'nucleotide', 'HS challenge?',
'chromatin', 'terminator?', 'archive validation'])
for archive in archives:
_temp_df = pd.DataFrame(index=[archive.name.split('.')[0]],
data=[[len(archive.metadata_uids), len(archive),
'; '.join([label + '-' + protein for protein, label in archive.labels.items()]),
archive.nucleotide, archive.highsalt_wash, archive.chromatin,
archive.t7_terminator, archive.validate_params()]],
columns=['# of datasets', '# of molecules', 'labeled proteins', 'nucleotide',
'HS challenge?', 'chromatin', 'terminator?', 'archive validation'])
df = pd.concat([df, _temp_df])
df = df.infer_objects()
return df
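# Hedged usage sketch (kept as a comment; the DataSet constructor lives in
# awesome_data and its exact signature is not shown here -- instantiate_archive
# only relies on the fields name, filepath, archive_type, accept_tag and labels):
#
#     datasets = [DataSet(...)]  # configured elsewhere in the project
#     instantiate_archive('my_experiment.yama', datasets)
#     summary = describe_archives(DnaMoleculeArchive.instances)
#     print(summary)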
|
the-stack_0_12837 | import numpy as np
from sys import argv
import sys
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.ticker as ticker
from collections import Counter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from EpiModel import *
import cv19
#-------------------------------------------------------------
# This is a development implementation of the SEIR model,
# meant to later replace the functions of the cv19 module
#
# in the original version:
# there is no delay, contagion and symptoms are instantaneous
# it is not stochastic
# the whole population reacts to the virus in the same way
# it is closed: S+I+R=N
#
# in this version:
# there is a statistical delay
# it is not stochastic
# it is closed: S+I+R=N
#
# Improvements to be made:
# - add imported cases: simply add a Poisson-distributed term to I
# - add the stochastic nature: simply draw a random variable
#   instead of doing the deterministic calculation
# - add quarantines: make beta a function of time
#
# With that the model would be complete
#
#--------------------------------------------------------------
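# For reference, the continuous-time SEIR system that the update steps below
# discretise is
#   dS/dt = -beta * S * I / N
#   dE/dt =  beta * S * I / N - sigma * E
#   dI/dt =  sigma * E - gamma * I
#   dR/dt =  gamma * I
# with beta the contact rate, sigma the incubation (E -> I) rate and gamma the
# recovery rate (gamma = beta / R_0); here every compartment is additionally
# split into age groups.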
def random_gen(A, n=1):
from random import random
res = []
rep = [0]*len(A)
for _ in range(n):
u = random()
#msg = 'testing if %f is in interval (%f %f) / index: %i'
y_old = 0.
for i, y_new in enumerate(A):
if u > y_old and u < y_new:
j = i
#print(msg % (u, y_old, y_new, i))
break
else:
y_old = y_new
if j<0 or j>= len(A):
print(j, len(A))
res.append(int(j))
rep[j] = rep[j] + 1
return(res, rep)
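# Note: random_gen performs inverse-CDF sampling -- A is expected to hold
# cumulative probabilities (e.g. pdf.cumsum()); it returns the sampled bin
# indices together with the per-bin counts.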
#___________________________________
# Load settings
conf = cv19.parser()
conf.check_file(argv)
conf.read_config_file()
conf.load_filenames()
conf.load_parameters()
#___________________________________
# parameters
R_0 = 2.2
beta = 0.7
sigma = 0.05
gamma = beta / R_0
population = 100000
N_init = 10
t_max = 200
#----------------------------------------------- SIMULATED MODEL
#{{{
c = cv19.InfectionCurve()
p = conf.p
g = cv19.Graph_nd()
# at the start everyone is in S, following the population distribution:
# 1. make up an arbitrary PDF for the population distribution:
#    in this example there are Nages=3 ranges: young, adult, elderly
pdf = np.array([5, 3, 2])
Nages = len(pdf)
pdf = pdf / float(pdf.sum())
r, rep = random_gen(pdf.cumsum(), population)
rep = np.array([.8,.2,.1])*population
pop_by_age = np.c_[rep]
# Population has a given age distribution
#---------------------------------------------------
S_init_by_age = np.c_[[[3],[20],[1]]]
# Initialize graph:
#---------------------------------------------------
I0 = S_init_by_age
S0 = pop_by_age - I0
E0 = np.zeros([Nages,1])
R0 = np.zeros([Nages,1])
S, E, I, R = S0, E0, I0, R0
zs = np.zeros([Nages,1])
pops = np.c_[[[population],[population],[population]]]
# transition probabilities may depend on the age:
#----------------------------------------------------
R_0 = 2.2
beta = 0.7
betas = np.c_[[[beta],[beta],[beta]]]
sigma = 0.05
sigmas = np.c_[[[sigma],[sigma],[sigma]]]
gamma = beta/R_0
gammas = np.c_[[[gamma],[gamma],[gamma]]]
#----------------------------------------------------
ts = [0.] # time series
nms = ['prob','lag']
p_dt = 1.
t = 0.
time_steps = 0
t_max = 140
while t < t_max:
time_steps = time_steps + 1
t_prev = t
t = t + p.dt
ts.append(t)
    # (( S )) when updating S, use I split by age and the total S.
    Sf = S[:,-1].reshape(3,1) # distribution on the previous day
    St = np.c_[([S[:,-1].sum()]*3)] # total S on the previous day
If = I[:,-1].reshape(3,1)
dS = - St * If / population * betas
#dS = - Sf * If / pop_by_age * betas
n_S = np.maximum(Sf + dS, zs)
# (( E ))
It = np.c_[([I[:,-1].sum()]*3)]
Ef = E[:,-1].reshape(3,1)
dE = St * It / population * betas - sigmas * Ef
dE = Sf * It / pop_by_age * betas - sigmas * Ef
n_E = np.minimum(Ef + dE, pop_by_age)
# (( I ))
dI = sigmas*Ef - gammas * If
n_I = np.minimum(If + dI, pops)
# (( R ))
Rf = R[:,-1].reshape(3,1)
dR = gammas * If
n_R = np.minimum(Rf + dR, pop_by_age)
S = np.insert(S, [time_steps], n_S, axis=1)
E = np.insert(E, [time_steps], n_E, axis=1)
I = np.insert(I, [time_steps], n_I, axis=1)
R = np.insert(R, [time_steps], n_R, axis=1)
##}}}
#
# for the lag:
# replace I[:,-1] with I[:,-l:] and weight it by the distribution
# of delay times.
##------------------------------------------------------- PLOT
##{{{
#
ics = [S[0], S[1], S[2], E[0], E[1], E[2], I[0], I[1], I[2], R[0], R[1], R[2]]
labels = ['S', 'E', 'I', 'R']
labels = ['S[0]', 'S[1]', 'S[2]', 'E[0]', 'E[1]', 'E[2]', 'I[0]',
'I[1]', 'I[2]', 'R[0]', 'R[1]', 'R[2]']
clrs = ['red']*3 + ['blue']*3 + ['green']*3 + ['orange']*3
t = ts
plt.rcParams['savefig.facecolor'] = "0.8"
fig, ax = plt.subplots(1, 3, figsize=(20, 10))
#--- SIMU linear
for i, ic in enumerate(ics):
if i%3!=0: continue
sns.lineplot(x=t, y=ic, sort=False, linewidth=1, ax=ax[0],
label=labels[i], color=clrs[i])
#sns.scatterplot(t, ic, ax=ax[0])
ax[0].set_xlabel('Time [days]', fontsize=22)
ax[0].set_ylabel('Number infected', fontsize=22)
ax[0].legend()
ax[0].grid()
ax[0].set_title('Simulation')
#---
for i, ic in enumerate(ics):
if i%3!=1: continue
sns.lineplot(x=t, y=ic, sort=False, linewidth=1, ax=ax[1],
label=labels[i], color=clrs[i])
#sns.scatterplot(t, ic, ax=ax[0])
ax[1].set_xlabel('Time [days]', fontsize=22)
ax[1].set_ylabel('Number infected', fontsize=22)
ax[1].legend()
ax[1].grid()
ax[1].set_title('Simulation')
#---
for i, ic in enumerate(ics):
if i%3!=2: continue
sns.lineplot(x=t, y=ic, sort=False, linewidth=1, ax=ax[2],
label=labels[i], color=clrs[i])
#sns.scatterplot(t, ic, ax=ax[0])
ax[2].set_xlabel('Time [days]', fontsize=22)
ax[2].set_ylabel('Number infected', fontsize=22)
ax[2].legend()
ax[2].grid()
ax[2].set_title('Simulation')
#--- plt
plt.xticks(rotation=0, fontsize=22)
plt.yticks(rotation=90, fontsize=22)
plt.tight_layout()
fig.savefig('../plt/plot_sim_dists.png')
plt.close()
#}}}
|
the-stack_0_12839 | #!/usr/bin/env python3
"""A script for running Robot Framework's own acceptance tests.
Usage: atest/run.py [--interpreter interpreter] [options] [data]
`data` is path (or paths) of the file or directory under the `atest/robot`
folder to execute. If `data` is not given, all tests except for tests tagged
with `no-ci` are executed.
Available `options` are the same that can be used with Robot Framework.
See its help (e.g. `robot --help`) for more information.
By default uses the same Python interpreter for running tests that is used
for running this script. That can be changed by using the `--interpreter` (`-I`)
option. It can be the name of the interpreter (e.g. `pypy3`) or a path to the
selected interpreter (e.g. `/usr/bin/python39`). If the interpreter itself needs
arguments, the interpreter and its arguments need to be quoted (e.g. `"py -3"`).
Examples:
$ atest/run.py
$ atest/run.py --exclude no-ci atest/robot/standard_libraries
$ atest/run.py --interpreter pypy3
The results of the test execution are written into an interpreter specific
directory under the `atest/results` directory. Temporary outputs created
during the execution are created under the system temporary directory.
"""
import argparse
import os
from pathlib import Path
import shutil
import signal
import subprocess
import sys
import tempfile
from interpreter import Interpreter
CURDIR = Path(__file__).parent
ARGUMENTS = '''
--doc Robot Framework acceptance tests
--metadata interpreter:{interpreter}
--variablefile {variable_file};{interpreter.path};{interpreter.name};{interpreter.version}
--pythonpath {pythonpath}
--outputdir {outputdir}
--splitlog
--console dotted
--consolewidth 100
--SuiteStatLevel 3
'''.strip()
def atests(interpreter, arguments):
try:
interpreter = Interpreter(interpreter)
except ValueError as err:
sys.exit(err)
outputdir, tempdir = _get_directories(interpreter)
arguments = list(_get_arguments(interpreter, outputdir)) + list(arguments)
return _run(arguments, tempdir, interpreter)
def _get_directories(interpreter):
name = interpreter.output_name
outputdir = CURDIR / 'results' / name
tempdir = Path(tempfile.gettempdir()) / 'robotatest' / name
if outputdir.exists():
shutil.rmtree(outputdir)
if tempdir.exists():
shutil.rmtree(tempdir)
os.makedirs(tempdir)
return outputdir, tempdir
def _get_arguments(interpreter, outputdir):
arguments = ARGUMENTS.format(interpreter=interpreter,
variable_file=CURDIR / 'interpreter.py',
pythonpath=CURDIR / 'resources',
outputdir=outputdir)
for line in arguments.splitlines():
yield from line.split(' ', 1)
for exclude in interpreter.excludes:
yield '--exclude'
yield exclude
def _run(args, tempdir, interpreter):
command = [sys.executable, str(CURDIR.parent / 'src/robot/run.py')] + args
environ = dict(os.environ,
TEMPDIR=str(tempdir),
PYTHONCASEOK='True',
PYTHONIOENCODING='')
print('%s\n%s\n' % (interpreter, '-' * len(str(interpreter))))
print('Running command:\n%s\n' % ' '.join(command))
sys.stdout.flush()
signal.signal(signal.SIGINT, signal.SIG_IGN)
return subprocess.call(command, env=environ)
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-I', '--interpreter', default=sys.executable)
parser.add_argument('-h', '--help', action='store_true')
options, robot_args = parser.parse_known_args()
if not robot_args or not Path(robot_args[-1]).exists():
robot_args += ['--exclude', 'no-ci', str(CURDIR/'robot')]
if options.help:
print(__doc__)
rc = 251
else:
rc = atests(options.interpreter, robot_args)
sys.exit(rc)
|
the-stack_0_12840 | import requests
import json
session = requests.Session()
jar = requests.cookies.RequestsCookieJar()
baseurl = "https://general.direction.com:8443/wsg/api/public/v6_1/" #replace "general.direction.com" with either the host name or IP of a member of the cluster
# Written with 3.6.2 in mind
#http://docs.ruckuswireless.com/smartzone/3.6.2/sz100-public-api-reference-guide-3-6-2.html API documentation
szusername = "" #Enter a username with read privages to everything you want to access
szpassword = "" #Password for the above account
headers_template = {'Content-Type': "application/json;charset=UTF-8"}
loginpayload = '{ "username": "' + szusername + '",\r\n "password": "' + szpassword + '"}'
def ruckus_post(url,data,headers = headers_template,check_cert = False):
output = session.post(baseurl + url, data=data, headers=headers, verify=check_cert, cookies=jar)
return output
get_login_session_cookie = ruckus_post("session",loginpayload) # This uses the ruckus_post above to get a valid session cookie into the cookie jar
def ruckus_get(url,headers = headers_template,check_cert = False):
output = session.get(baseurl + url, headers=headers, verify=check_cert, cookies=jar)
return output
jsonzones = ruckus_get("rkszones") # Get the JSON data for the zones configured on the cluster
#The below function ruckus_list is used for stripping out the "list" dictionary from the returned JSON
def ruckus_list(jsondata):
output = {}
output = json.loads(jsondata.text)
output = output["list"]
return output
zones = ruckus_list(jsonzones)
def clean_ruckus_list(dictdata,dict_parent_name = "NONE",dict_parent_id = "NONE",names="name",ids="id"):
output = []
for row in dictdata:
output_name = ""
output_id = ""
for key,val in row.items():
if key == ids:
output_id = row[key]
elif key == names:
output_name = row[key]
        if dict_parent_name and dict_parent_id == "NONE": # Produce a list without useless data but catches the case where someone doesn't pass both arguments
output.append([output_name,output_id])
else:
output.append([dict_parent_name,dict_parent_id,output_name,output_id])
return output
cleaned_zones = clean_ruckus_list(zones)
print("\n")
print("-" * 50)
print("\n")
print("The AP zones configured on this szcluster are:")
print("\n")
for row in cleaned_zones:
print("Name: {} and ID: {}".format(row[0],row[1]))
print("-" * 5)
print("\n")
print("-" * 50)
|
the-stack_0_12841 | from django.conf import settings
from datetime import datetime
OUTPUT_FOLDER = settings.MEDIA_ROOT
METRICS = {'R' : 'Pearson\'s r',
'p_R' : 'Pearson\'s r p-value',
'rho' : 'Spearman\'s rho',
'p_rho' : 'Spearman\'s rho p-value',
'RMSD' : 'Root-mean-square deviation',
'BIAS' : 'Bias (difference of means)',
'n_obs' : '# observations',
'urmsd' : 'Unbiased root-mean-square deviation',
'RSS' : 'Residual sum of squares',
'mse' : 'Mean square error',
'mse_corr' : 'Mean square error correlation',
'mse_bias' : 'Mean square error bias',
'mse_var' : 'Mean square error variance',}
METRIC_TEMPLATE = ["overview_{id_ref}-{ds_ref}_and_{id_sat}-{ds_sat}_",
"{metric}"]
TC_METRICS = {'snr': 'TC: Signal-to-noise ratio',
'err_std': 'TC: Error standard deviation',
'beta': 'TC: Scaling coefficient',}
TC_METRIC_TEMPLATE = ["overview_{id_ref}-{ds_ref}_and_{id_sat}-{ds_sat}_and_{id_sat2}-{ds_sat2}",
"_{metric}",
"_for_{id_met}-{ds_met}"]
C3S = 'C3S'
ISMN = 'ISMN'
GLDAS = 'GLDAS'
SMAP = 'SMAP'
ASCAT = 'ASCAT'
CCI = 'ESA_CCI_SM_combined'
CCIA = 'ESA_CCI_SM_active'
CCIP = 'ESA_CCI_SM_passive'
SMOS = 'SMOS'
ERA5 = 'ERA5'
ERA5_LAND = 'ERA5_LAND'
## dataset versions
C3S_V201706 = 'C3S_V201706'
C3S_V201812 = 'C3S_V201812'
C3S_V201912 = 'C3S_V201912'
ISMN_V20180712_MINI = 'ISMN_V20180712_MINI'
ISMN_V20191211 = 'ISMN_V20191211'
SMAP_V5_PM = 'SMAP_V5_PM'
SMAP_V6_PM = 'SMAP_V6_PM'
SMOS_105_ASC = 'SMOS_105_ASC'
GLDAS_NOAH025_3H_2_1 = 'GLDAS_NOAH025_3H_2_1'
ASCAT_H113 = 'ASCAT_H113'
ERA5_20190613 = 'ERA5_20190613'
ERA5_Land_V20190904 = 'ERA5_LAND_V20190904'
ESA_CCI_SM_A_V04_4 = 'ESA_CCI_SM_A_V04_4'
ESA_CCI_SM_P_V04_4 = 'ESA_CCI_SM_P_V04_4'
ESA_CCI_SM_C_V04_4 = 'ESA_CCI_SM_C_V04_4'
ESA_CCI_SM_A_V04_5 = 'ESA_CCI_SM_A_V04_5'
ESA_CCI_SM_P_V04_5 = 'ESA_CCI_SM_P_V04_5'
ESA_CCI_SM_C_V04_5 = 'ESA_CCI_SM_C_V04_5'
ESA_CCI_SM_C_V04_7 = 'ESA_CCI_SM_C_V04_7'
ESA_CCI_SM_A_V05_2 = 'ESA_CCI_SM_A_V05_2'
ESA_CCI_SM_P_V05_2 = 'ESA_CCI_SM_P_V05_2'
ESA_CCI_SM_C_V05_2 = 'ESA_CCI_SM_C_V05_2'
## dataset data variables
C3S_sm = 'C3S_sm'
SMAP_soil_moisture = 'SMAP_soil_moisture'
SMOS_sm = 'SMOS_sm'
ASCAT_sm = 'ASCAT_sm'
ISMN_soil_moisture = 'ISMN_soil_moisture'
GLDAS_SoilMoi0_10cm_inst = 'GLDAS_SoilMoi0_10cm_inst'
GLDAS_SoilMoi10_40cm_inst = 'GLDAS_SoilMoi10_40cm_inst'
GLDAS_SoilMoi40_100cm_inst = 'GLDAS_SoilMoi40_100cm_inst'
GLDAS_SoilMoi100_200cm_inst = 'GLDAS_SoilMoi100_200cm_inst'
ERA5_sm = 'ERA5_sm'
ERA5_LAND_sm = 'ERA5_LAND_sm'
ESA_CCI_SM_P_sm = 'ESA_CCI_SM_P_sm'
ESA_CCI_SM_A_sm = 'ESA_CCI_SM_A_sm'
ESA_CCI_SM_C_sm = 'ESA_CCI_SM_C_sm'
NOT_AS_REFERENCE = [SMAP, SMOS, ASCAT]
IRREGULAR_GRIDS = {'SMAP' : 0.35,
'SMOS' : 0.25,
'ASCAT' : 0.1}
START_TIME = datetime(1978, 1, 1).strftime('%Y-%m-%d')
END_TIME = datetime.now().strftime('%Y-%m-%d')
|
the-stack_0_12842 | #!/usr/bin/env python
"""
Script to run benchmarks (used by regression tests)
"""
import os
import os.path
import sys
import csv
from LogManager import LoggingManager
def printf(format, *args):
sys.stdout.write(format % args)
_log = LoggingManager.get_logger(__name__)
def isexec (fpath):
if fpath == None: return False
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if isexec (program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if isexec (exe_file):
return exe_file
return None
def parseArgs (argv):
import argparse as a
p = a.ArgumentParser (description='Benchmark Runner')
p.add_argument ('--cpu', metavar='CPU',
type=int, help='CPU limit', default=60)
p.add_argument ('--mem', metavar='MEM',
type=int, help='Memory limit (MB)', default=512)
p.add_argument ('file', nargs='+',
help='Benchmark files')
p.add_argument ('--prefix', default='BRUNCH_STAT',
help='Prefix for stats')
p.add_argument ('--format', required=True, help='Fields')
p.add_argument ('--out', metavar='DIR',
default="out", help='Output directory')
if '-h' in argv or '--help' in argv:
p.print_help ()
p.exit (0)
try:
k = argv.index ('--')
except ValueError:
p.error ("No '--' argument")
args = p.parse_args (argv[:k])
args.tool_args = argv[k+1:]
# include date in output directory
# import datetime as dt
# dt = dt.datetime.now ().strftime ('%d_%m_%Y-t%H-%M-%S')
# args.out = '{out}.{dt}'.format (out=args.out, dt=dt)
return args
def collectStats (stats, file):
f = open (file, 'r')
for line in f:
if not line.startswith ('BRUNCH_STAT'): continue
fld = line.split (' ')
stats [fld[1]] = fld[2].strip ()
f.close ()
return stats
def statsHeader (stats_file, flds):
with open (stats_file, 'w') as sf:
writer = csv.writer (sf)
writer.writerow (flds)
def statsLine (stats_file, fmt, stats):
line = list()
for fld in fmt:
if fld in stats: line.append (str (stats [fld]))
else: line.append (None)
with open (stats_file, 'a') as sf:
writer = csv.writer (sf)
writer.writerow (line)
cpuTotal = 0.0
def runTool (tool_args, f, out, cpu, mem, fmt):
global cpuTotal
import resource as r
def set_limits ():
if mem > 0:
mem_bytes = mem * 1024 * 1024
r.setrlimit (r.RLIMIT_AS, [mem_bytes, mem_bytes])
if cpu > 0:
r.setrlimit (r.RLIMIT_CPU, [cpu, cpu])
fmt_tool_args = [v.format(f=f) for v in tool_args]
fmt_tool_args[0] = which (fmt_tool_args[0])
fmt_tool_args.append(f)
base = os.path.basename (f)
#outfile = os.path.join (out, base + '.stdout')
errfile = os.path.join (out, base + '.stderr')
import subprocess as sub
_log.info(base)
p = sub.Popen (fmt_tool_args,
shell=False, stdout=sub.PIPE, stderr=sub.STDOUT,
preexec_fn=set_limits)
#stdout=open(outfile, 'w'), stderr=open(errfile, 'w'),
    result, _ = p.communicate ()
    # decode so the substring checks below also work when Popen returns bytes
    if isinstance (result, bytes):
        result = result.decode ('utf-8', 'replace')
cpuUsage = r.getrusage (r.RUSAGE_CHILDREN).ru_utime
stats = dict()
stats['File'] = f
stats['base'] = base
stats['Status'] = p.returncode
stats['Cpu'] = '{0:.3f}'.format (cpuUsage - cpuTotal)
if "UNSAT" in result:
stats['Result'] = "UNSAT"
elif "SAT" in result:
stats['Result'] = "SAT"
elif "UNKNOWN" in result:
stats['Result'] = "UNKNOWN"
else:
_log.error(base)
f = open(errfile,"w")
f.write(result)
stats['Result'] = "ERR"
cpuTotal = cpuUsage
#stats = collectStats (stats, outfile)
#stats = collectStats (stats, errfile)
statsLine (os.path.join (out, 'stats'), fmt, stats)
def main (argv):
args = parseArgs (argv[1:])
if not os.path.exists (args.out):
os.mkdir (args.out)
fmt = args.format.split (':')
statsHeader (os.path.join (args.out, 'stats'), fmt)
global cpuTotal
import resource as r
cpuTotal = r.getrusage (r.RUSAGE_CHILDREN).ru_utime
for f in args.file:
runTool (args.tool_args, f, args.out,
cpu=args.cpu,
mem=args.mem,
fmt=fmt)
return 0
if __name__ == '__main__':
sys.exit (main (sys.argv))
|
the-stack_0_12843 | # Python
import unittest
# Ats
from pyats.topology import Device
# Genie package
from genie.ops.base import Base
from genie.ops.base.maker import Maker
from unittest.mock import Mock
# genie.libs
from genie.libs.ops.static_routing.iosxe.static_routing import StaticRouting
from genie.libs.ops.static_routing.iosxe.tests.static_routing_output import StaticRouteOutput
from genie.libs.parser.iosxe.show_vrf import ShowVrfDetail
outputs = {}
outputs['show ip static route'] = StaticRouteOutput.showIpv4StaticRoute_default
outputs['show ip static route vrf VRF1'] = StaticRouteOutput.showIpv4StaticRoute_vrf1
outputs['show ipv6 static detail'] = StaticRouteOutput.showIpv6StaticRoute_default
outputs['show ipv6 static vrf VRF1 detail'] = StaticRouteOutput.showIpv6StaticRoute_vrf1
def mapper(key):
return outputs[key]
class test_static_route_all(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'iosxe'
self.device.custom['abstraction'] = {'order':['os']}
self.device.mapping = {}
self.device.mapping['cli'] = 'cli'
self.device.connectionmgr.connections['cli'] = self.device
def test_full_static_route(self):
f = StaticRouting(device=self.device)
f.maker.outputs[ShowVrfDetail] = {'': StaticRouteOutput.ShowVrfDetail}
# Get 'show ip static route' output
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
f.learn()
self.maxDiff = None
self.assertEqual(f.info, StaticRouteOutput.staticRouteOpsOutput)
def test_selective_attribute_static_route(self):
f = StaticRouting(device=self.device)
f.maker.outputs[ShowVrfDetail] = {'': StaticRouteOutput.ShowVrfDetail}
# Get 'show ip static route' output
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
f.learn()
# Check match
self.assertEqual('GigabitEthernet0/2', f.info['vrf']['VRF1']['address_family']['ipv4']['routes']\
['10.36.3.3/32']['next_hop']['outgoing_interface']['GigabitEthernet0/2']['outgoing_interface'])
# Check does not match
self.assertNotEqual('GigabitEthernet0/0', f.info['vrf']['VRF1']['address_family']['ipv4']['routes']\
['10.36.3.3/32']['next_hop']['outgoing_interface']['GigabitEthernet0/2']['outgoing_interface'])
def test_missing_attributes_static_route(self):
f = StaticRouting(device=self.device)
f.maker.outputs[ShowVrfDetail] = {'': StaticRouteOutput.ShowVrfDetail}
# Get 'show ip static route' output
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
f.learn()
with self.assertRaises(KeyError):
interfaces = f.info['vrf']['VRF1']['address_family']['ipv4']['routes']\
['10.36.3.3/32']['next_hop']['interface']
def test_empty_output_static_route(self):
self.maxDiff = None
f = StaticRouting(device=self.device)
# Get outputs
f.maker.outputs[ShowVrfDetail] = {'': {}}
outputs['show ip static route'] = ''
outputs['show ip static route vrf VRF1'] = ''
outputs['show ipv6 static detail'] = ''
outputs['show ipv6 static vrf VRF1 detail'] = ''
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
f.learn()
# revert back
outputs['show ip static route'] = StaticRouteOutput.showIpv4StaticRoute_default
outputs['show ip static route vrf VRF1'] = StaticRouteOutput.showIpv4StaticRoute_vrf1
outputs['show ipv6 static detail'] = StaticRouteOutput.showIpv6StaticRoute_default
outputs['show ipv6 static vrf VRF1 detail'] = StaticRouteOutput.showIpv6StaticRoute_vrf1
# Check no attribute not found
with self.assertRaises(AttributeError):
f.info['vrf']
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12846 | # -*- coding: utf-8 -*-
from tkinter import *
import os
import sys
import subprocess
class Application:
def __init__(self, master=None):
self.fontePadrao = ("Arial", "10")
self.primeiroContainer = Frame(master)
self.primeiroContainer["padx"] = 50
self.primeiroContainer.pack()
self.segundoContainer = Frame(master)
self.segundoContainer["padx"] = 50
self.segundoContainer.pack()
self.terceiroContainer = Frame(master)
self.terceiroContainer["padx"] = 50
self.terceiroContainer.pack()
self.quartoContainer = Frame(master)
self.quartoContainer["padx"] = 50
self.quartoContainer.pack()
self.titulo = Label(self.primeiroContainer, text="Instalador STC")
self.titulo["font"] = ("Arial", "16", "bold")
self.titulo.pack(side=RIGHT)
self.pergunta = Label(self.segundoContainer, text="Instalar STC?")
self.pergunta["font"] = ("Arial", "10")
self.pergunta.pack()
self.botaoInstalar = Button(self.terceiroContainer)
self.botaoInstalar["text"] = "Sim"
self.botaoInstalar["font"] = ("Calibri", "8")
self.botaoInstalar["width"] = 12
self.botaoInstalar["command"] = self.instalarSTC
self.botaoInstalar.pack(side=LEFT)
self.botaoInstalar = Button(self.terceiroContainer)
self.botaoInstalar["text"] = "Nao"
self.botaoInstalar["font"] = ("Calibri", "8")
self.botaoInstalar["width"] = 12
self.botaoInstalar["command"] = self.sairInstalador
self.botaoInstalar.pack()
self.mensagem = Label(self.quartoContainer, text="", font=self.fontePadrao)
self.mensagem.pack(side=LEFT)
    # Method that performs the STC installation
def instalarSTC(self):
print('Validando STC...')
        ######## kill the STC processes
os.system('tskill STCLauncher')
os.system('tskill STCDIS')
os.system('tskill STCPanel')
os.system('tskill STCPlayback')
os.system('tskill STCInterfaceExterna')
os.system('tskill STCMQ')
os.system('tskill STCMQMonitor')
os.system('tskill HybridOBCSimulator')
os.system('tskill Decryptor')
os.system('tskill Encryptor')
        ######## stop the STC services
os.system('net stop ABR')
os.system('net stop STC.Router.Service')
os.system('net stop STCGlobal')
        ######## delete STC_old
        os.system('rd c:\STC_old /s/q')
        ######## move the old STC to STC_old
        os.system('move c:\STC c:\STC_old')
        ########## copy the new STC folder to c:\
os.system('mkdir c:\STC')
dirname = os.path.dirname(os.path.realpath(sys.argv[0]))
caminho = ('xcopy {}\STC\*.* c:\STC /E '.format(dirname))
os.system(caminho)
#######Validar.pasta_stc()
start = "C:\\STC\\Client"
erro = "S"
for dirpath, dirnames, filenames in os.walk(start):
for filename in filenames:
if filename == "ConfigSTC.ini":
erro = "N"
filename = os.path.join(dirpath, filename)
print(filename)
print(dirpath)
if erro == "S":
print('Erro - "c:\STC\Client\ConfigSTC.ini" nao encontrado!!!')
os.system("pause")
sys.exit()
start = "C:\\STC\\Server"
for dirpath, dirnames, filenames in os.walk(start):
for filename in filenames:
if filename == "ConfigSTC.ini":
erro = "N"
filename = os.path.join(dirpath, filename)
print(filename)
print(dirpath)
if erro == "S":
print('Erro - "c:\STC\Server\ConfigSTC.ini" nao encontrado!!!')
os.system("pause")
sys.exit()
#############################################
        ########## validate ORACLE ##################
        #############################################
        ######Check that tnsping works
proc = subprocess.Popen(["tnsping", "ORCL"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
pos_ini = out.find(b'C:\\')
pos_fin = out.find(b'sqlnet.ora')
pos_falha = out.find(b'Falha')
        if pos_ini < 0 or pos_fin < 0: ##### markers not found in the tnsping output: Oracle client missing
print('Oracle não instalado, por favor verifique!!!')
os.system("pause")
sys.exit()
else:
# caminho = " "
############# caminho = (out[pos_ini:pos_fin]) >>>> aqui esta o caminho
#######>>>>>> excluir depois
caminho = 'C:\\app\\bruno.uthman\\product\\11.2.0\\client_1\\network\\admin'
#######>>>>>> excluir depois os comments
if pos_falha > 0:
print('configurar o tnsname ORCL em: {}'.format(caminho))
os.system('{}\\tnsnames.ora'.format(caminho))
os.system("pause")
sys.exit()
else:
print('Oracle ok')
                ######## configure the ORCL tnsname
                ########## >>>>> still to be done
print('ENDDDDD!!!!!!')
os.system("pause")
sys.exit()
def sairInstalador(self):
sys.exit()
root = Tk()
root.geometry('{}x{}'.format(500, 150))
Application(root)
root.mainloop() |
the-stack_0_12847 | """Support for Voice mailboxes."""
from __future__ import annotations
import asyncio
from contextlib import suppress
from datetime import timedelta
from http import HTTPStatus
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.setup import async_prepare_setup_platform
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mailbox"
EVENT = "mailbox_updated"
CONTENT_TYPE_MPEG = "audio/mpeg"
CONTENT_TYPE_NONE = "none"
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for mailboxes."""
mailboxes: list[Mailbox] = []
hass.components.frontend.async_register_built_in_panel(
"mailbox", "mailbox", "mdi:mailbox"
)
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(
p_type: str,
p_config: ConfigType | None = None,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, "async_get_handler"):
mailbox = await platform.async_get_handler(
hass, p_config, discovery_info
)
elif hasattr(platform, "get_handler"):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info
)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error("Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_add_entities([mailbox_entity])
setup_tasks = [
asyncio.create_task(async_setup_platform(p_type, p_config))
for p_type, p_config in config_per_platform(config, DOMAIN)
if p_type is not None
]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox: Mailbox) -> None:
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
@callback
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
async def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
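# Informal sketch of what a mailbox platform is expected to provide (mirrors the
# setup logic in async_setup above): a module exposing
# `async def async_get_handler(hass, config, discovery_info)` (or a synchronous
# `get_handler`) that returns an instance of a Mailbox subclass implementing the
# coroutines above.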
class StreamError(Exception):
"""Media streaming exception."""
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes: list[Mailbox]) -> None:
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request: web.Request) -> web.Response:
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
"name": mailbox.name,
"has_media": mailbox.has_media,
"can_delete": mailbox.can_delete,
}
)
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
await mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
async with async_timeout.timeout(10):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
_LOGGER.error("Error getting media: %s", err)
return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
if stream:
return web.Response(body=stream, content_type=mailbox.media_type)
return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
|
the-stack_0_12852 | from typing import Union, Tuple, Optional
from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType,
OptTensor)
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Parameter, Linear
from torch_sparse import SparseTensor, set_diag
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax
from ..inits import glorot, zeros
class GATConvGI(MessagePassing): # gat with gpool
r"""The graph attentional operator from the `"Graph Attention Networks"
<https://arxiv.org/abs/1710.10903>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \alpha_{i,i}\mathbf{\Theta}\mathbf{x}_{i} +
\sum_{j \in \mathcal{N}(i)} \alpha_{i,j}\mathbf{\Theta}\mathbf{x}_{j},
where the attention coefficients :math:`\alpha_{i,j}` are computed as
.. math::
\alpha_{i,j} =
\frac{
\exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top}
[\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_j]
\right)\right)}
{\sum_{k \in \mathcal{N}(i) \cup \{ i \}}
\exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top}
[\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_k]
\right)\right)}.
Args:
in_channels (int or tuple): Size of each input sample. A tuple
corresponds to the sizes of source and target dimensionalities.
out_channels (int): Size of each output sample.
heads (int, optional): Number of multi-head-attentions.
(default: :obj:`1`)
concat (bool, optional): If set to :obj:`False`, the multi-head
attentions are averaged instead of concatenated.
(default: :obj:`True`)
negative_slope (float, optional): LeakyReLU angle of the negative
slope. (default: :obj:`0.2`)
dropout (float, optional): Dropout probability of the normalized
attention coefficients which exposes each node to a stochastically
sampled neighborhood during training. (default: :obj:`0`)
add_self_loops (bool, optional): If set to :obj:`False`, will not add
self-loops to the input graph. (default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
_alpha: OptTensor
def __init__(self, in_channels: Union[int, Tuple[int, int]],
out_channels: int, heads: int = 1, concat: bool = True,
negative_slope: float = 0.2, dropout: float = 0.,
add_self_loops: bool = True, bias: bool = True, **kwargs):
kwargs.setdefault('aggr', 'add')
        super(GATConvGI, self).__init__(node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
# print('out_channels in gat', out_channels)
# hid in net init 4
        # out_channels in gat 4 # 4 is hid1 (the hidden size)
        # out_channels in gat 7 # 7 is num_classes
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.dropout = dropout
self.add_self_loops = add_self_loops
if isinstance(in_channels, int):
self.lin_l = Linear(in_channels, heads * out_channels, bias=False)
self.lin_r = self.lin_l
else:
self.lin_l = Linear(in_channels[0], heads * out_channels, False)
self.lin_r = Linear(in_channels[1], heads * out_channels, False)
self.att_l = Parameter(torch.Tensor(1, heads, out_channels))
self.att_r = Parameter(torch.Tensor(1, heads, out_channels))
if bias and concat:
self.bias = Parameter(torch.Tensor(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self._alpha = None
self.reset_parameters()
def reset_parameters(self):
glorot(self.lin_l.weight)
glorot(self.lin_r.weight)
glorot(self.att_l)
glorot(self.att_r)
zeros(self.bias)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
size: Size = None, return_attention_weights=None):
# type: (Union[Tensor, OptPairTensor], Tensor, Size, NoneType) -> Tensor # noqa
# type: (Union[Tensor, OptPairTensor], SparseTensor, Size, NoneType) -> Tensor # noqa
# type: (Union[Tensor, OptPairTensor], Tensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa
# type: (Union[Tensor, OptPairTensor], SparseTensor, Size, bool) -> Tuple[Tensor, SparseTensor] # noqa
r"""
Args:
return_attention_weights (bool, optional): If set to :obj:`True`,
will additionally return the tuple
:obj:`(edge_index, attention_weights)`, holding the computed
attention weights for each edge. (default: :obj:`None`)
"""
H, C = self.heads, self.out_channels
x_l: OptTensor = None
x_r: OptTensor = None
alpha_l: OptTensor = None
alpha_r: OptTensor = None
if isinstance(x, Tensor):
assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = x_r = self.lin_l(x).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
else:
x_l, x_r = x[0], x[1]
assert x[0].dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = self.lin_l(x_l).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
if x_r is not None:
x_r = self.lin_r(x_r).view(-1, H, C)
alpha_r = (x_r * self.att_r).sum(dim=-1)
assert x_l is not None
assert alpha_l is not None
if self.add_self_loops:
if isinstance(edge_index, Tensor):
num_nodes = x_l.size(0)
if x_r is not None:
num_nodes = min(num_nodes, x_r.size(0))
if size is not None:
num_nodes = min(size[0], size[1])
edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
elif isinstance(edge_index, SparseTensor):
edge_index = set_diag(edge_index)
# propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r), size=size) # base class 的propagate 调用了message()
alpha = self._alpha
self._alpha = None
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
if isinstance(return_attention_weights, bool):
assert alpha is not None
if isinstance(edge_index, Tensor):
return out, (edge_index, alpha)
elif isinstance(edge_index, SparseTensor):
return out, edge_index.set_value(alpha, layout='coo')
else:
return out
def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: OptTensor,
index: Tensor, ptr: OptTensor,
size_i: Optional[int]) -> Tensor:
alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, index, ptr, size_i)
        # 8 and 1 are both number_heads; the sizes appear twice because this runs
        # once for training and once for evaluation in each epoch
# print alpha
# print('alpha size: ', alpha.size())
# alpha size: torch.Size([13264, 8]) conv1
# alpha size: torch.Size([13264, 1]) conv2
# alpha size: torch.Size([13264, 8])
# alpha size: torch.Size([13264, 1])
self._alpha = alpha
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
return x_j * alpha.unsqueeze(-1)
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels, self.heads)
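# Illustrative usage sketch (assumes the package context needed for the relative
# import above; sizes are arbitrary examples):
#   conv = GATConvGI(in_channels=16, out_channels=8, heads=2)
#   x = torch.randn(4, 16)                    # 4 nodes with 16 features each
#   edge_index = torch.tensor([[0, 1, 2, 3],
#                              [1, 0, 3, 2]]) # 2 x E tensor of directed edges
#   out = conv(x, edge_index)                 # shape [4, 2 * 8] since concat=True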
|
the-stack_0_12853 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'point'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.vehicule.vecteur import get_direction
from secondaires.navigation.constantes import get_longitude_latitude
class CmdPoint(Commande):
"""Commande 'point'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "point", "bearing")
self.nom_categorie = "navire"
self.aide_courte = "fait le point"
self.aide_longue = \
"Cette commande permet de faire le point sur un navire. " \
"Vous aurez besoin d'avoir un sextant équipé. Faire le " \
"point prend un peu de temps, nécessite un ciel dégagé de " \
"nuages et est affecté par la qualité des instruments " \
"utilisés."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
personnage.agir("fairepoint")
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None or \
salle.navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
sextant = None
for objet in personnage.equipement.equipes:
if objet.est_de_type("sextant"):
sextant = objet
break
if not sextant:
personnage << "|err|Vous n'avez pas de sextant équipé.|ff|"
return
if salle.interieur:
personnage << "|err|Vous ne pouvez faire le point d'ici.|ff|"
return
else:
perturbation = importeur.meteo.get_perturbation(salle)
if perturbation is not None and perturbation.est_opaque():
personnage << "|err|Vous ne voyez pas le ciel.|ff|"
return
personnage << "Vous étudiez le ciel en utilisant {}.".format(
sextant.get_nom())
personnage.salle.envoyer("{{}} étudie le ciel grâce à {}.".format(
sextant.get_nom()), personnage)
personnage.etats.ajouter("faire_point")
yield 60
if "faire_point" not in personnage.etats:
return
personnage.etats.retirer("faire_point")
x = salle.coords.x
y = salle.coords.y
personnage << "Après calcul, vous obtenez " + get_longitude_latitude(
x, y, sextant.precision) + "."
personnage.salle.envoyer("{{}} baisse {}".format(sextant.get_nom()),
personnage)
|
the-stack_0_12855 | import logging
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import torch
import torch.distributed as dist
from monai.config import print_config
from monai.handlers import (
CheckpointSaver,
LrScheduleHandler,
MeanDice,
StatsHandler,
ValidationHandler,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.losses import DiceCELoss
from monai.utils import set_determinism
from torch.nn.parallel import DistributedDataParallel
from create_dataset import get_data
from create_network import get_network
from evaluator import DynUNetEvaluator
from task_params import data_loader_params, patch_size
from trainer import DynUNetTrainer
def validation(args):
# load hyper parameters
task_id = args.task_id
sw_batch_size = args.sw_batch_size
tta_val = args.tta_val
window_mode = args.window_mode
eval_overlap = args.eval_overlap
multi_gpu_flag = args.multi_gpu
local_rank = args.local_rank
amp = args.amp
# produce the network
checkpoint = args.checkpoint
val_output_dir = "./runs_{}_fold{}_{}/".format(task_id, args.fold, args.expr_name)
if multi_gpu_flag:
dist.init_process_group(backend="nccl", init_method="env://")
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
else:
device = torch.device("cuda")
properties, val_loader = get_data(args, mode="validation")
net = get_network(properties, task_id, val_output_dir, checkpoint)
net = net.to(device)
if multi_gpu_flag:
net = DistributedDataParallel(
module=net, device_ids=[device], find_unused_parameters=True
)
n_classes = len(properties["labels"])
net.eval()
evaluator = DynUNetEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
n_classes=n_classes,
inferer=SlidingWindowInferer(
roi_size=patch_size[task_id],
sw_batch_size=sw_batch_size,
overlap=eval_overlap,
mode=window_mode,
),
post_transform=None,
key_val_metric={
"val_mean_dice": MeanDice(
include_background=False,
output_transform=lambda x: (x["pred"], x["label"]),
)
},
additional_metrics=None,
amp=amp,
tta_val=tta_val,
)
evaluator.run()
if local_rank == 0:
print(evaluator.state.metrics)
results = evaluator.state.metric_details["val_mean_dice"]
if n_classes > 2:
for i in range(n_classes - 1):
print(
"mean dice for label {} is {}".format(i + 1, results[:, i].mean())
)
def train(args):
# load hyper parameters
task_id = args.task_id
fold = args.fold
val_output_dir = "./runs_{}_fold{}_{}/".format(task_id, fold, args.expr_name)
log_filename = "nnunet_task{}_fold{}.log".format(task_id, fold)
log_filename = os.path.join(val_output_dir, log_filename)
interval = args.interval
learning_rate = args.learning_rate
max_epochs = args.max_epochs
multi_gpu_flag = args.multi_gpu
amp_flag = args.amp
lr_decay_flag = args.lr_decay
sw_batch_size = args.sw_batch_size
tta_val = args.tta_val
batch_dice = args.batch_dice
window_mode = args.window_mode
eval_overlap = args.eval_overlap
local_rank = args.local_rank
determinism_flag = args.determinism_flag
determinism_seed = args.determinism_seed
if determinism_flag:
set_determinism(seed=determinism_seed)
if local_rank == 0:
print("Using deterministic training.")
# transforms
train_batch_size = data_loader_params[task_id]["batch_size"]
if multi_gpu_flag:
dist.init_process_group(backend="nccl", init_method="env://")
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
else:
device = torch.device("cuda")
properties, val_loader = get_data(args, mode="validation")
_, train_loader = get_data(args, batch_size=train_batch_size, mode="train")
# produce the network
checkpoint = args.checkpoint
net = get_network(properties, task_id, val_output_dir, checkpoint)
net = net.to(device)
if multi_gpu_flag:
net = DistributedDataParallel(
module=net, device_ids=[device], find_unused_parameters=True
)
optimizer = torch.optim.SGD(
net.parameters(),
lr=learning_rate,
momentum=0.99,
weight_decay=3e-5,
nesterov=True,
)
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda epoch: (1 - epoch / max_epochs) ** 0.9
)
# produce evaluator
val_handlers = [
StatsHandler(output_transform=lambda x: None),
CheckpointSaver(
save_dir=val_output_dir, save_dict={"net": net}, save_key_metric=True
),
]
evaluator = DynUNetEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
n_classes=len(properties["labels"]),
inferer=SlidingWindowInferer(
roi_size=patch_size[task_id],
sw_batch_size=sw_batch_size,
overlap=eval_overlap,
mode=window_mode,
),
post_transform=None,
key_val_metric={
"val_mean_dice": MeanDice(
include_background=False,
output_transform=lambda x: (x["pred"], x["label"]),
)
},
val_handlers=val_handlers,
amp=amp_flag,
tta_val=tta_val,
)
# produce trainer
loss = DiceCELoss(to_onehot_y=True, softmax=True, batch=batch_dice)
train_handlers = []
if lr_decay_flag:
train_handlers += [LrScheduleHandler(lr_scheduler=scheduler, print_lr=True)]
train_handlers += [
ValidationHandler(validator=evaluator, interval=interval, epoch_level=True),
StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
]
trainer = DynUNetTrainer(
device=device,
max_epochs=max_epochs,
train_data_loader=train_loader,
network=net,
optimizer=optimizer,
loss_function=loss,
inferer=SimpleInferer(),
post_transform=None,
key_train_metric=None,
train_handlers=train_handlers,
amp=amp_flag,
)
if local_rank > 0:
evaluator.logger.setLevel(logging.WARNING)
trainer.logger.setLevel(logging.WARNING)
logger = logging.getLogger()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
# Setup file handler
fhandler = logging.FileHandler(log_filename)
fhandler.setLevel(logging.INFO)
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
if not multi_gpu_flag:
chandler = logging.StreamHandler()
chandler.setLevel(logging.INFO)
chandler.setFormatter(formatter)
logger.addHandler(chandler)
logger.setLevel(logging.INFO)
trainer.run()
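# Example invocations (illustrative only; the script name, data paths and task id
# are placeholders):
#   python train_dynunet.py -mode train -task_id 04 -fold 0 -root_dir /workspace/data/medical/
#   python train_dynunet.py -mode val -task_id 04 -fold 0 -checkpoint <saved weights file>
# Multi-GPU runs are expected to be launched through torch.distributed (which
# supplies --local_rank) together with -multi_gpu True.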
if __name__ == "__main__":
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-fold", "--fold", type=int, default=0, help="0-5")
parser.add_argument(
"-task_id", "--task_id", type=str, default="04", help="task 01 to 10"
)
parser.add_argument(
"-root_dir",
"--root_dir",
type=str,
default="/workspace/data/medical/",
help="dataset path",
)
parser.add_argument(
"-expr_name",
"--expr_name",
type=str,
default="expr",
help="the suffix of the experiment's folder",
)
parser.add_argument(
"-datalist_path",
"--datalist_path",
type=str,
default="config/",
)
parser.add_argument(
"-train_num_workers",
"--train_num_workers",
type=int,
default=4,
help="the num_workers parameter of training dataloader.",
)
parser.add_argument(
"-val_num_workers",
"--val_num_workers",
type=int,
default=1,
help="the num_workers parameter of validation dataloader.",
)
parser.add_argument(
"-interval",
"--interval",
type=int,
default=5,
help="the validation interval under epoch level.",
)
parser.add_argument(
"-eval_overlap",
"--eval_overlap",
type=float,
default=0.5,
help="the overlap parameter of SlidingWindowInferer.",
)
parser.add_argument(
"-sw_batch_size",
"--sw_batch_size",
type=int,
default=4,
help="the sw_batch_size parameter of SlidingWindowInferer.",
)
parser.add_argument(
"-window_mode",
"--window_mode",
type=str,
default="gaussian",
choices=["constant", "gaussian"],
help="the mode parameter for SlidingWindowInferer.",
)
parser.add_argument(
"-num_samples",
"--num_samples",
type=int,
default=3,
help="the num_samples parameter of RandCropByPosNegLabeld.",
)
parser.add_argument(
"-pos_sample_num",
"--pos_sample_num",
type=int,
default=1,
help="the pos parameter of RandCropByPosNegLabeld.",
)
parser.add_argument(
"-neg_sample_num",
"--neg_sample_num",
type=int,
default=1,
help="the neg parameter of RandCropByPosNegLabeld.",
)
parser.add_argument(
"-cache_rate",
"--cache_rate",
type=float,
default=1.0,
help="the cache_rate parameter of CacheDataset.",
)
parser.add_argument("-learning_rate", "--learning_rate", type=float, default=1e-2)
parser.add_argument(
"-max_epochs",
"--max_epochs",
type=int,
default=1000,
help="number of epochs of training.",
)
parser.add_argument(
"-mode", "--mode", type=str, default="train", choices=["train", "val"]
)
parser.add_argument(
"-checkpoint",
"--checkpoint",
type=str,
default=None,
help="the filename of weights.",
)
parser.add_argument(
"-amp",
"--amp",
type=bool,
default=False,
help="whether to use automatic mixed precision.",
)
parser.add_argument(
"-lr_decay",
"--lr_decay",
type=bool,
default=False,
help="whether to use learning rate decay.",
)
parser.add_argument(
"-tta_val",
"--tta_val",
type=bool,
default=False,
help="whether to use test time augmentation.",
)
parser.add_argument(
"-batch_dice",
"--batch_dice",
type=bool,
default=False,
help="the batch parameter of DiceCELoss.",
)
parser.add_argument(
"-determinism_flag", "--determinism_flag", type=bool, default=False
)
parser.add_argument(
"-determinism_seed",
"--determinism_seed",
type=int,
default=0,
help="the seed used in deterministic training",
)
parser.add_argument(
"-multi_gpu",
"--multi_gpu",
type=bool,
default=False,
help="whether to use multiple GPUs for training.",
)
parser.add_argument("-local_rank", "--local_rank", type=int, default=0)
args = parser.parse_args()
if args.local_rank == 0:
print_config()
if args.mode == "train":
train(args)
elif args.mode == "val":
validation(args)
|
the-stack_0_12856 | import asyncio
import pickle
from time import time
from typing import FrozenSet, Optional
import aiomcache
import morcilla
from sqlalchemy import and_, select
from athenian.api.cache import cached, middle_term_exptime, short_term_exptime
from athenian.api.models.metadata.github import Bot
from athenian.api.models.state.models import Team
class Bots:
"""Lazy loader of the set of bot logins."""
def __init__(self):
"""Initialize a new instance of the Bots class."""
self._bots = None # type: Optional[FrozenSet[str]]
self._timestamp = time()
self._lock = None # type: Optional[asyncio.Lock]
async def _fetch(self, mdb: morcilla.Database) -> None:
self._bots = frozenset(r[0] for r in await mdb.fetch_all(select([Bot.login])))
self._timestamp = time()
@cached(
exptime=short_term_exptime,
serialize=pickle.dumps,
deserialize=pickle.loads,
key=lambda account, **_: (account,),
)
async def __call__(self,
account: int,
mdb: morcilla.Database,
sdb: morcilla.Database,
cache: Optional[aiomcache.Client],
) -> FrozenSet[str]:
"""
Return the bot logins.
There are two parts: global bots in mdb and local bots in the Bots team in sdb.
"""
if self._bots is None or time() - self._timestamp >= middle_term_exptime:
if self._lock is None:
# we don't run multi-threaded
self._lock = asyncio.Lock()
async with self._lock:
if self._bots is None or time() - self._timestamp >= middle_term_exptime:
await self._fetch(mdb)
extra = await sdb.fetch_val(select([Team.members]).where(and_(
Team.owner_id == account,
Team.name == Team.BOTS,
)))
if extra is None:
return self._bots
return self._bots.union(u.rsplit("/", 1)[1] for u in extra)
bots = Bots()
del Bots # yes, don't allow to use it directly
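# Typical call site (illustrative): other modules import the module-level `bots`
# singleton and await it, e.g. `logins = await bots(account, mdb, sdb, cache)`,
# receiving a frozenset of bot logins (global mdb bots plus the account's "Bots"
# team members).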
|
the-stack_0_12858 | ##########################################################################
#
# Copyright (c) 2018, Alex Fuller. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferCycles
def __visibilitySummary( plug ) :
info = []
for childName in ( "camera", "diffuse", "glossy", "transmission", "shadow", "scatter" ) :
if plug[childName + "Visibility"]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( childName ) + ( " On" if plug[childName + "Visibility"]["value"].getValue() else " Off" ) )
return ", ".join( info )
def __renderingSummary( plug ) :
info = []
for childName in ( "useHoldout", "isShadowCatcher", "color", "dupliGenerated", "dupliUV", "lightGroup" ) :
if plug[childName]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( childName ) + ( " On" if plug[childName]["value"].getValue() else " Off" ) )
return ", ".join( info )
def __subdivisionSummary( plug ) :
info = []
for childName in ( "maxLevel", "dicingScale" ) :
if plug[childName]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( childName ) + ( " On" if plug[childName]["value"].getValue() else " Off" ) )
return ", ".join( info )
def __volumeSummary( plug ) :
info = []
for childName in ( "volumeClipping", "volumeStepSize", "volumeObjectSpace" ) :
if plug[childName]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( childName ) + ( " On" if plug[childName]["value"].getValue() else " Off" ) )
return ", ".join( info )
def __objectSummary( plug ) :
info = []
if plug["assetName"]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( "assetName" ) + ( " On" if plug["assetName"]["value"].getValue() else " Off" ) )
return ", ".join( info )
def __shaderSummary( plug ) :
info = []
for childName in ( "useMis", "useTransparentShadow", "heterogeneousVolume", "volumeSamplingMethod", "volumeInterpolationMethod", "volumeStepRate", "displacementMethod" ) :
if plug[childName]["enabled"].getValue() :
info.append( IECore.CamelCase.toSpaced( childName ) + ( " On" if plug[childName]["value"].getValue() else " Off" ) )
return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferCycles.CyclesAttributes,
"description",
"""
Applies Cycles attributes to objects in the scene.
""",
plugs = {
# Sections
"attributes" : [
"layout:section:Visibility:summary", __visibilitySummary,
"layout:section:Rendering:summary", __renderingSummary,
"layout:section:Subdivision:summary", __subdivisionSummary,
"layout:section:Volume:summary", __volumeSummary,
"layout:section:Object:summary", __objectSummary,
"layout:section:Shader:summary", __shaderSummary,
],
# Visibility
"attributes.cameraVisibility" : [
"description",
"""
Whether or not the object is visible to camera
rays. To hide an object completely, use the
visibility settings on the StandardAttributes
node instead.
""",
"layout:section", "Visibility",
"label", "Camera",
],
"attributes.diffuseVisibility" : [
"description",
"""
Whether or not the object is visible to diffuse
rays.
""",
"layout:section", "Visibility",
"label", "Diffuse",
],
"attributes.glossyVisibility" : [
"description",
"""
Whether or not the object is visible in
glossy rays.
""",
"layout:section", "Visibility",
"label", "Glossy",
],
"attributes.transmissionVisibility" : [
"description",
"""
Whether or not the object is visible in
transmission.
""",
"layout:section", "Visibility",
"label", "Transmission",
],
"attributes.shadowVisibility" : [
"description",
"""
Whether or not the object is visible to shadow
rays - whether it casts shadows or not.
""",
"layout:section", "Visibility",
"label", "Shadow",
],
"attributes.scatterVisibility" : [
"description",
"""
Whether or not the object is visible to
scatter rays.
""",
"layout:section", "Visibility",
"label", "Scatter",
],
# Rendering
"attributes.useHoldout" : [
"description",
"""
Turns the object into a holdout matte.
This only affects primary (camera) rays.
""",
"layout:section", "Rendering",
],
"attributes.isShadowCatcher" : [
"description",
"""
Turns the object into a shadow catcher.
""",
"layout:section", "Rendering",
],
"attributes.shadowTerminatorShadingOffset" : [
"description",
"""
Push the shadow terminator towards the light to hide artifacts on low poly geometry.
""",
"layout:section", "Rendering",
],
"attributes.shadowTerminatorGeometryOffset" : [
"description",
"""
Offset rays from the surface to reduce shadow terminator artifact on low poly geometry. Only affects triangles at grazing angles to light.
""",
"layout:section", "Rendering",
],
"attributes.color" : [
"description",
"""
Set a unique color per-object. This is intended for setting
a unique constant color that can be accessed from an object_info
shader, even if the object is being instanced.
""",
"layout:section", "Rendering",
],
"attributes.dupliGenerated" : [
"description",
"""
Set a unique position offset. Accessible from a texture_coordinate
via the generated output plug and from_dupli enabled.
""",
"layout:section", "Rendering",
],
"attributes.dupliUV" : [
"description",
"""
Set a unique UV offset. Accessible from either a texture_coordinate
or uv_map node via the UV output plug and from_dupli enabled.
""",
"layout:section", "Rendering",
],
"attributes.lightGroup" : [
"description",
"""
Set the lightgroup of an object with emission.
""",
"layout:section", "Rendering",
],
# Subdivision
"attributes.maxLevel" : [
"description",
"""
The max level of subdivision that can be
applied.
""",
"layout:section", "Subdivision",
],
"attributes.dicingScale" : [
"description",
"""
Multiplier for scene dicing rate.
""",
"layout:section", "Subdivision",
],
# Volume
"attributes.volumeClipping" : [
"description",
"""
Value under which voxels are considered empty space to
optimize rendering.
""",
"layout:section", "Volume",
],
"attributes.volumeStepSize" : [
"description",
"""
Distance between volume samples. When zero it is automatically
estimated based on the voxel size.
""",
"layout:section", "Volume",
],
"attributes.volumeObjectSpace" : [
"description",
"""
Specify volume density and step size in object or world space.
By default object space is used, so that the volume opacity and
detail remains the same regardless of object scale.
""",
"layout:section", "Volume",
],
"attributes.assetName" : [
"description",
"""
Asset name for cryptomatte.
""",
"layout:section", "Object",
],
# Shader
"attributes.useMis" : [
"description",
"""
Use multiple importance sampling for this material,
disabling may reduce overall noise for large
objects that emit little light compared to other light sources.
""",
"layout:section", "Shader",
],
"attributes.useTransparentShadow" : [
"description",
"""
Use transparent shadows for this material if it contains a Transparent BSDF,
disabling will render faster but not give accurate shadows.
""",
"layout:section", "Shader",
],
"attributes.heterogeneousVolume" : [
"description",
"""
Disabling this when using volume rendering, assume volume has the same density
everywhere (not using any textures), for faster rendering.
""",
"layout:section", "Shader",
],
"attributes.volumeSamplingMethod" : [
"description",
"""
Sampling method to use for volumes.
""",
"layout:section", "Shader",
],
"attributes.volumeSamplingMethod.value" : [
"preset:Distance", "distance",
"preset:Equiangular", "equiangular",
"preset:Multiple-Importance", "multiple_importance",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"attributes.volumeInterpolationMethod" : [
"description",
"""
Interpolation method to use for volumes.
""",
"layout:section", "Shader",
],
"attributes.volumeInterpolationMethod.value" : [
"preset:Linear", "linear",
"preset:Cubic", "cubic",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"attributes.volumeStepRate" : [
"description",
"""
Scale the distance between volume shader samples when rendering the volume
(lower values give more accurate and detailed results, but also increased render time).
""",
"layout:section", "Shader",
],
"attributes.displacementMethod" : [
"description",
"""
Method to use for the displacement.
""",
"layout:section", "Shader",
],
"attributes.displacementMethod.value" : [
"preset:Bump", "bump",
"preset:True", "true",
"preset:Both", "both",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
}
)
if not GafferCycles.withLightGroups :
	Gaffer.Metadata.registerValue( GafferCycles.CyclesAttributes, "attributes.lightGroup", "plugValueWidget:type", "" )
|
the-stack_0_12860 | # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import torch
import transformers
import turbo_transformers
import enum
import time
import numpy
class LoadType(enum.Enum):
PYTORCH = "PYTORCH"
PRETRAINED = "PRETRAINED"
NPZ = "NPZ"
def test(loadtype: LoadType, use_cuda: bool):
cfg = transformers.AlbertConfig()
model = transformers.AlbertModel(cfg)
model.eval()
torch.set_grad_enabled(False)
test_device = torch.device('cuda:0') if use_cuda else \
torch.device('cpu:0')
cfg = model.config
# use 4 threads for computing
turbo_transformers.set_num_threads(4)
input_ids = torch.tensor(
([12166, 10699, 16752, 4454], [5342, 16471, 817, 16022]),
dtype=torch.long)
model.to(test_device)
start_time = time.time()
for _ in range(10):
torch_res = model(input_ids)
end_time = time.time()
print("\ntorch time consum: {}".format(end_time - start_time))
# there are three ways to load pretrained model.
if loadtype is LoadType.PYTORCH:
# 1, from a PyTorch model, which has loaded a pretrained model
tt_model = turbo_transformers.AlbertModel.from_torch(model)
else:
raise ("LoadType is not supported")
start_time = time.time()
for _ in range(10):
res = tt_model(input_ids) # sequence_output, pooled_output
end_time = time.time()
print("\nturbo time consum: {}".format(end_time - start_time))
assert (numpy.max(
numpy.abs(res[0].cpu().numpy() - torch_res[0].cpu().numpy())) < 0.1)
if __name__ == "__main__":
test(LoadType.PYTORCH, use_cuda=False)
|
the-stack_0_12863 | import sys
import re
import PySimpleGUI as sg
import subprocess
import datetime
from bs4 import BeautifulSoup
import shutil
import openpyxl
def insert_Excel(translatedHtmlFile, checkedHtmlFile, resultsFile):
    # Prepare the Excel workbook that will hold the results
shutil.copyfile(xlsxTemplate, resultsFile)
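    # Column layout assumed by the writes below: A/B = source/target from the
    # translated file, C = comparison formula, D/E = source/target from the
    # checked file.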
# 翻訳後のhtmlをオープンしてパース
with open(translatedHtmlFile, encoding='utf-8') as f:
translated = f.read()
soupTranslated = BeautifulSoup(translated, 'html.parser')
translatedList = []
for t in soupTranslated.find_all('tr'):
cols = t.find_all('td')
src = cols[0].get_text()
target = cols[1].get_text()
# print(src, target)
translatedList.append(src + '\t' + target)
    # Open and parse the checked HTML
with open(checkedHtmlFile, encoding='utf-8') as f:
checked = f.read()
soupChecked = BeautifulSoup(checked, 'html.parser')
checkedList = []
for t in soupChecked.find_all('tr'):
cols = t.find_all('td')
src = cols[0].get_text()
target = cols[1].get_text()
checkedList.append(src + '\t' + target)
    # Prepare the worksheet
    wb = openpyxl.load_workbook(resultsFile)
    ws = wb['Sheet1']
    # Write the translated text
countT = 2
for i in translatedList:
countStr = str(countT)
src, target =i.split('\t')
judge = '=IF(B'+countStr+'=E'+countStr+',"-","check!")'
srcA = 'A' + countStr
targetB = 'B' + countStr
judgeC = 'C' + countStr
ws[srcA].value = src
ws[targetB].value = target
ws[judgeC].value = judge
countT += 1
    # Write the checked text
countC = 2
for i in checkedList:
src, target =i.split('\t')
countStr = str(countC)
srcA = 'D' + countStr
targetB = 'E' + countStr
ws[srcA].value = src
ws[targetB].value = target
countC += 1
    # Close and save the workbook
wb.close()
wb.save(resultsFile)
if __name__ == '__main__':
sg.theme('Dark Blue 3')
layout = [
[sg.Text('xlf file(before):', size=(20, 1)), sg.InputText('', enable_events=True,), sg.FilesBrowse('Add', key='-FILES-', file_types=(('xlf file', '*.xlf'),))],
[sg.Text('xlf file(after):', size=(20, 1)), sg.InputText('', enable_events=True,), sg.FilesBrowse('Add', key='-FILES-', file_types=(('xlf file', '*.xlf'),))],
[sg.Text('xsl file:', size=(20, 1)), sg.InputText('', enable_events=True,), sg.FilesBrowse('Add', key='-FILES-', file_types=(('xsl file', '*.xsl'),))],
[sg.Submit(button_text='Run')]
]
window = sg.Window('xlf2html-saxon', layout)
while True:
event, values = window.read()
        # Exit when the window is closed with [x]
if event is None:
break
if event == 'Run':
f_before = values[0]
f_after = values[1]
xsl = values[2]
            # Strip leading/trailing double quotes if present
f_before = re.sub('^\"', '', f_before)
f_before = re.sub('\"$', '', f_before)
f_after = re.sub('^\"', '', f_after)
f_after = re.sub('\"$', '', f_after)
xsl = re.sub('^\"', '', xsl)
xsl = re.sub('\"$', '', xsl)
            # HTML files to output
f_before_html = re.sub('xlf$', 'html', f_before)
f_after_html = re.sub('xlf$', 'html', f_after)
if f_before == '':
sg.popup('Please specify a xlf (before) file.')
continue
elif f_after == '':
sg.popup('Please specify a xlf (after) file.')
continue
elif xsl == '':
                sg.popup('Please specify a xsl file.')
                continue
cmd1 = 'java' + ' -jar' + ' D:\\tool\\saxonb9-1-0-8j\\saxon9.jar' + ' -s:' + f_before + ' -xsl:' + xsl + ' -o:' + f_before_html
cmd2 = 'java' + ' -jar' + ' D:\\tool\\saxonb9-1-0-8j\\saxon9.jar' + ' -s:' + f_after + ' -xsl:' + xsl + ' -o:' + f_after_html
res1 = subprocess.check_call(cmd1)
res2 = subprocess.check_call(cmd2)
print(res1)
print(res2)
xlsxTemplate = "xliff_diff.xlsx"
todaydetail = datetime.datetime.today()
            # use a separate name so the datetime module is not shadowed on later iterations
            timestamp = todaydetail.strftime("%Y%m%d%H%M%S")
            resultsFile = timestamp + '_' + xlsxTemplate
insert_Excel(f_before_html, f_after_html, resultsFile)
sg.popup('Done!')
window.close()
sys.exit()
|
the-stack_0_12864 | import csv
import json
input_file_name = "C:/Users/cch23/Desktop/창업 아이템/걸어서나눔나눔/파싱/in.csv"
output_file_name = "C:/Users/cch23/Desktop/창업 아이템/걸어서나눔나눔/파싱/in.json"
with open(input_file_name, "r", encoding="utf-8", newline="") as input_file, \
open(output_file_name, "w", encoding="utf-8", newline="") as output_file:
reader = csv.reader(input_file)
# 첫 줄은 col_names 리스트로 읽어 놓고
col_names = next(reader)
# 그 다음 줄부터 zip으로 묶어서 json으로 dumps
for cols in reader:
doc = {col_name: col for col_name, col in zip(col_names, cols)}
print(json.dumps(doc, ensure_ascii=False), file=output_file) |
the-stack_0_12865 | """
Experiment Management
"""
from datetime import datetime
from os import pardir
from attrdict import AttrDict
import pathlib
import hashlib
import os
from rl_helper import envhelper
import yaml
class ExperimentManager(object):
def __init__(self,add_env_helper=True) -> None:
super().__init__()
self.saves_root=pathlib.Path("./runs/")
self.exp_time=datetime.now().strftime("%Y%m%d-%H%M%S")
self.env_helper=envhelper()
def init(self,model_name,exp_class,exp_target,comments,sub_id):
assert len(exp_target)<24, "exp target to long > 20"
assert " " not in exp_target, "exp target should contain no space"
self.model_name=model_name
self.exp_class=exp_class
self.exp_target=exp_target
self.comments=comments
self.sub_id=sub_id
self.config={"model_name":self.model_name,"exp_class":self.exp_class,"exp_target":self.exp_target, "comments":self.comments,"sub_id":sub_id}
self.paras=AttrDict()
@property
def health(self):
        a = [self.model_name, self.exp_class, self.exp_target, self.comments,
             self.sub_id, self.config, self.paras]
        for s in a:
            assert s is not None
return True
def load(self,pth):
pth=pathlib.Path(pth)
assert pth.is_dir(),pth
config_yaml=pth.joinpath("config.yaml")
paras_yaml=pth.joinpath('paras.yaml')
assert config_yaml.is_file()
assert paras_yaml.is_file()
with open(config_yaml, "r") as stream:
self.config=yaml.safe_load(stream)
with open(paras_yaml, "r") as stream:
self.paras=AttrDict(yaml.safe_load(stream))
for k in self.config.keys():
self.__setattr__(k,self.config[k])
assert self.health
def get_exp_hash(self):
hash_seed=self.model_name+self.exp_class+self.exp_target+self.comments+str(self.sub_id)
pkeys=[]
for k in self.paras.keys():
pkeys.append(k)
pkeys.sort()
pkeys_value=[self.paras[k] for k in pkeys]
hash_seed+=str(pkeys)
hash_seed+=str(pkeys_value)
return hashlib.sha1(hash_seed.encode('utf-8')).hexdigest()[:5]
@property
def exp_hash_dir_path(self):
return self.saves_root.joinpath(self.exp_class).joinpath(self.exp_target).joinpath(str(self.sub_id)).joinpath(self.get_exp_hash())
# @property
# def model_save_dir_path(self):
# dir_pth=
# pass
@property
def model_save_pth(self):
return self.exp_hash_dir_path.joinpath("model")
@property
def log_save_dir_pth(self):
return self.exp_hash_dir_path.joinpath("logs")
@property
def paras_save_dir_pth(self):
return self.exp_hash_dir_path.joinpath("paras")
@property
def tensorbord_log_name(self):
return str(self.sub_id)
@property
def paras_dict(self):
d={}
for k in self.paras.keys():
d[k]=self.paras[k]
return d
def add_para(self,k,value):
assert self.paras.get(k,None) is None, "{} has existed in paras".format(k)
self.paras[k]=value
print("Set {} : {}".format(k,value))
def start(self,overwrite=False):
        try:
            os.makedirs(str(self.exp_hash_dir_path), exist_ok=False)
        except FileExistsError:
            if not overwrite:
                raise NotImplementedError("Failed to create run directory; this experiment already exists: {}".format(self.exp_hash_dir_path))
os.makedirs(self.paras_save_dir_pth,exist_ok=True)
with open(self.exp_hash_dir_path.joinpath("paras.yaml"), 'w') as outfile:
yaml.dump(self.paras_dict, outfile, default_flow_style=False)
with open(self.exp_hash_dir_path.joinpath("config.yaml"), 'w') as outfile:
yaml.dump(self.config, outfile, default_flow_style=False)
for k in self.paras.keys():
ss="{}_{}".format(k,str(self.paras[k]))
with open(self.paras_save_dir_pth.joinpath(ss), 'w') as outfile:
pass
with open(self.exp_hash_dir_path.joinpath(""+str(self.exp_time)), 'w') as outfile:
pass
def save_gif(self,**kargs):
self.env_helper.save_gif(path=self.log_save_dir_pth,**kargs) |
the-stack_0_12866 | import unicodedata
from typing import Optional
from django.utils.translation import gettext as _
from zerver.lib.exceptions import JsonableError
from zerver.models import Stream
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = {
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
}
def check_string_is_printable(var: str) -> Optional[int]:
# Return position (1-indexed!) of the character which is not
# printable, None if no such character is present.
for i, char in enumerate(var):
unicode_character = unicodedata.category(char)
if (unicode_character in ["Cc", "Cs"]) or char in unicode_non_chars:
return i + 1
return None
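# Illustrative examples: check_string_is_printable("ab\x00c") returns 3 (the
# 1-indexed position of the NUL control character), while
# check_string_is_printable("abc") returns None since every character is printable.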
def check_stream_name(stream_name: str) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Stream name can't be empty!"))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(
_("Stream name too long (limit: {} characters).").format(Stream.MAX_NAME_LENGTH)
)
for i in stream_name:
if ord(i) == 0:
raise JsonableError(
_("Stream name '{}' contains NULL (0x00) characters.").format(stream_name)
)
def check_stream_topic(topic: str) -> None:
if topic.strip() == "":
raise JsonableError(_("Topic can't be empty!"))
invalid_character_pos = check_string_is_printable(topic)
if invalid_character_pos is not None:
raise JsonableError(
_("Invalid character in topic, at position {}!").format(invalid_character_pos)
)
|
the-stack_0_12867 | import tensorflow as tf
import numpy as np
# NOTE: If you want full control for model architecture. please take a look
# at the code and change whatever you want. Some hyper parameters are hardcoded.
# Default hyperparameters:
hparams = tf.contrib.training.HParams(
name="wavenet_vocoder",
# Convenient model builder
builder="wavenet",
# Presets known to work good.
# NOTE: If specified, override hyper parameters with preset
preset="",
presets={
},
# Input type:
# 1. raw [-1, 1]
# 2. mulaw [-1, 1]
# 3. mulaw-quantize [0, mu]
# If input_type is raw or mulaw, network assumes scalar input and
# discretized mixture of logistic distributions output, otherwise one-hot
# input and softmax output are assumed.
# **NOTE**: if you change the one of the two parameters below, you need to
# re-run preprocessing before training.
    # **NOTE**: scalar input (raw or mulaw) is experimental. Use it at your own risk.
input_type="mulaw-quantize",
quantize_channels=256, # 65536 or 256
# Audio:
sample_rate=24000,
# this is only valid for mulaw is True
silence_threshold=2,
num_mels=80,
fmin=125,
fmax=7600,
fft_size=1024,
# shift can be specified by either hop_size or frame_shift_ms
hop_size=256,
frame_shift_ms=None,
min_level_db=-100,
ref_level_db=20,
# whether to rescale waveform or not.
# Let x is an input waveform, rescaled waveform y is given by:
# y = x / np.abs(x).max() * rescaling_max
rescaling=True,
rescaling_max=0.999,
# mel-spectrogram is normalized to [0, 1] for each utterance and clipping may
# happen depends on min_level_db and ref_level_db, causing clipping noise.
# If False, assertion is added to ensure no clipping happens.
allow_clipping_in_normalization=False,
# Mixture of logistic distributions:
log_scale_min=float(np.log(1e-14)),
# Model:
# This should equal to `quantize_channels` if mu-law quantize enabled
# otherwise num_mixture * 3 (pi, mean, log_scale)
out_channels=256,
layers=30,
stacks=3,
residual_channels=512,
    gate_channels=512,  # split into 2 groups internally for gated activation
skip_out_channels=256,
dropout=1 - 0.95,
kernel_size=3,
# If True, apply weight normalization as same as DeepVoice3
weight_normalization=True,
    # Local conditioning (set negative value to disable)
cin_channels=80,
# If True, use transposed convolutions to upsample conditional features,
# otherwise repeat features to adjust time resolution
upsample_conditional_features=True,
# should np.prod(upsample_scales) == hop_size
upsample_scales=[4, 4, 4, 4],
# Freq axis kernel size for upsampling network
freq_axis_kernel_size=3,
# Global conditioning (set negative value to disable)
# currently limited for speaker embedding
# this should only be enabled for multi-speaker dataset
gin_channels=-1, # i.e., speaker embedding dim
n_speakers=7, # 7 for CMU ARCTIC
# Data loader
pin_memory=True,
num_workers=2,
# train/test
# test size can be specified as portion or num samples
test_size=0.0441, # 50 for CMU ARCTIC single speaker
test_num_samples=None,
random_state=1234,
# Loss
# Training:
batch_size=32,
adam_beta1=0.9,
adam_beta2=0.999,
adam_eps=1e-8,
initial_learning_rate=1e-3,
# see lrschedule.py for available lr_schedule
lr_schedule="noam_learning_rate_decay",
lr_schedule_kwargs={}, # {"anneal_rate": 0.5, "anneal_interval": 50000},
nepochs=2000,
weight_decay=0.0,
clip_thresh=-1,
# max time steps can either be specified as sec or steps
# This is needed for those who don't have huge GPU memory...
# if both are None, then full audio samples are used
max_time_sec=None,
max_time_steps=7680,
# Hold moving averaged parameters and use them for evaluation
exponential_moving_average=True,
# averaged = decay * averaged + (1 - decay) * x
ema_decay=0.9999,
# Save
# per-step intervals
checkpoint_interval=10000,
train_eval_interval=10000,
# per-epoch interval
test_eval_epoch_interval=5,
save_optimizer_state=True,
# Eval:
)
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
return 'Hyperparameters:\n' + '\n'.join(hp)
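# Usage sketch: individual values can be overridden before training via the
# standard HParams API, e.g. hparams.parse("batch_size=16,nepochs=500"), and
# hparams_debug_string() can then be logged to record the effective settings.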
|
the-stack_0_12874 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import psutil
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta, timezone
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import redis.exceptions
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, create_file_after_timeout_and_setsid, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid, raise_exc_mock,
launch_process_within_worker_and_store_pid
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus, Retry
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus
from rq.serializers import JSONSerializer
class CustomJob(Job):
pass
class CustomQueue(Queue):
pass
class TestWorker(RQTestCase):
def test_create_worker(self):
"""Worker creation using various inputs."""
# With single string argument
w = Worker('foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of strings
w = Worker(['foo', 'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
self.assertEqual(w.queue_names(), ['foo', 'bar'])
# With iterable of strings
w = Worker(iter(['foo', 'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# Also accept byte strings in Python 2
if PY2:
# With single byte string argument
w = Worker(b'foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of byte strings
w = Worker([b'foo', b'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')
# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With string and serializer
w = Worker('foo', serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
# With queue having serializer
w = Worker(Queue('foo'), serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo'), Queue('bar')
w = Worker([fooq, barq])
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_work_and_quit_custom_serializer(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo', serializer=JSONSerializer), Queue('bar', serializer=JSONSerializer)
w = Worker([fooq, barq], serializer=JSONSerializer)
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_worker_all(self):
"""Worker.all() works properly"""
foo_queue = Queue('foo')
bar_queue = Queue('bar')
w1 = Worker([foo_queue, bar_queue], name='w1')
w1.register_birth()
w2 = Worker([foo_queue], name='w2')
w2.register_birth()
self.assertEqual(
set(Worker.all(connection=foo_queue.connection)),
set([w1, w2])
)
self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
w1.register_death()
w2.register_death()
def test_find_by_key(self):
"""Worker.find_by_key restores queues, state and job_id."""
queues = [Queue('foo'), Queue('bar')]
w = Worker(queues)
w.register_death()
w.register_birth()
w.set_state(WorkerStatus.STARTED)
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.queues, queues)
self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
self.assertEqual(worker._job_id, None)
self.assertTrue(worker.key in Worker.all_keys(worker.connection))
self.assertEqual(worker.version, VERSION)
# If worker is gone, its keys should also be removed
worker.connection.delete(worker.key)
Worker.find_by_key(worker.key)
self.assertFalse(worker.key in Worker.all_keys(worker.connection))
self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
"""Worker ttl."""
w = Worker([])
w.register_birth()
[worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
self.assertIsNotNone(self.testconn.ttl(worker_key))
w.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
self.assertIsNone(job.worker_name)
def test_job_times(self):
"""job times are set correctly."""
q = Queue('foo')
w = Worker([q])
before = utcnow()
before = before.replace(microsecond=0)
job = q.enqueue(say_hello)
self.assertIsNotNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Stranger!')
after = utcnow()
job.refresh()
self.assertTrue(
before <= job.enqueued_at <= after,
'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
)
self.assertTrue(
before <= job.started_at <= after,
'Not %s <= %s <= %s' % (before, job.started_at, after)
)
self.assertTrue(
before <= job.ended_at <= after,
'Not %s <= %s <= %s' % (before, job.ended_at, after)
)
def test_work_is_unreadable(self):
"""Unreadable jobs are put on the failed job registry."""
q = Queue()
self.assertEqual(q.count, 0)
# NOTE: We have to fake this enqueueing for this test case.
# What we're simulating here is a call to a function that is not
# importable from the worker process.
job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
job.save()
job_data = job.data
invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
assert job_data != invalid_data
self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
# We use the low-level internal function to enqueue any data (bypassing
# validity checks)
q.push_job_id(job.id)
self.assertEqual(q.count, 1)
# All set, we're going to process it
w = Worker([q])
w.work(burst=True) # should silently pass
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
def test_heartbeat(self):
"""Heartbeat saves last_heartbeat"""
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
self.assertEqual(w.hostname,
as_text(self.testconn.hget(w.key, 'hostname')))
last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
self.assertTrue(last_heartbeat is not None)
w = Worker.find_by_key(w.key)
self.assertIsInstance(w.last_heartbeat, datetime)
# worker.refresh() shouldn't fail if last_heartbeat is None
# for compatibility reasons
self.testconn.hdel(w.key, 'last_heartbeat')
w.refresh()
# worker.refresh() shouldn't fail if birth is None
# for compatibility reasons
self.testconn.hdel(w.key, 'birth')
w.refresh()
@slow
def test_heartbeat_survives_lost_connection(self):
with mock.patch.object(Worker, 'heartbeat') as mocked:
# None -> Heartbeat is first called before the job loop
mocked.side_effect = [None, redis.exceptions.ConnectionError()]
q = Queue()
w = Worker([q])
w.work(burst=True)
# First call is prior to job loop, second raises the error,
# third is successful, after "recovery"
assert mocked.call_count == 3
@slow
def test_heartbeat_busy(self):
"""Periodic heartbeats while horse is busy with long jobs"""
q = Queue()
w = Worker([q], job_monitoring_interval=5)
for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
job = q.enqueue(long_running_job,
args=(timeout,),
job_timeout=30,
result_ttl=-1)
with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
w.execute_job(job, q)
self.assertEqual(mocked.call_count, expected_heartbeats)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_work_fails(self):
"""Failing jobs are put on the failed queue."""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
self.assertIsNone(job.worker_name) # Worker name is cleared after failures
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_horse_fails(self):
"""Tests that job status is set to FAILED even if horse unexpectedly fails"""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(say_hello)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
with mock.patch.object(w, 'perform_job', new_callable=raise_exc_mock):
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_statistics(self):
"""Successful and failed job counts are saved properly"""
queue = Queue()
job = queue.enqueue(div_by_zero)
worker = Worker([queue])
worker.register_birth()
self.assertEqual(worker.failed_job_count, 0)
self.assertEqual(worker.successful_job_count, 0)
self.assertEqual(worker.total_working_time, 0)
registry = StartedJobRegistry(connection=worker.connection)
job.started_at = utcnow()
job.ended_at = job.started_at + timedelta(seconds=0.75)
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 1)
self.assertEqual(worker.successful_job_count, 1)
self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 2)
self.assertEqual(worker.successful_job_count, 2)
self.assertEqual(worker.total_working_time, 3.0)
def test_handle_retry(self):
"""handle_job_failure() handles retry properly"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=2)
job = queue.enqueue(div_by_zero, retry=retry)
registry = FailedJobRegistry(queue=queue)
worker = Worker([queue])
# If job if configured to retry, it will be put back in the queue
# and not put in the FailedJobRegistry.
# This is the original execution
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 1)
self.assertEqual([job.id], queue.job_ids)
self.assertFalse(job in registry)
# First retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([job.id], queue.job_ids)
# Second retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([], queue.job_ids)
# If a job is no longer retries, it's put in FailedJobRegistry
self.assertTrue(job in registry)
def test_retry_interval(self):
"""Retries with intervals are scheduled"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=1, interval=5)
job = queue.enqueue(div_by_zero, retry=retry)
worker = Worker([queue])
registry = queue.scheduled_job_registry
# If job if configured to retry with interval, it will be scheduled,
# not directly put back in the queue
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
self.assertEqual(job.retries_left, 0)
self.assertEqual(len(registry), 1)
self.assertEqual(queue.job_ids, [])
# Scheduled time is roughly 5 seconds from now
scheduled_time = registry.get_scheduled_time(job)
now = datetime.now(timezone.utc)
self.assertTrue(now + timedelta(seconds=4) < scheduled_time < now + timedelta(seconds=6))
def test_total_working_time(self):
"""worker.total_working_time is stored properly"""
queue = Queue()
job = queue.enqueue(long_running_job, 0.05)
worker = Worker([queue])
worker.register_birth()
worker.perform_job(job, queue)
worker.refresh()
# total_working_time should be a little bit more than 0.05 seconds
self.assertGreaterEqual(worker.total_working_time, 0.05)
# in multi-user environments delays might be unpredictable,
        # please adjust this magic limit accordingly if it takes even longer to run
self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
"""Worker exits after number of jobs complete."""
queue = Queue()
job1 = queue.enqueue(do_nothing)
job2 = queue.enqueue(do_nothing)
worker = Worker([queue])
worker.work(max_jobs=1)
self.assertEqual(JobStatus.FINISHED, job1.get_status())
self.assertEqual(JobStatus.QUEUED, job2.get_status())
def test_disable_default_exception_handler(self):
"""
Job is not moved to FailedJobRegistry when default custom exception
handler is disabled.
"""
queue = Queue(name='default', connection=self.testconn)
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=False)
worker.work(burst=True)
registry = FailedJobRegistry(queue=queue)
self.assertTrue(job in registry)
# Job is not added to FailedJobRegistry if
# disable_default_exception_handler is True
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=True)
worker.work(burst=True)
self.assertFalse(job in registry)
def test_custom_exc_handling(self):
"""Custom exception handling."""
def first_handler(job, *exc_info):
job.meta = {'first_handler': True}
job.save_meta()
return True
def second_handler(job, *exc_info):
job.meta.update({'second_handler': True})
job.save_meta()
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
q = Queue()
self.assertEqual(q.count, 0)
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=first_handler)
w.work(burst=True)
# Check the job
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, second_handler])
w.work(burst=True)
# Both custom exception handlers are run
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertTrue(job.meta['second_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, black_hole,
second_handler])
w.work(burst=True)
# second_handler is not run since it's interrupted by black_hole
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertEqual(job.meta.get('second_handler'), None)
def test_cancelled_jobs_arent_executed(self):
"""Cancelling jobs."""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
job = q.enqueue(create_file, SENTINEL_FILE)
        # Here, we cancel the job, so the sentinel file should not be created
self.testconn.delete(job.key)
w = Worker([q])
w.work(burst=True)
assert q.count == 0
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow # noqa
def test_timeouts(self):
"""Worker kills jobs after timeout."""
sentinel_file = '/tmp/.rq_sentinel'
q = Queue()
w = Worker([q])
# Put it on the queue with a timeout value
res = q.enqueue(create_file_after_timeout,
args=(sentinel_file, 4),
job_timeout=1)
try:
os.unlink(sentinel_file)
except OSError as e:
if e.errno == 2:
pass
self.assertEqual(os.path.exists(sentinel_file), False)
w.work(burst=True)
self.assertEqual(os.path.exists(sentinel_file), False)
# TODO: Having to do the manual refresh() here is really ugly!
res.refresh()
self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
"""Ensure that Worker properly sets result_ttl for individual jobs."""
q = Queue()
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertNotEqual(self.testconn.ttl(job.key), 0)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
# Job with -1 result_ttl don't expire
job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.ttl(job.key), -1)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
# Job with result_ttl = 0 gets deleted immediately
job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.get(job.key), None)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
def test_worker_sets_job_status(self):
"""Ensure that worker correctly sets job status."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(job.is_queued, True)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, False)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, True)
self.assertEqual(job.is_failed, False)
# Failed jobs should set status to "failed"
job = q.enqueue(div_by_zero, args=(1,))
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello, result_ttl=0)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
"""SimpleWorker's key must last longer than job.timeout when working"""
queue = Queue('foo')
worker = SimpleWorker([queue])
job_timeout = 300
job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
worker.work(burst=True)
job.refresh()
self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
"""Prepare job execution does the necessary bookkeeping."""
queue = Queue(connection=self.testconn)
job = queue.enqueue(say_hello)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Updates worker statuses
self.assertEqual(worker.get_state(), 'busy')
self.assertEqual(worker.get_current_job_id(), job.id)
# job status is also updated
self.assertEqual(job._status, JobStatus.STARTED)
self.assertEqual(job.worker_name, worker.name)
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
"""Worker process work with unicode or str other than pure ascii content,
logging work properly"""
q = Queue("foo")
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
"""Test Pause Worker Execution"""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
q.enqueue(create_file, SENTINEL_FILE)
w = Worker([q])
suspend(self.testconn)
w.work(burst=True)
assert q.count == 1
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
resume(self.testconn)
w.work(burst=True)
assert q.count == 0
self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
q = Queue()
for _ in range(5):
q.enqueue(do_nothing)
w = Worker([q])
# This suspends workers for working for 2 second
suspend(self.testconn, 2)
# So when this burst of work happens the queue should remain at 5
w.work(burst=True)
assert q.count == 5
sleep(3)
# The suspension should be expired now, and a burst of work should now clear the queue
w.work(burst=True)
assert q.count == 0
def test_worker_hash_(self):
"""Workers are hashed by their .name attribute"""
q = Queue('foo')
w1 = Worker([q], name="worker1")
w2 = Worker([q], name="worker2")
w3 = Worker([q], name="worker1")
worker_set = set([w1, w2, w3])
self.assertEqual(len(worker_set), 2)
def test_worker_sets_birth(self):
"""Ensure worker correctly sets worker birth date."""
q = Queue()
w = Worker([q])
w.register_birth()
birth_date = w.birth_date
self.assertIsNotNone(birth_date)
self.assertEqual(type(birth_date).__name__, 'datetime')
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertIsInstance(death_date, datetime)
def test_clean_queue_registries(self):
"""worker.clean_registries sets last_cleaned_at and cleans registries."""
foo_queue = Queue('foo', connection=self.testconn)
foo_registry = StartedJobRegistry('foo', connection=self.testconn)
self.testconn.zadd(foo_registry.key, {'foo': 1})
self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
bar_queue = Queue('bar', connection=self.testconn)
bar_registry = StartedJobRegistry('bar', connection=self.testconn)
self.testconn.zadd(bar_registry.key, {'bar': 1})
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
worker = Worker([foo_queue, bar_queue])
self.assertEqual(worker.last_cleaned_at, None)
worker.clean_registries()
self.assertNotEqual(worker.last_cleaned_at, None)
self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
# worker.clean_registries() only runs once every 15 minutes
# If we add another key, calling clean_registries() should do nothing
self.testconn.zadd(bar_registry.key, {'bar': 1})
worker.clean_registries()
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
def test_should_run_maintenance_tasks(self):
"""Workers should run maintenance tasks on startup and every hour."""
queue = Queue(connection=self.testconn)
worker = Worker(queue)
self.assertTrue(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow()
self.assertFalse(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
"""Worker calls clean_registries when run."""
queue = Queue(connection=self.testconn)
registry = StartedJobRegistry(connection=self.testconn)
self.testconn.zadd(registry.key, {'foo': 1})
worker = Worker(queue, connection=self.testconn)
worker.work(burst=True)
self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
"""Dependencies added while the job gets finished shouldn't get lost."""
# This patches the enqueue_dependents to enqueue a new dependency AFTER
# the original code was executed.
orig_enqueue_dependents = Queue.enqueue_dependents
def new_enqueue_dependents(self, job, *args, **kwargs):
orig_enqueue_dependents(self, job, *args, **kwargs)
if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
Queue._add_enqueue = None
Queue().enqueue_call(say_hello, depends_on=job)
Queue.enqueue_dependents = new_enqueue_dependents
q = Queue()
w = Worker([q])
with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
parent_job = q.enqueue(say_hello, result_ttl=0)
Queue._add_enqueue = parent_job
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
# The created spy checks two issues:
        # * before the fix of #739, 2 of the 3 jobs were executed due
# to the race condition
# * during the development another issue was fixed:
# due to a missing pipeline usage in Queue.enqueue_job, the job
# which was enqueued before the "rollback" was executed twice.
# So before that fix the call count was 4 instead of 3
self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack."""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack -- even if the job errored"""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
"""Check that log_result_lifespan True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
"""Check that log_result_lifespan False causes job lifespan to not be logged."""
q = Queue()
class TestWorker(Worker):
log_result_lifespan = False
w = TestWorker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
"""Check that log_job_description True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
"""Check that log_job_description False causes job lifespan to not be logged."""
q = Queue()
w = Worker([q], log_job_description=False)
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
q = Queue()
w = Worker([q])
w.version = '0.0.0'
w.register_birth()
self.assertEqual(w.version, '0.0.0')
w.refresh()
self.assertEqual(w.version, '0.0.0')
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.version, '0.0.0')
def test_python_version(self):
python_version = sys.version
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(w.python_version, python_version)
# now patching version
python_version = 'X.Y.Z.final' # dummy version
self.assertNotEqual(python_version, sys.version) # otherwise tests are pointless
w2 = Worker([q])
w2.python_version = python_version
w2.register_birth()
self.assertEqual(w2.python_version, python_version)
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w2.key)
self.assertEqual(worker.python_version, python_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
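    """Test helper: sleep for ``time_to_wait`` seconds, then SIGKILL the work horse at ``pid``."""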
time.sleep(time_to_wait)
os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
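    """Mixin that arms a SIGALRM watchdog so a test that hangs (e.g. a worker
    ignoring shutdown signals) fails after ``killtimeout`` seconds instead of
    blocking the suite forever."""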
def setUp(self):
        # we want tests to fail if signals are ignored and the worker remains
        # running, so set a signal to kill them after X seconds
self.killtimeout = 15
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(self.killtimeout)
def _timeout(self, signal, frame):
raise AssertionError(
"test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
)
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
@slow
def test_idle_worker_warm_shutdown(self):
"""worker with no ongoing job receiving single SIGTERM signal and shutting down"""
w = Worker('foo')
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(1)
self.assertFalse(w._stop_requested)
@slow
def test_working_worker_warm_shutdown(self):
"""worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_warm'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(2)
self.assertFalse(p.is_alive())
self.assertTrue(w._stop_requested)
self.assertTrue(os.path.exists(sentinel_file))
self.assertIsNotNone(w.shutdown_requested_date)
self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')
@slow
def test_working_worker_cold_shutdown(self):
"""Busy worker shuts down immediately on double SIGTERM signal"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_cold'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), True))
p.start()
self.assertRaises(SystemExit, w.work)
p.join(1)
self.assertTrue(w._stop_requested)
self.assertFalse(os.path.exists(sentinel_file))
shutdown_requested_date = w.shutdown_requested_date
self.assertIsNotNone(shutdown_requested_date)
self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')
@slow
def test_work_horse_death_sets_job_failed(self):
"""worker with an ongoing job whose work horse dies unexpectadly (before
completing the job) should set the job's status to FAILED
"""
fooq = Queue('foo')
self.assertEqual(fooq.count, 0)
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
if os.path.exists(sentinel_file):
os.remove(sentinel_file)
fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
job, queue = w.dequeue_job_and_maintain_ttl(5)
w.fork_work_horse(job, queue)
p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
p.start()
w.monitor_work_horse(job, queue)
job_status = job.get_status()
p.join(1)
self.assertEqual(job_status, JobStatus.FAILED)
failed_job_registry = FailedJobRegistry(queue=fooq)
self.assertTrue(job in failed_job_registry)
self.assertEqual(fooq.count, 0)
@slow
def test_work_horse_force_death(self):
"""Simulate a frozen worker that doesn't observe the timeout properly.
Fake it by artificially setting the timeout of the parent process to
something much smaller after the process is already forked.
"""
fooq = Queue('foo')
self.assertEqual(fooq.count, 0)
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
if os.path.exists(sentinel_file):
os.remove(sentinel_file)
fooq.enqueue(launch_process_within_worker_and_store_pid, sentinel_file, 100)
job, queue = w.dequeue_job_and_maintain_ttl(5)
w.fork_work_horse(job, queue)
job.timeout = 5
w.job_monitoring_interval = 1
now = utcnow()
time.sleep(1)
with open(sentinel_file) as f:
subprocess_pid = int(f.read().strip())
self.assertTrue(psutil.pid_exists(subprocess_pid))
w.monitor_work_horse(job, queue)
fudge_factor = 1
total_time = w.job_monitoring_interval + 65 + fudge_factor
self.assertTrue((utcnow() - now).total_seconds() < total_time)
self.assertEqual(job.get_status(), JobStatus.FAILED)
failed_job_registry = FailedJobRegistry(queue=fooq)
self.assertTrue(job in failed_job_registry)
self.assertEqual(fooq.count, 0)
self.assertFalse(psutil.pid_exists(subprocess_pid))
def schedule_access_self():
q = Queue('default', connection=get_current_connection())
q.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
def setUp(self):
super(TestWorkerSubprocess, self).setUp()
db_num = self.testconn.connection_pool.connection_kwargs['db']
self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num
def test_run_empty_queue(self):
"""Run the worker in its own process with an empty queue"""
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
def test_run_access_self(self):
"""Schedule a job, then run the worker as subprocess"""
q = Queue()
job = q.enqueue(access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
registry = FinishedJobRegistry(queue=q)
self.assertTrue(job in registry)
assert q.count == 0
@skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
def test_run_scheduled_access_self(self):
"""Schedule a job that schedules a job, then run the worker as subprocess"""
q = Queue()
job = q.enqueue(schedule_access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
registry = FinishedJobRegistry(queue=q)
self.assertTrue(job in registry)
assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
def setUp(self):
super(HerokuWorkerShutdownTestCase, self).setUp()
self.sandbox = '/tmp/rq_shutdown/'
os.makedirs(self.sandbox)
def tearDown(self):
shutil.rmtree(self.sandbox, ignore_errors=True)
@slow
def test_immediate_shutdown(self):
"""Heroku work horse shutdown with immediate (0 second) kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
@slow
def test_1_sec_shutdown(self):
"""Heroku work horse shutdown with 1 second kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
time.sleep(0.1)
self.assertEqual(p.exitcode, None)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
@slow
def test_shutdown_double_sigrtmin(self):
"""Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
        # we have to wait a short while otherwise the second signal won't be processed.
time.sleep(0.1)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
@mock.patch('rq.worker.logger.info')
def test_handle_shutdown_request(self, mock_logger_info):
"""Mutate HerokuWorker so _horse_pid refers to an artificial process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
path = os.path.join(self.sandbox, 'shouldnt_exist')
p = Process(target=create_file_after_timeout_and_setsid, args=(path, 2))
p.start()
self.assertEqual(p.exitcode, None)
time.sleep(0.1)
w._horse_pid = p.pid
w.handle_warm_shutdown_request()
p.join(2)
# would expect p.exitcode to be -34
self.assertEqual(p.exitcode, -34)
self.assertFalse(os.path.exists(path))
mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)
def test_handle_shutdown_request_no_horse(self):
"""Mutate HerokuWorker so _horse_pid refers to non existent process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
w._horse_pid = 19999
w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
def setUp(self):
super(TestExceptionHandlerMessageEncoding, self).setUp()
self.worker = Worker("foo")
self.worker._exc_handlers = []
# Mimic how exception info is actually passed forwards
try:
raise Exception(u"💪")
except Exception:
self.exc_info = sys.exc_info()
def test_handle_exception_handles_non_ascii_in_exception_message(self):
"""worker.handle_exception doesn't crash on non-ascii in exception message."""
self.worker.handle_exception(Mock(), *self.exc_info)
|
the-stack_0_12875 | import os
import requests
import codecs
import json
import hashlib
import io
from pathlib import Path
import pandas as pd
from bs4 import BeautifulSoup as bs
from bs4.element import Tag
from sklearn.model_selection import train_test_split
from finetune.datasets import Dataset
from finetune import SequenceLabeler
from finetune.utils import finetune_to_indico_sequence
from finetune.metrics import annotation_report
XML_PATH = os.path.join("Data", "Sequence", "reuters.xml")
DATA_PATH = os.path.join("Data", "Sequence", "reuters.json")
CHECKSUM = "a79cab99ed30b7932d46711ef8d662e0"
class Reuters(Dataset):
def __init__(self, filename=None, **kwargs):
super().__init__(filename=(filename or DATA_PATH), **kwargs)
@property
def md5(self):
return CHECKSUM
def download(self):
url = "https://raw.githubusercontent.com/dice-group/n3-collection/master/reuters.xml"
r = requests.get(url)
with open(XML_PATH, 'wb') as fd:
fd.write(r.content)
fd = open(XML_PATH)
soup = bs(fd, "html5lib")
docs = []
docs_labels = []
for elem in soup.find_all("document"):
texts = []
labels = []
# Loop through each child of the element under "textwithnamedentities"
for c in elem.find("textwithnamedentities").children:
if type(c) == Tag:
if c.name == "namedentityintext":
label = "Named Entity" # part of a named entity
else:
label = "<PAD>" # irrelevant word
texts.append(c.text)
labels.append(label)
docs.append(texts)
docs_labels.append(labels)
fd.close()
os.remove(XML_PATH)
raw_texts = ["".join(doc) for doc in docs]
texts, annotations = finetune_to_indico_sequence(raw_texts, docs, docs_labels)
df = pd.DataFrame({'texts': texts, 'annotations': [json.dumps(annotation) for annotation in annotations]})
df.to_csv(DATA_PATH)
if __name__ == "__main__":
dataset = Reuters().dataframe
dataset['annotations'] = [json.loads(annotation) for annotation in dataset['annotations']]
trainX, testX, trainY, testY = train_test_split(
dataset.texts.values,
dataset.annotations.values,
test_size=0.3,
random_state=42
)
model = SequenceLabeler(batch_size=2, val_size=0.)
model.fit(trainX, trainY)
predictions = model.predict(testX)
print(annotation_report(testY, predictions))
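    # Sketch (assumes finetune's model persistence API): the trained labeler
    # could be kept for later inference with something like
    # model.save("reuters_sequence.model") and SequenceLabeler.load(...).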
|
the-stack_0_12878 | """Place multiple rectangles with the mouse."""
import pygame
from pygame.locals import *
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GRAY = (127, 127, 127)
pygame.init()
screen = pygame.display.set_mode((640, 240))
start = (0, 0)
size = (0, 0)
drawing = False
rect_list = []
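# Note: dragging up or to the left produces a rect with negative width/height;
# if that causes rendering or hit-testing issues, the rect could be normalized
# with rect.normalize() before appending (sketch, not done here).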
running = True
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
elif event.type == MOUSEBUTTONDOWN:
start = event.pos
size = 0, 0
drawing = True
elif event.type == MOUSEBUTTONUP:
end = event.pos
size = end[0]-start[0], end[1]-start[1]
rect = pygame.Rect(start, size)
rect_list.append(rect)
drawing = False
elif event.type == MOUSEMOTION and drawing:
end = event.pos
size = end[0]-start[0], end[1]-start[1]
screen.fill(GRAY)
for rect in rect_list:
pygame.draw.rect(screen, RED, rect, 3)
pygame.draw.rect(screen, BLUE, (start, size), 1)
pygame.display.update()
pygame.quit() |
the-stack_0_12879 | import asyncio
import inspect
import click
from fastapi import FastAPI, APIRouter
from starlette.middleware.sessions import SessionMiddleware
try:
from importlib.metadata import entry_points, version
except ImportError:
from importlib_metadata import entry_points, version
from . import logger, config
from .models import db
router = APIRouter()
login_router = APIRouter()
def get_app():
app = FastAPI(title="FenceX", version=version("fencex"), debug=config.DEBUG)
app.idps = {}
db.init_app(app)
load_modules(app)
return app
class ClientDisconnectMiddleware:
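    """ASGI middleware that runs the wrapped app as a task and lets HTTP handlers
    opt in to disconnect handling: calling ``scope["add_close_watcher"]()`` starts
    a watcher that cancels the in-flight task when the client sends ``http.disconnect``."""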
def __init__(self, app):
self._app = app
async def __call__(self, scope, receive, send):
loop = asyncio.get_running_loop()
rv = loop.create_task(self._app(scope, receive, send))
waiter = None
cancelled = False
if scope["type"] == "http":
def add_close_watcher():
nonlocal waiter
async def wait_closed():
nonlocal cancelled
while True:
message = await receive()
if message["type"] == "http.disconnect":
if not rv.done():
cancelled = True
rv.cancel()
break
waiter = loop.create_task(wait_closed())
scope["add_close_watcher"] = add_close_watcher
try:
await rv
except asyncio.CancelledError:
if not cancelled:
raise
if waiter and not waiter.done():
waiter.cancel()
def load_modules(app=None):
if app:
app.add_middleware(ClientDisconnectMiddleware)
app.add_middleware(SessionMiddleware, secret_key=config.SESSION_SECRET)
all_args = dict(app=app, router=router, login_router=login_router)
logger.info("Start to load modules.")
for ep in entry_points()["fencex.modules"]:
mod = ep.load()
if app:
init_app = getattr(mod, "init_app", None)
if init_app:
args = []
for name in inspect.getfullargspec(init_app).args:
args.append(all_args[name])
init_app(*args)
msg = "Loaded module: "
logger.info(
msg + "%s",
ep.name,
extra={"color_message": msg + click.style("%s", fg="cyan")},
)
if app:
router.include_router(login_router, prefix="/login")
app.include_router(router, prefix=config.URL_PREFIX if config.DEBUG else "")
app.all_paths = set([r.path for r in app.routes])
@router.get("/version")
def get_version():
return version("fencex")
@router.get("/_status")
async def get_status():
now = await db.scalar("SELECT now()")
return dict(status="OK", timestamp=now)
|
the-stack_0_12880 | import rclpy
from rclpy.node import Node
from std_msgs.msg import String
class TestSubscriber(Node):
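    """Minimal rclpy node that subscribes to the 'websock_echo' String topic and logs every message received."""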
def __init__(self):
super().__init__('test_subscriber')
self.subscription = self.create_subscription(
String,
'websock_echo',
self.listener_callback,
10)
        self.subscription  # prevent unused variable warning
def listener_callback(self, msg):
self.get_logger().info('Received from websocket bridge: "%s"' % msg.data)
def main(args=None):
rclpy.init(args=args)
test_subscriber = TestSubscriber()
rclpy.spin(test_subscriber)
test_subscriber.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() |
the-stack_0_12881 | import typing
from PyQt5.QtCore import QAbstractListModel, QModelIndex, Qt, QMimeData
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QFileDialog
from src.expression import calculateExpr, isValidExpression, toEditableExpr, fromEditableExpr
from .utils import saveHistoryToFile, addExpressionToHistoryCache, clearHistoryCache
ExpressionRole = Qt.UserRole
ResultRole = Qt.UserRole + 1
class HistoryListModel(QAbstractListModel):
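    """List model of calculator history entries.

    DisplayRole renders each entry as ``"<expression> = <result>"``, while the
    custom ExpressionRole and ResultRole expose the raw expression and its
    computed value; editing and drag-and-drop reuse the same expression text
    via a plain-text MIME payload."""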
def __init__(self, parent=None) -> None:
super(HistoryListModel, self).__init__(parent)
self._expressions = []
self._font = None
self.need_clear_history = True
def addExpression(self, expr: str, index: int = 0, save_to_cache: bool = True) -> None:
if not isValidExpression(expr):
return
if self.rowCount(QModelIndex()):
latest_expr_index = self.index(0, 0)
latest_expr = self.data(latest_expr_index, ExpressionRole)
if latest_expr == expr:
return
if self.need_clear_history and self._expressions:
self.need_clear_history = False
self.beginResetModel()
self._expressions.insert(index, expr)
self.endResetModel()
if save_to_cache:
if self.need_clear_history:
clearHistoryCache()
self.need_clear_history = False
addExpressionToHistoryCache(expr)
def addExpressions(self, expressions: list) -> None:
for expression in expressions:
self.addExpression(expression, save_to_cache=False)
def rowCount(self, parent: QModelIndex) -> int:
return len(self._expressions)
def data(self, index: QModelIndex, role: int) -> typing.Any:
if not index.isValid():
return None
expression = self._expressions[index.row()]
if role == Qt.DisplayRole:
return f'{expression} = {calculateExpr(expression)}'
elif role == Qt.FontRole:
return self._font
elif role == Qt.EditRole:
return expression
elif role == ExpressionRole:
return expression
elif role == ResultRole:
return calculateExpr(expression)
def clear(self) -> None:
self.beginResetModel()
self._expressions.clear()
clearHistoryCache()
self.endResetModel()
def saveHistory(self) -> None:
if self.rowCount(QModelIndex()) == 0:
return
file_path, _ = QFileDialog.getSaveFileName(filter='*.txt')
if not file_path:
return
expressions = self.equations()
saveHistoryToFile(expressions, file_path)
def equations(self) -> typing.List[str]:
equations_list = []
for expression in self._expressions:
equations_list.append(f'{expression} = {calculateExpr(expression)}')
return equations_list
def insertRows(self, row: int, count: int, parent: QModelIndex = ...) -> bool:
self.beginInsertRows(parent, row, row + count - 1)
for _ in range(count):
self._expressions.insert(row, None)
self.endInsertRows()
return True
def removeRows(self, row: int, count: int, parent: QModelIndex = ...) -> bool:
self.beginRemoveRows(parent, row, row + count - 1)
del self._expressions[row:row + count]
self.endRemoveRows()
return True
def setData(self, index: QModelIndex, value: typing.Any, role: int = ...) -> bool:
if not index.isValid():
return False
value = fromEditableExpr(value.lower())
if not isValidExpression(value):
return False
if role == Qt.EditRole:
self._expressions[index.row()] = value
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index: QModelIndex) -> int:
if index.isValid():
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsEditable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDropEnabled
def supportedDropActions(self) -> int:
return Qt.MoveAction
def canDropMimeData(self, data: QMimeData, action: int, row: int, column: int, parent: QModelIndex) -> bool:
return action == Qt.MoveAction and data.hasText()
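    # Drag-and-drop transfers expressions as newline-separated plain text:
    # mimeData() serializes the dragged rows, dropMimeData() re-inserts them
    # at the drop position.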
def mimeData(self, indexes: typing.List[QModelIndex]) -> QMimeData:
mime_data = QMimeData()
expressions = []
for index in indexes:
if index.isValid():
text = toEditableExpr(self.data(index, ExpressionRole))
expressions.append(text)
mime_data.setText('\n'.join(expressions))
return mime_data
def dropMimeData(self, data: QMimeData, action: int, row: int, column: int, parent: QModelIndex) -> bool:
if not self.canDropMimeData(data, action, row, column, parent):
return False
data = data.text().split('\n')
for value in data:
if row < 0:
row = self.rowCount(QModelIndex())
self.insertRow(row, QModelIndex())
else:
self.insertRow(row, QModelIndex())
index = self.index(row, 0, QModelIndex())
text = fromEditableExpr(value.lower())
self.setData(index, text, Qt.EditRole)
row += 1
return True
|
the-stack_0_12882 | import pytest
from numpy.testing import assert_allclose
from sklearn import __version__
from sklearn.exceptions import NotFittedError
from pysindy import FourierLibrary
from pysindy import SINDy
from pysindy import STLSQ
from pysindy.deeptime import SINDyEstimator
from pysindy.deeptime import SINDyModel
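# These tests cover the deeptime-style wrapper API: SINDyEstimator should only
# expose a SINDyModel after fitting, and the fetched model should match an
# equivalently fitted SINDy instance.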
def test_estimator_has_model(data_lorenz):
x, t = data_lorenz
estimator = SINDyEstimator()
assert not estimator.has_model
estimator.fit(x, t=t)
assert estimator.has_model
def test_estimator_fetch_model(data_lorenz):
x, t = data_lorenz
estimator = SINDyEstimator()
assert estimator.fetch_model() is None
estimator.fit(x, t=t)
assert isinstance(estimator.fetch_model(), SINDyModel)
def test_model_sindy_equivalence(data_lorenz_c_1d):
x, t, u, _ = data_lorenz_c_1d
model = SINDyEstimator().fit(x, t=t, u=u).fetch_model()
sindy_model = SINDy().fit(x, t=t, u=u)
assert_allclose(model.coefficients(), sindy_model.coefficients())
print(sindy_model.n_features_in_)
if float(__version__[:3]) >= 1.0:
assert model.n_features_in_ == sindy_model.n_features_in_
else:
assert model.n_input_features_ == sindy_model.n_input_features_
assert model.n_output_features_ == sindy_model.n_output_features_
assert model.n_control_features_ == sindy_model.n_control_features_
def test_model_has_sindy_methods(data_lorenz):
x, t = data_lorenz
model = SINDyEstimator().fit(x, t=t).fetch_model()
assert hasattr(model, "predict")
assert hasattr(model, "simulate")
assert hasattr(model, "score")
assert hasattr(model, "print")
assert hasattr(model, "equations")
def test_model_unfitted_library(data_derivative_2d):
x, x_dot = data_derivative_2d
optimizer = STLSQ().fit(x, x_dot)
library = FourierLibrary()
with pytest.raises(NotFittedError):
SINDyModel(optimizer, library)
def test_model_unfitted_optimizer(data_lorenz):
x, t = data_lorenz
optimizer = STLSQ()
library = FourierLibrary().fit(x)
with pytest.raises(NotFittedError):
SINDyModel(optimizer, library)
def test_model_copy(data_lorenz):
x, t = data_lorenz
model = SINDyEstimator().fit(x, t=t).fetch_model()
model_copy = model.copy()
assert model is not model_copy
|
the-stack_0_12883 | #!/usr/bin/env python3
#
# Constants for the generation of patches for CBMC proofs.
#
# Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
PATCHES_DIR = os.path.dirname(os.path.abspath(__file__))
shared_prefix = [
"."
]
shared_prefix_port = [
"..", "..", "FreeRTOS-Kernel", "portable", "MSVC-MingW"
]
absolute_prefix = os.path.abspath(os.path.join(PATCHES_DIR, *shared_prefix))
absolute_prefix_port = os.path.abspath(os.path.join(PATCHES_DIR, *shared_prefix_port))
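# Header files referenced by the CBMC proof patch generation: the proof
# configuration headers plus the MSVC-MingW port macro header.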
HEADERS = [os.path.join(absolute_prefix, "FreeRTOSConfig.h"),
os.path.join(absolute_prefix, "FreeRTOSIPConfig.h"),
os.path.join(absolute_prefix_port, "portmacro.h")]
|
the-stack_0_12884 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate the sprites tfrecords from raw_images."""
import os
import random
import re
import sys
import numpy as np
import scipy.misc
import tensorflow as tf
tf.flags.DEFINE_string('data_filepattern', '', 'The raw images.')
tf.flags.DEFINE_string('out_file', '',
'File name for the tfrecord output.')
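# Input frames are expected to be named image_<character>_<pose>_<frame>.jpg;
# see the regular expression in _read_images().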
def _read_images():
"""Read images from image files into data structure."""
sprites = dict()
files = tf.gfile.Glob(tf.flags.FLAGS.data_filepattern)
for f in files:
image = scipy.misc.imread(f)
m = re.search('image_([0-9]+)_([0-9]+)_([0-9]+).jpg', os.path.basename(f))
if m.group(1) not in sprites:
sprites[m.group(1)] = dict()
character = sprites[m.group(1)]
if m.group(2) not in character:
character[m.group(2)] = dict()
pose = character[m.group(2)]
pose[int(m.group(3))] = image
return sprites
def _images_to_example(image, image2):
"""Convert 2 consecutive image to a SequenceExample."""
example = tf.SequenceExample()
feature_list = example.feature_lists.feature_list['moving_objs']
feature = feature_list.feature.add()
feature.float_list.value.extend(np.reshape(image, [-1]).tolist())
feature = feature_list.feature.add()
feature.float_list.value.extend(np.reshape(image2, [-1]).tolist())
return example
def generate_input():
"""Generate tfrecords."""
sprites = _read_images()
sys.stderr.write('Finish reading images.\n')
train_writer = tf.python_io.TFRecordWriter(
tf.flags.FLAGS.out_file.replace('sprites', 'sprites_train'))
test_writer = tf.python_io.TFRecordWriter(
tf.flags.FLAGS.out_file.replace('sprites', 'sprites_test'))
train_examples = []
test_examples = []
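  # Characters with id < 24 are held out for the test split; the rest are used
  # for training.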
for i in sprites:
if int(i) < 24:
examples = test_examples
else:
examples = train_examples
character = sprites[i]
for j in character.keys():
pose = character[j]
for k in xrange(1, len(pose), 1):
image = pose[k]
image2 = pose[k+1]
examples.append(_images_to_example(image, image2))
sys.stderr.write('Finish generating examples: %d, %d.\n' %
(len(train_examples), len(test_examples)))
random.shuffle(train_examples)
_ = [train_writer.write(ex.SerializeToString()) for ex in train_examples]
_ = [test_writer.write(ex.SerializeToString()) for ex in test_examples]
def main(_):
generate_input()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_12885 | from __future__ import unicode_literals
import logging
import traceback
from django.core.paginator import Paginator
from django.http import HttpResponseServerError, Http404
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.diffviewer.diffutils import (get_diff_files,
populate_diff_chunks,
get_enable_highlighting)
from reviewboard.diffviewer.errors import UserVisibleError
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.diffviewer.renderers import get_diff_renderer
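# Collapse state is resolved in order of precedence: an explicit ?expand=1 or
# ?collapse=1 query parameter wins, then the 'collapsediffs' cookie, and the
# default is to collapse.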
def get_collapse_diff(request):
if request.GET.get('expand', False):
return False
elif request.GET.get('collapse', False):
return True
elif 'collapsediffs' in request.COOKIES:
return (request.COOKIES['collapsediffs'] == "True")
else:
return True
class DiffViewerView(TemplateView):
"""Renders the main diff viewer.
This renders the diff viewer for a given DiffSet (or an interdiff
between two DiffSets). It handles loading information on the diffs,
generating the side-by-side view, and pagination.
The view expects the following parameters to be provided:
* diffset
- The DiffSet to render.
The following may also be provided:
* interdiffset
- A DiffSet object representing the other end of an interdiff range.
The following query parameters can be passed in on the URL:
* ?expand=1
- Expands all files within the diff viewer.
* ?collapse=1
- Collapses all files within the diff viewer, showing only
modifications and a few lines of context.
* ?file=<id>
- Renders only the FileDiff represented by the provided ID.
* ?page=<pagenum>
- Renders diffs found on the given page number, if the diff viewer
is paginated.
"""
template_name = 'diffviewer/view_diff.html'
fragment_error_template_name = 'diffviewer/diff_fragment_error.html'
def get(self, request, diffset, interdiffset=None, *args, **kwargs):
"""Handles GET requests for this view.
This will render the full diff viewer based on the provided
parameters.
The full rendering time will be logged.
If there's any exception thrown during rendering, an error page
with a traceback will be returned instead.
"""
self.collapse_diffs = get_collapse_diff(request)
if interdiffset:
logging.debug('Generating diff viewer page for interdiffset '
'ids %s-%s',
diffset.id, interdiffset.id, request=request)
else:
            logging.debug('Generating diff viewer page for diffset id %s',
diffset.id, request=request)
try:
response = super(DiffViewerView, self).get(
request, diffset=diffset, interdiffset=interdiffset,
*args, **kwargs)
if interdiffset:
logging.debug('Done generating diff viewer page for '
'interdiffset ids %s-%s',
diffset.id, interdiffset.id, request=request)
else:
                logging.debug('Done generating diff viewer page for diffset '
'id %s',
diffset.id, request=request)
return response
except Exception as e:
return exception_traceback(request, e, self.template_name)
def render_to_response(self, *args, **kwargs):
"""Renders the page to an HttpResponse.
This renders the diff viewer page, based on the context data
generated, and sets cookies before returning an HttpResponse to
the client.
"""
response = super(DiffViewerView, self).render_to_response(*args,
**kwargs)
response.set_cookie('collapsediffs', self.collapse_diffs)
return response
def get_context_data(self, diffset, interdiffset, extra_context={},
**kwargs):
"""Calculates and returns data used for rendering the diff viewer.
This handles all the hard work of generating the data backing the
side-by-side diff, handling pagination, and more. The data is
collected into a context dictionary and returned for rendering.
"""
files = get_diff_files(diffset, None, interdiffset,
request=self.request)
# Break the list of files into pages
siteconfig = SiteConfiguration.objects.get_current()
paginator = Paginator(files,
siteconfig.get('diffviewer_paginate_by'),
siteconfig.get('diffviewer_paginate_orphans'))
page_num = int(self.request.GET.get('page', 1))
if self.request.GET.get('file', False):
file_id = int(self.request.GET['file'])
for i, f in enumerate(files):
if f['filediff'].pk == file_id:
page_num = i // paginator.per_page + 1
if page_num > paginator.num_pages:
page_num = paginator.num_pages
break
page = paginator.page(page_num)
diff_context = {
'revision': {
'revision': diffset.revision,
'is_interdiff': interdiffset is not None,
'interdiff_revision': (interdiffset.revision
if interdiffset else None),
},
'pagination': {
'is_paginated': page.has_other_pages(),
'current_page': page.number,
'pages': paginator.num_pages,
'page_numbers': paginator.page_range,
'has_next': page.has_next(),
'has_previous': page.has_previous(),
},
}
if page.has_next():
diff_context['pagination']['next_page'] = page.next_page_number()
if page.has_previous():
diff_context['pagination']['previous_page'] = \
page.previous_page_number()
context = dict({
'diff_context': diff_context,
'diffset': diffset,
'interdiffset': interdiffset,
'diffset_pair': (diffset, interdiffset),
'files': page.object_list,
'collapseall': self.collapse_diffs,
}, **extra_context)
return context
class DiffFragmentView(View):
"""Renders a fragment from a file in the diff viewer.
Based on the diffset data and other arguments provided, this will render
a fragment from a file in a diff. This may be the entire file, or some
chunk within.
The view expects the following parameters to be provided:
* diffset_or_id
- A DiffSet object or the ID for one.
* filediff_id
- The ID of a FileDiff within the DiffSet.
The following may also be provided:
* interdiffset_or_id
- A DiffSet object or the ID for one representing the other end of
an interdiff range.
* chunkindex
- The index (0-based) of the chunk to render. If left out, the
entire file will be rendered.
The caller may also pass ``?lines-of-context=`` as a query parameter to
the URL to indicate how many lines of context should be provided around
the chunk.
"""
template_name = 'diffviewer/diff_file_fragment.html'
error_template_name = 'diffviewer/diff_fragment_error.html'
def get(self, request, *args, **kwargs):
"""Handles GET requests for this view.
This will create the renderer for the diff fragment, render it, and
return it.
If there's an error when rendering the diff fragment, an error page
will be rendered and returned instead.
"""
context = self.get_context_data(**kwargs)
try:
renderer = self.create_renderer(context, *args, **kwargs)
return renderer.render_to_response()
except Http404:
raise
except Exception as e:
return exception_traceback(
self.request, e, self.error_template_name,
extra_context={
'file': self._get_requested_diff_file(False),
})
def create_renderer(self, context, diffset_or_id, filediff_id,
interdiffset_or_id=None, chunkindex=None,
*args, **kwargs):
"""Creates the renderer for the diff.
This calculates all the state and data needed for rendering, and
constructs a DiffRenderer with that data. That renderer is then
returned, ready for rendering.
If there's an error in looking up the necessary information, this
may raise a UserVisibleError (best case), or some other form of
Exception.
"""
# Depending on whether we're invoked from a URL or from a wrapper
# with precomputed diffsets, we may be working with either IDs or
# actual objects. If they're objects, just use them as-is. Otherwise,
# if they're IDs, we want to grab them both (if both are provided)
# in one go, to save on an SQL query.
self.diffset = None
self.interdiffset = None
diffset_ids = []
if isinstance(diffset_or_id, DiffSet):
self.diffset = diffset_or_id
else:
diffset_ids.append(diffset_or_id)
if interdiffset_or_id:
if isinstance(interdiffset_or_id, DiffSet):
self.interdiffset = interdiffset_or_id
else:
diffset_ids.append(interdiffset_or_id)
if diffset_ids:
diffsets = DiffSet.objects.filter(pk__in=diffset_ids)
if len(diffsets) != len(diffset_ids):
raise Http404
for temp_diffset in diffsets:
if temp_diffset.pk == diffset_or_id:
self.diffset = temp_diffset
elif temp_diffset.pk == interdiffset_or_id:
self.interdiffset = temp_diffset
else:
assert False
self.highlighting = get_enable_highlighting(self.request.user)
self.filediff = get_object_or_404(FileDiff, pk=filediff_id,
diffset=self.diffset)
# Store this so we don't end up causing an SQL query later when looking
# this up.
self.filediff.diffset = self.diffset
try:
lines_of_context = self.request.GET.get('lines-of-context', '')
lines_of_context = [int(i) for i in lines_of_context.split(',', 1)]
except (TypeError, ValueError):
lines_of_context = None
if chunkindex is not None:
try:
chunkindex = int(chunkindex)
except (TypeError, ValueError):
chunkindex = None
if lines_of_context:
collapseall = True
elif chunkindex is not None:
# If we're currently expanding part of a chunk, we want to render
# the entire chunk without any lines collapsed. In the case of
# showing a range of lines, we're going to get all chunks and then
# only show the range. This is so that we won't have separate
# cached entries for each range.
collapseall = False
else:
collapseall = get_collapse_diff(self.request)
self.diff_file = self._get_requested_diff_file()
if not self.diff_file:
raise UserVisibleError(
_('Internal error. Unable to locate file record for '
'filediff %s')
% self.filediff.pk)
return get_diff_renderer(
self.diff_file,
chunk_index=chunkindex,
highlighting=self.highlighting,
collapse_all=collapseall,
lines_of_context=lines_of_context,
extra_context=context,
template_name=self.template_name)
def get_context_data(self, *args, **kwargs):
"""Returns context data used for rendering the view.
This can be overridden by subclasses to provide additional data for the
view.
"""
return {}
def _get_requested_diff_file(self, get_chunks=True):
"""Fetches information on the requested diff.
This will look up information on the diff that's to be rendered
and return it, if found. It may also augment it with additional
data.
If get_chunks is True, the diff file information will include chunks
for rendering. Otherwise, it will just contain generic information
from the database.
"""
files = get_diff_files(self.diffset, self.filediff, self.interdiffset,
request=self.request)
if get_chunks:
populate_diff_chunks(files, self.highlighting,
request=self.request)
if files:
assert len(files) == 1
file = files[0]
if 'index' in self.request.GET:
file['index'] = self.request.GET.get('index')
return file
return None
def exception_traceback_string(request, e, template_name, extra_context={}):
context = {'error': e}
context.update(extra_context)
if e.__class__ is not UserVisibleError:
context['trace'] = traceback.format_exc()
if request:
request_context = RequestContext(request, context)
else:
request_context = context
return render_to_string(template_name, request_context)
def exception_traceback(request, e, template_name, extra_context={}):
return HttpResponseServerError(
exception_traceback_string(request, e, template_name, extra_context))
|
the-stack_0_12886 | def extract_items(list):
result = []
for index in range(0, len(list)):
bottom = list[0:index]
top = list[index+1:]
item = list[index]
result.append((item, bottom + top))
return result
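# perms() builds all permutations recursively: extract_items() pairs each
# element with the remaining list, and every permutation of the remainder is
# prefixed with that element.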
def perms(list):
if list == []:
return [[]]
result = []
for (item, rest) in extract_items(list):
for p in perms(rest):
result.append([item] + p)
return result
for p in perms(list(range(4))):
print(p)
|
the-stack_0_12889 | # Copyright (c) 2017 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
import testtools
from neutron_lib.utils import test
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common import ssh
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.vpnaas.scenario import base_vpnaas as base
CONF = config.CONF
# NOTE(huntxu): This is a workaround due to a upstream bug [1].
# VPNaaS 4in6 and 6in4 is not working properly with LibreSwan 3.19+.
# In OpenStack zuul checks the base CentOS 7 node is using Libreswan 3.20 on
# CentOS 7.4. So we need to provide a way to skip the 4in6 and 6in4 test cases
# for zuul.
#
# Once the upstream bug gets fixed and the base node uses a newer version of
# Libreswan with that fix, we can remove this.
#
# [1] https://github.com/libreswan/libreswan/issues/175
CONF.register_opt(
cfg.BoolOpt('skip_4in6_6in4_tests',
default=False,
help='Whether to skip 4in6 and 6in4 test cases.'),
'neutron_vpnaas_plugin_options'
)
class Vpnaas(base.BaseTempestTestCase):
"""Test the following topology
+-------------------+
| public |
| network |
| |
+-+---------------+-+
| |
| |
+-------+-+ +-+-------+
| LEFT | | RIGHT |
| router | <--VPN--> | router |
| | | |
+----+----+ +----+----+
| |
+----+----+ +----+----+
| LEFT | | RIGHT |
| network | | network |
| | | |
+---------+ +---------+
"""
credentials = ['primary', 'admin']
inner_ipv6 = False
outer_ipv6 = False
@classmethod
@utils.requires_ext(extension="vpnaas", service="network")
def resource_setup(cls):
super(Vpnaas, cls).resource_setup()
# common
cls.keypair = cls.create_keypair()
cls.secgroup = cls.os_primary.network_client.create_security_group(
name=data_utils.rand_name('secgroup-'))['security_group']
cls.security_groups.append(cls.secgroup)
cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
data_utils.rand_name("ipsec-policy-"))
cls.extra_subnet_attributes = {}
if cls.inner_ipv6:
cls.create_v6_pingable_secgroup_rule(
secgroup_id=cls.secgroup['id'])
cls.extra_subnet_attributes['ipv6_address_mode'] = 'slaac'
cls.extra_subnet_attributes['ipv6_ra_mode'] = 'slaac'
# LEFT
cls.router = cls.create_router(
data_utils.rand_name('left-router'),
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
cls.network = cls.create_network(network_name='left-network')
ip_version = 6 if cls.inner_ipv6 else 4
v4_cidr = netaddr.IPNetwork('10.20.0.0/24')
v6_cidr = netaddr.IPNetwork('2001:db8:0:2::/64')
cidr = v6_cidr if cls.inner_ipv6 else v4_cidr
cls.subnet = cls.create_subnet(
cls.network, ip_version=ip_version, cidr=cidr, name='left-subnet',
**cls.extra_subnet_attributes)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
# Gives an internal IPv4 subnet for floating IP to the left server,
# we use it to ssh into the left server.
if cls.inner_ipv6:
v4_subnet = cls.create_subnet(
cls.network, ip_version=4, name='left-v4-subnet')
cls.create_router_interface(cls.router['id'], v4_subnet['id'])
# RIGHT
cls._right_network, cls._right_subnet, cls._right_router = \
cls._create_right_network()
@classmethod
def create_v6_pingable_secgroup_rule(cls, secgroup_id=None, client=None):
# NOTE(huntxu): This method should be moved into the base class, along
# with the v4 version.
"""This rule is intended to permit inbound ping6"""
rule_list = [{'protocol': 'ipv6-icmp',
'direction': 'ingress',
'port_range_min': 128, # type
'port_range_max': 0, # code
'ethertype': 'IPv6',
'remote_ip_prefix': '::/0'}]
client = client or cls.os_primary.network_client
cls.create_secgroup_rules(rule_list, client=client,
secgroup_id=secgroup_id)
@classmethod
def _create_right_network(cls):
router = cls.create_router(
data_utils.rand_name('right-router'),
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
network = cls.create_network(network_name='right-network')
v4_cidr = netaddr.IPNetwork('10.10.0.0/24')
v6_cidr = netaddr.IPNetwork('2001:db8:0:1::/64')
cidr = v6_cidr if cls.inner_ipv6 else v4_cidr
ip_version = 6 if cls.inner_ipv6 else 4
subnet = cls.create_subnet(
network, ip_version=ip_version, cidr=cidr, name='right-subnet',
**cls.extra_subnet_attributes)
cls.create_router_interface(router['id'], subnet['id'])
return network, subnet, router
def _create_server(self, create_floating_ip=True, network=None):
if network is None:
network = self.network
port = self.create_port(network, security_groups=[self.secgroup['id']])
if create_floating_ip:
fip = self.create_and_associate_floatingip(port['id'])
else:
fip = None
server = self.create_server(
flavor_ref=CONF.compute.flavor_ref,
image_ref=CONF.compute.image_ref,
key_name=self.keypair['name'],
networks=[{'port': port['id']}])['server']
waiters.wait_for_server_status(self.os_primary.servers_client,
server['id'],
constants.SERVER_STATUS_ACTIVE)
return {'port': port, 'fip': fip, 'server': server}
def _setup_vpn(self):
sites = [
dict(name="left", network=self.network, subnet=self.subnet,
router=self.router),
dict(name="right", network=self._right_network,
subnet=self._right_subnet, router=self._right_router),
]
psk = data_utils.rand_name('mysecret')
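        # First pass: create a VPN service on each router. Second pass: create
        # the IPsec site connections pointing the two sites at each other.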
for i in range(0, 2):
site = sites[i]
site['vpnservice'] = self.create_vpnservice(
site['subnet']['id'], site['router']['id'],
name=data_utils.rand_name('%s-vpnservice' % site['name']))
for i in range(0, 2):
site = sites[i]
vpnservice = site['vpnservice']
peer = sites[1 - i]
if self.outer_ipv6:
peer_address = peer['vpnservice']['external_v6_ip']
if not peer_address:
msg = "Public network must have an IPv6 subnet."
raise self.skipException(msg)
else:
peer_address = peer['vpnservice']['external_v4_ip']
self.create_ipsec_site_connection(
self.ikepolicy['id'],
self.ipsecpolicy['id'],
vpnservice['id'],
peer_address=peer_address,
peer_id=peer_address,
peer_cidrs=[peer['subnet']['cidr']],
psk=psk,
name=data_utils.rand_name(
'%s-ipsec-site-connection' % site['name']))
def _get_ip_on_subnet_for_port(self, port, subnet_id):
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] == subnet_id:
return fixed_ip['ip_address']
msg = "Cannot get IP address on specified subnet %s for port %r." % (
subnet_id, port)
raise self.fail(msg)
@test.unstable_test("bug 1882220")
def _test_vpnaas(self):
# RIGHT
right_server = self._create_server(network=self._right_network,
create_floating_ip=False)
right_ip = self._get_ip_on_subnet_for_port(
right_server['port'], self._right_subnet['id'])
# LEFT
left_server = self._create_server()
ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
# check LEFT -> RIGHT connectivity via VPN
self.check_remote_connectivity(ssh_client, right_ip,
should_succeed=False)
self._setup_vpn()
self.check_remote_connectivity(ssh_client, right_ip)
# Test VPN traffic and floating IP traffic don't interfere each other.
if not self.inner_ipv6:
# Assign a floating-ip and check connectivity.
# This is NOT via VPN.
fip = self.create_and_associate_floatingip(
right_server['port']['id'])
self.check_remote_connectivity(ssh_client,
fip['floating_ip_address'])
# check LEFT -> RIGHT connectivity via VPN again, to ensure
# the above floating-ip doesn't interfere the traffic.
self.check_remote_connectivity(ssh_client, right_ip)
class Vpnaas4in4(Vpnaas):
@decorators.idempotent_id('aa932ab2-63aa-49cf-a2a0-8ae71ac2bc24')
def test_vpnaas(self):
self._test_vpnaas()
class Vpnaas4in6(Vpnaas):
outer_ipv6 = True
@decorators.idempotent_id('2d5f18dc-6186-4deb-842b-051325bd0466')
@testtools.skipUnless(CONF.network_feature_enabled.ipv6,
'IPv6 tests are disabled.')
@testtools.skipIf(
CONF.neutron_vpnaas_plugin_options.skip_4in6_6in4_tests,
'VPNaaS 4in6 test is skipped.')
def test_vpnaas_4in6(self):
self._test_vpnaas()
class Vpnaas6in4(Vpnaas):
inner_ipv6 = True
@decorators.idempotent_id('10febf33-c5b7-48af-aa13-94b4fb585a55')
@testtools.skipUnless(CONF.network_feature_enabled.ipv6,
'IPv6 tests are disabled.')
@testtools.skipIf(
CONF.neutron_vpnaas_plugin_options.skip_4in6_6in4_tests,
'VPNaaS 6in4 test is skipped.')
def test_vpnaas_6in4(self):
self._test_vpnaas()
class Vpnaas6in6(Vpnaas):
inner_ipv6 = True
outer_ipv6 = True
@decorators.idempotent_id('8b503ffc-aeb0-4938-8dba-73c7323e276d')
@testtools.skipUnless(CONF.network_feature_enabled.ipv6,
'IPv6 tests are disabled.')
def test_vpnaas_6in6(self):
self._test_vpnaas()
|
the-stack_0_12890 | # -*- coding: utf-8 -*-
"""Admin index for Django."""
# :copyright: (c) 2017, Maykin Media BV.
# All rights reserved.
# :license: BSD (3 Clause), see LICENSE for more details.
from __future__ import absolute_import, unicode_literals
import re
from collections import namedtuple
__version__ = "1.4.0"
__author__ = "Joeri Bekker"
__contact__ = "[email protected]"
__homepage__ = "https://github.com/maykinmedia/django-admin-index"
__docformat__ = "restructuredtext"
# -eof meta-
version_info_t = namedtuple(
"version_info_t", ("major", "minor", "patch", "releaselevel", "serial",)
)
# bumpversion can only search for {current_version}
# so we have to parse the version here.
_temp = re.match(r"(\d+)\.(\d+).(\d+)(.+)?", __version__).groups()
VERSION = version_info = version_info_t(
int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or "", ""
)
del _temp
del re
__all__ = []
default_app_config = "django_admin_index.apps.AdminIndexConfig"
|
the-stack_0_12892 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import mock
import re
import requests
import sys
import unittest
from six.moves.urllib.parse import urlparse
from blinkpy.common.host_mock import MockHost
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.test_result_sink import TestResultSink
from blinkpy.web_tests.models import test_results
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.port.test import add_manifest_to_mock_filesystem
from blinkpy.web_tests.port.test import TestPort
from blinkpy.web_tests.port.test import WEB_TEST_DIR
class TestResultSinkTestBase(unittest.TestCase):
def setUp(self):
        super(TestResultSinkTestBase, self).setUp()
self.port = TestPort(MockHost())
def luci_context(self, **section_values):
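        # Write the given sections to a temporary file and point the
        # LUCI_CONTEXT environment variable at it, so CreateTestResultSink()
        # can pick the configuration up.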
if not section_values:
return
host = self.port.host
f, fname = host.filesystem.open_text_tempfile()
json.dump(section_values, f)
f.close()
host.environ['LUCI_CONTEXT'] = f.path
class TestCreateTestResultSink(TestResultSinkTestBase):
def test_without_luci_context(self):
self.assertIsNone(CreateTestResultSink(self.port))
def test_without_result_sink_section(self):
self.luci_context(app={'foo': 'bar'})
self.assertIsNone(CreateTestResultSink(self.port))
def test_auth_token(self):
ctx = {'address': 'localhost:123', 'auth_token': 'secret'}
self.luci_context(result_sink=ctx)
rs = CreateTestResultSink(self.port)
self.assertIsNotNone(rs)
self.assertEqual(rs._session.headers['Authorization'],
'ResultSink ' + ctx['auth_token'])
def test_with_result_sink_section(self):
ctx = {'address': 'localhost:123', 'auth_token': 'secret'}
self.luci_context(result_sink=ctx)
rs = CreateTestResultSink(self.port)
self.assertIsNotNone(rs)
response = requests.Response()
response.status_code = 200
with mock.patch.object(rs._session, 'post',
return_value=response) as m:
rs.sink(True, test_results.TestResult('test'), None)
self.assertTrue(m.called)
self.assertEqual(
urlparse(m.call_args[0][0]).netloc, ctx['address'])
class TestResultSinkMessage(TestResultSinkTestBase):
"""Tests ResulkSink.sink."""
def setUp(self):
super(TestResultSinkMessage, self).setUp()
patcher = mock.patch.object(TestResultSink, '_send')
self.mock_send = patcher.start()
self.addCleanup(patcher.stop)
ctx = {'address': 'localhost:123', 'auth_token': 'super-secret'}
self.luci_context(result_sink=ctx)
self.rs = CreateTestResultSink(self.port)
def sink(self, expected, test_result, expectations=None):
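        # Helper: invoke the sink with _send mocked out and return the single
        # test result dict that was passed to it.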
self.rs.sink(expected, test_result, expectations)
self.assertTrue(self.mock_send.called)
return self.mock_send.call_args[0][0]['testResults'][0]
def test_sink(self):
tr = test_results.TestResult(test_name='test-name')
tr.total_run_time = 123.456
tr.type = ResultType.Crash
sent_data = self.sink(True, tr)
self.assertEqual(sent_data['testId'], 'test-name')
self.assertEqual(sent_data['expected'], True)
self.assertEqual(sent_data['status'], 'CRASH')
self.assertEqual(sent_data['duration'], '123.456s')
def test_sink_with_expectations(self):
class FakeTestExpectation(object):
def __init__(self):
self.raw_results = ['Failure']
class FakeExpectations(object):
def __init__(self):
self.system_condition_tags = ['tag1', 'tag2']
def get_expectations(self, _):
return FakeTestExpectation()
# Values should be extracted from expectations.
tr = test_results.TestResult(test_name='test-name')
tr.type = ResultType.Crash
expectations = FakeExpectations()
expected_tags = [
{
'key': 'test_name',
'value': 'test-name'
},
{
'key': 'web_tests_device_failed',
'value': 'False'
},
{
'key': 'web_tests_result_type',
'value': 'CRASH'
},
{
'key': 'web_tests_flag_specific_config_name',
'value': ''
},
{
'key': 'web_tests_used_expectations_file',
'value': 'TestExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'WebDriverExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'NeverFixTests',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'StaleTestExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'SlowTests',
},
{
'key': 'raw_typ_expectation',
'value': 'Failure'
},
{
'key': 'typ_tag',
'value': 'tag1'
},
{
'key': 'typ_tag',
'value': 'tag2'
},
]
sent_data = self.sink(True, tr, expectations)
self.assertEqual(sent_data['tags'], expected_tags)
def test_sink_without_expectations(self):
tr = test_results.TestResult(test_name='test-name')
tr.type = ResultType.Crash
expected_tags = [
{
'key': 'test_name',
'value': 'test-name'
},
{
'key': 'web_tests_device_failed',
'value': 'False'
},
{
'key': 'web_tests_result_type',
'value': 'CRASH'
},
{
'key': 'web_tests_flag_specific_config_name',
'value': ''
},
{
'key': 'web_tests_used_expectations_file',
'value': 'TestExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'WebDriverExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'NeverFixTests',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'StaleTestExpectations',
},
{
'key': 'web_tests_used_expectations_file',
'value': 'SlowTests',
},
]
sent_data = self.sink(True, tr)
self.assertEqual(sent_data['tags'], expected_tags)
def test_test_metadata(self):
tr = test_results.TestResult('')
base_path = '//' + RELATIVE_WEB_TESTS
tr.test_name = "test-name"
self.assertDictEqual(
self.sink(True, tr)['testMetadata'],
{
'name': 'test-name',
'location': {
'repo': 'https://chromium.googlesource.com/chromium/src',
'fileName': base_path + 'test-name',
},
},
)
tr.test_name = "///test-name"
self.assertDictEqual(
self.sink(True, tr)['testMetadata'],
{
'name': '///test-name',
'location': {
'repo': 'https://chromium.googlesource.com/chromium/src',
'fileName': base_path + '///test-name',
},
},
)
def test_device_failure(self):
tr = test_results.TestResult(test_name='test-name')
tr.type = ResultType.Failure
tr.device_failed = True
sent_data = self.sink(True, tr)
# If the device failed, 'expected' and 'status' must be False and 'ABORT'
self.assertEqual(sent_data['expected'], False)
self.assertEqual(sent_data['status'], 'ABORT')
def test_timeout(self):
tr = test_results.TestResult(test_name='test-name')
tr.type = ResultType.Timeout
sent_data = self.sink(True, tr)
# Timeout is considered as 'ABORT'
self.assertEqual(sent_data['status'], 'ABORT')
def test_artifacts(self):
tr = test_results.TestResult(test_name='test-name')
tr.artifacts.AddArtifact('test-image.png', '/tmp/test-image.png', True)
tr.artifacts.AddArtifact('stdout', '/tmp/stdout', True)
sent_data = self.sink(True, tr)
self.assertDictEqual(
sent_data['artifacts'], {
'test-image.png': {
'filePath': '/tmp/test-image.png'
},
'stdout': {
'filePath': '/tmp/stdout'
}
})
def test_artifacts_with_duplicate_paths(self):
tr = test_results.TestResult(test_name='test-name')
tr.artifacts.AddArtifact('artifact', '/tmp/foo', False)
tr.artifacts.AddArtifact('artifact', '/tmp/bar', False)
sent_data = self.sink(True, tr)
self.assertDictEqual(
sent_data['artifacts'], {
'artifact': {
'filePath': '/tmp/foo'
},
'artifact-1': {
'filePath': '/tmp/bar'
}
})
def test_summary_html(self):
tr = test_results.TestResult(test_name='test-name')
tr.artifacts.AddArtifact('stderr', '/tmp/stderr', False)
tr.artifacts.AddArtifact('crash_log', '/tmp/crash_log', False)
tr.artifacts.AddArtifact('command', '/tmp/cmd', False)
sent_data = self.sink(True, tr)
p = re.compile(
'<text-artifact artifact-id="(command|stderr|crash_log)" />')
self.assertListEqual(
p.findall(sent_data['summaryHtml']),
# The artifact tags should be sorted by the artifact names.
['command', 'crash_log', 'stderr'],
)
def assertFilename(self, test_name, expected_filename):
sent_data = self.sink(True, test_results.TestResult(test_name))
self.assertEqual(sent_data['testMetadata']['location']['fileName'],
'//' + RELATIVE_WEB_TESTS + expected_filename)
def test_location_filename(self):
self.assertFilename('real/test.html', 'real/test.html')
# TestPort.virtual_test_suites() has a set of hard-coded virtualized
# tests, and a test name must start with one of the virtual prefixes
# and base in order for it to be recognized as a virtual test.
self.assertFilename(
'virtual/virtual_passes/passes/does_not_exist.html',
'passes/does_not_exist.html')
self.port.host.filesystem.write_text_file(
self.port.host.filesystem.join(WEB_TEST_DIR, 'virtual',
'virtual_passes', 'passes',
'exists.html'),
'body',
)
self.assertFilename('virtual/virtual_passes/passes/exists.html',
'virtual/virtual_passes/passes/exists.html')
def test_wpt_location_filename(self):
add_manifest_to_mock_filesystem(self.port)
self.assertFilename(
'external/wpt/html/parse.html?run_type=uri',
'external/wpt/html/parse.html',
)
self.assertFilename(
'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes.html',
'external/wpt/dom/ranges/Range-attributes.html',
)
|
the-stack_0_12893 | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains tools and decorators for registering batch transforms."""
# pylint: disable=too-few-public-methods
import copy
import functools
import inspect
import os
import types
import warnings
import pennylane as qml
class batch_transform:
r"""Class for registering a tape transform that takes a tape, and outputs
a batch of tapes to be independently executed on a quantum device.
Examples of such transforms include quantum gradient shift rules (such
as finite-differences and the parameter-shift rule) and metrics such as
the quantum Fisher information matrix.
Args:
transform_fn (function): The function to register as the batch tape transform.
It can have an arbitrary number of arguments, but the first argument
**must** be the input tape.
expand_fn (function): An expansion function (if required) to be applied to the
input tape before the transformation takes place.
It **must** take the same input arguments as ``transform_fn``.
differentiable (bool): Specifies whether the transform is differentiable or
not. A transform may be non-differentiable for several reasons:
- It does not use an autodiff framework for its tensor manipulations;
- It returns a non-differentiable or non-numeric quantity, such as
a boolean, string, or integer.
In such a case, setting ``differentiable=False`` instructs the decorator
to mark the output as 'constant', reducing potential overhead.
**Example**
A valid batch tape transform is a function that satisfies the following:
- The first argument must be a tape.
- Depending on the structure of this input tape, various quantum operations, functions,
and templates may be called.
- Any internal classical processing should use the ``qml.math`` module to ensure
the transform is differentiable.
- The transform should return a tuple containing:
* Multiple transformed tapes to be executed on a device.
* A classical processing function for post-processing the executed tape results.
This processing function should have the signature ``f(list[tensor_like]) → Any``.
If ``None``, no classical processing is applied to the results.
For example:
.. code-block:: python
@qml.batch_transform
def my_transform(tape, a, b):
'''Generates two tapes, one with all RX replaced with RY,
and the other with all RX replaced with RZ.'''
tape1 = qml.tape.QuantumTape()
tape2 = qml.tape.QuantumTape()
# loop through all operations on the input tape
for op in tape:
if op.name == "RX":
wires = op.wires
param = op.parameters[0]
with tape1:
qml.RY(a * qml.math.abs(param), wires=wires)
with tape2:
qml.RZ(b * qml.math.abs(param), wires=wires)
else:
for t in [tape1, tape2]:
with t:
qml.apply(op)
def processing_fn(results):
return qml.math.sum(qml.math.stack(results))
return [tape1, tape2], processing_fn
We can apply this transform to a quantum tape:
>>> with qml.tape.QuantumTape() as tape:
... qml.Hadamard(wires=0)
... qml.RX(-0.5, wires=0)
... qml.expval(qml.PauliX(0))
>>> tapes, fn = my_transform(tape, 0.65, 2.5)
>>> print(qml.drawer.tape_text(tapes[0], decimals=2))
0: ──H──RY(0.33)─┤ <X>
>>> print(qml.drawer.tape_text(tapes[1], decimals=2))
0: ──H──RZ(1.25)─┤ <X>
We can execute these tapes manually:
>>> dev = qml.device("default.qubit", wires=1)
>>> res = qml.execute(tapes, dev, interface="autograd", gradient_fn=qml.gradients.param_shift)
>>> print(res)
[tensor([0.94765073], requires_grad=True), tensor([0.31532236], requires_grad=True)]
Applying the processing function, we retrieve the end result of the transform:
>>> print(fn(res))
1.2629730888100839
Alternatively, we may also transform a QNode directly, using either
decorator syntax:
>>> @my_transform(0.65, 2.5)
... @qml.qnode(dev)
... def circuit(x):
... qml.Hadamard(wires=0)
... qml.RX(x, wires=0)
... return qml.expval(qml.PauliX(0))
>>> print(circuit(-0.5))
1.2629730888100839
or by transforming an existing QNode:
>>> @qml.qnode(dev)
... def circuit(x):
... qml.Hadamard(wires=0)
... qml.RX(x, wires=0)
... return qml.expval(qml.PauliX(0))
>>> circuit = my_transform(circuit, 0.65, 2.5)
>>> print(circuit(-0.5))
1.2629730888100839
Batch tape transforms are fully differentiable:
>>> x = np.array(-0.5, requires_grad=True)
>>> gradient = qml.grad(circuit)(x)
>>> print(gradient)
2.5800122591960153
.. details::
:title: Usage Details
**Expansion functions**
Tape expansion, decomposition, or manipulation may always be
        performed within the custom batch transform. However, by specifying
        a separate expansion function, it becomes possible for PennyLane to
        access this expansion function where needed via
>>> my_transform.expand_fn
The provided ``expand_fn`` must have the same input arguments as
``transform_fn`` and return a ``tape``. Following the example above:
.. code-block:: python
def expand_fn(tape, a, b):
stopping_crit = lambda obj: obj.name!="PhaseShift"
return tape.expand(depth=10, stop_at=stopping_crit)
my_transform = batch_transform(my_transform, expand_fn)
Note that:
- the transform arguments ``a`` and ``b`` must be passed to
the expansion function, and
- the expansion function must return a single tape.
"""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
if os.environ.get("SPHINX_BUILD") == "1":
# If called during a Sphinx documentation build,
# simply return the original function rather than
# instantiating the object. This allows the signature to
# be correctly displayed in the documentation.
warnings.warn(
"Batch transformations have been disabled, as a Sphinx "
"build has been detected via SPHINX_BUILD='1'. If this is not the "
"case, please set the environment variable SPHINX_BUILD='0'.",
UserWarning,
)
args[0].custom_qnode_wrapper = lambda x: x
return args[0]
return super().__new__(cls)
def __init__(self, transform_fn, expand_fn=None, differentiable=True):
if not callable(transform_fn):
raise ValueError(
f"The batch transform function to register, {transform_fn}, "
"does not appear to be a valid Python function or callable."
)
self.transform_fn = transform_fn
self.expand_fn = expand_fn
self.differentiable = differentiable
self.qnode_wrapper = self.default_qnode_wrapper
functools.update_wrapper(self, transform_fn)
def custom_qnode_wrapper(self, fn):
"""Register a custom QNode execution wrapper function
for the batch transform.
**Example**
.. code-block:: python
def my_transform(tape, *targs, **tkwargs):
...
return tapes, processing_fn
@my_transform.custom_qnode_wrapper
def my_custom_qnode_wrapper(self, qnode, targs, tkwargs):
def wrapper_fn(*args, **kwargs):
# construct QNode
qnode.construct(args, kwargs)
# apply transform to QNode's tapes
tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs)
# execute tapes and return processed result
...
return processing_fn(results)
return wrapper_fn
The custom QNode execution wrapper must have arguments
``self`` (the batch transform object), ``qnode`` (the input QNode
to transform and execute), ``targs`` and ``tkwargs`` (the transform
arguments and keyword arguments respectively).
It should return a callable object that accepts the *same* arguments
as the QNode, and returns the transformed numerical result.
The default :meth:`~.default_qnode_wrapper` method may be called
if only pre- or post-processing dependent on QNode arguments is required:
.. code-block:: python
@my_transform.custom_qnode_wrapper
def my_custom_qnode_wrapper(self, qnode, targs, tkwargs):
transformed_qnode = self.default_qnode_wrapper(qnode)
def wrapper_fn(*args, **kwargs):
args, kwargs = pre_process(args, kwargs)
res = transformed_qnode(*args, **kwargs)
...
return ...
return wrapper_fn
"""
self.qnode_wrapper = types.MethodType(fn, self)
def default_qnode_wrapper(self, qnode, targs, tkwargs):
"""A wrapper method that takes a QNode and transform arguments,
and returns a function that 'wraps' the QNode execution.
The returned function should accept the same keyword arguments as
the QNode, and return the output of applying the tape transform
to the QNode's constructed tape.
"""
transform_max_diff = tkwargs.pop("max_diff", None)
if "shots" in inspect.signature(qnode.func).parameters:
raise ValueError(
"Detected 'shots' as an argument of the quantum function to transform. "
"The 'shots' argument name is reserved for overriding the number of shots "
"taken by the device."
)
def _wrapper(*args, **kwargs):
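            # Build the QNode's tape for these arguments, apply the batch
            # transform, and execute the resulting tapes with the QNode's own
            # device and differentiation settings.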
shots = kwargs.pop("shots", False)
qnode.construct(args, kwargs)
tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs)
interface = qnode.interface
execute_kwargs = getattr(qnode, "execute_kwargs", {}).copy()
max_diff = execute_kwargs.pop("max_diff", 2)
max_diff = transform_max_diff or max_diff
gradient_fn = getattr(qnode, "gradient_fn", qnode.diff_method)
gradient_kwargs = getattr(qnode, "gradient_kwargs", {})
if interface is None or not self.differentiable:
gradient_fn = None
res = qml.execute(
tapes,
device=qnode.device,
gradient_fn=gradient_fn,
interface=interface,
max_diff=max_diff,
override_shots=shots,
gradient_kwargs=gradient_kwargs,
**execute_kwargs,
)
return processing_fn(res)
return _wrapper
def __call__(self, *targs, **tkwargs):
qnode = None
if targs:
qnode, *targs = targs
if isinstance(qnode, qml.Device):
# Input is a quantum device.
# dev = some_transform(dev, *transform_args)
return self._device_wrapper(*targs, **tkwargs)(qnode)
if isinstance(qnode, qml.tape.QuantumTape):
# Input is a quantum tape.
# tapes, fn = some_transform(tape, *transform_args)
return self._tape_wrapper(*targs, **tkwargs)(qnode)
if isinstance(qnode, (qml.QNode, qml.ExpvalCost)):
# Input is a QNode:
# result = some_transform(qnode, *transform_args)(*qnode_args)
wrapper = self.qnode_wrapper(qnode, targs, tkwargs)
wrapper = functools.wraps(qnode)(wrapper)
def _construct(args, kwargs):
qnode.construct(args, kwargs)
return self.construct(qnode.qtape, *targs, **tkwargs)
wrapper.construct = _construct
else:
# Input is not a QNode nor a quantum tape nor a device.
# Assume Python decorator syntax:
#
# result = some_transform(*transform_args)(qnode)(*qnode_args)
#
# or
#
# @some_transform(*transform_args)
# @qml.qnode(dev)
# def circuit(...):
# ...
# result = circuit(*qnode_args)
# Prepend the input to the transform args,
# and create a wrapper function.
if qnode is not None:
targs = (qnode,) + tuple(targs)
def wrapper(qnode):
if isinstance(qnode, qml.Device):
return self._device_wrapper(*targs, **tkwargs)(qnode)
if isinstance(qnode, qml.tape.QuantumTape):
return self._tape_wrapper(*targs, **tkwargs)(qnode)
_wrapper = self.qnode_wrapper(qnode, targs, tkwargs)
_wrapper = functools.wraps(qnode)(_wrapper)
def _construct(args, kwargs):
qnode.construct(args, kwargs)
return self.construct(qnode.qtape, *targs, **tkwargs)
_wrapper.construct = _construct
return _wrapper
wrapper.tape_fn = functools.partial(self.transform_fn, *targs, **tkwargs)
wrapper.expand_fn = self.expand_fn
wrapper.differentiable = self.differentiable
return wrapper
def construct(self, tape, *args, **kwargs):
"""Applies the batch tape transform to an input tape.
Args:
tape (.QuantumTape): the tape to be transformed
*args: positional arguments to pass to the tape transform
**kwargs: keyword arguments to pass to the tape transform
Returns:
tuple[list[tapes], callable]: list of transformed tapes
to execute and a post-processing function.
"""
expand = kwargs.pop("_expand", True)
if expand and self.expand_fn is not None:
tape = self.expand_fn(tape, *args, **kwargs)
tapes, processing_fn = self.transform_fn(tape, *args, **kwargs)
if processing_fn is None:
processing_fn = lambda x: x
return tapes, processing_fn
def _device_wrapper(self, *targs, **tkwargs):
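        # Returns a function that deep-copies a device and overrides its
        # batch_transform hook so every tape is transformed before execution.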
def _wrapper(dev):
new_dev = copy.deepcopy(dev)
new_dev.batch_transform = lambda tape: self.construct(tape, *targs, **tkwargs)
return new_dev
return _wrapper
def _tape_wrapper(self, *targs, **tkwargs):
return lambda tape: self.construct(tape, *targs, **tkwargs)
def map_batch_transform(transform, tapes):
"""Map a batch transform over multiple tapes.
Args:
transform (.batch_transform): the batch transform
to be mapped
tapes (Sequence[QuantumTape]): The sequence of tapes the batch
transform should be applied to. Each tape in the sequence
is transformed by the batch transform.
**Example**
Consider the following tapes:
.. code-block:: python
H = qml.PauliZ(0) @ qml.PauliZ(1) - qml.PauliX(0)
with qml.tape.QuantumTape() as tape1:
qml.RX(0.5, wires=0)
qml.RY(0.1, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(H)
with qml.tape.QuantumTape() as tape2:
qml.Hadamard(wires=0)
qml.CRX(0.5, wires=[0, 1])
qml.CNOT(wires=[0, 1])
qml.expval(H + 0.5 * qml.PauliY(0))
We can use ``map_batch_transform`` to map a single
batch transform across both of the these tapes in such a way
that allows us to submit a single job for execution:
>>> tapes, fn = map_batch_transform(qml.transforms.hamiltonian_expand, [tape1, tape2])
>>> dev = qml.device("default.qubit", wires=2)
>>> fn(qml.execute(tapes, dev, qml.gradients.param_shift))
[0.9950041652780257, 0.8150893013179248]
"""
execution_tapes = []
batch_fns = []
tape_counts = []
for t in tapes:
# Preprocess the tapes by applying batch transforms
# to each tape, and storing corresponding tapes
# for execution, processing functions, and list of tape lengths.
new_tapes, fn = transform(t)
execution_tapes.extend(new_tapes)
batch_fns.append(fn)
tape_counts.append(len(new_tapes))
def processing_fn(res):
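        # Slice the flat list of results back into one chunk per input tape
        # and apply that tape's own post-processing function to the chunk.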
count = 0
final_results = []
for idx, s in enumerate(tape_counts):
# apply any batch transform post-processing
new_res = batch_fns[idx](res[count : count + s])
final_results.append(new_res)
count += s
return final_results
return execution_tapes, processing_fn
|
the-stack_0_12894 | from datetime import time
from django.forms import TimeInput
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class TimeInputTest(WidgetTest):
widget = TimeInput()
def test_render_none(self):
self.check_html(self.widget, 'time', None, html='<input type="text" name="time">')
def test_render_value(self):
"""
The microseconds are trimmed on display, by default.
"""
t = time(12, 51, 34, 482548)
self.assertEqual(str(t), '12:51:34.482548')
self.check_html(self.widget, 'time', t, html='<input type="text" name="time" value="12:51:34">')
self.check_html(self.widget, 'time', time(12, 51, 34), html=(
'<input type="text" name="time" value="12:51:34">'
))
self.check_html(self.widget, 'time', time(12, 51), html=(
'<input type="text" name="time" value="12:51:00">'
))
def test_string(self):
"""Initializing from a string value."""
self.check_html(self.widget, 'time', '13:12:11', html=(
'<input type="text" name="time" value="13:12:11">'
))
def test_format(self):
"""
Use 'format' to change the way a value is displayed.
"""
t = time(12, 51, 34, 482548)
widget = TimeInput(format='%H:%M', attrs={'type': 'time'})
self.check_html(widget, 'time', t, html='<input type="time" name="time" value="12:51">')
@override_settings(USE_L10N=True)
@translation.override('de-at')
def test_l10n(self):
t = time(12, 51, 34, 482548)
self.check_html(self.widget, 'time', t, html='<input type="text" name="time" value="12:51:34">')
|
the-stack_0_12895 | import json
import logging
import os
from datetime import date
from sensors import Light
from utils import catch_measurement, save_measurement, find, exit_on_time
def main():
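    # Load the sensor configuration, then sample the light sensor until the
    # configured exit time, appending each measurement to a per-day CSV file.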
with open(find('setup_agriculture.json', '/')) as f:
setup = json.load(f)
local_storage: str = setup.get('local_storage')
logging.basicConfig(filename=os.path.join(local_storage, 'log.log'), level=logging.WARNING,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
light_port: int = setup['light'].get('light_port')
period: int = setup['light'].get('period')
wait: float = setup['light'].get('wait')
light_sensor = Light(light_port)
filename = os.path.join(local_storage, 'light_' + str(date.today()) + '.txt')
if not os.path.exists(filename):
with open(filename, 'w+') as f:
f.write('Timestamp, Light\n')
while exit_on_time(setup['light'].get('exit_time')):
measurement = catch_measurement(sensor=light_sensor, period=period, wait=wait)
save_measurement(measurement=measurement,
path=filename)
quit()
if __name__ == '__main__':
main()
|
the-stack_0_12896 | import sys
import common as _c
class StatusArg:
def __init__(self):
self.test = False
def parsearg(globvar):
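    # Store a StatusArg under globvar['status'] and enable its test flag when
    # '-t' is passed; unknown arguments are reported.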
    globvar['status'] = StatusArg()
for arg in sys.argv[1:]:
if arg == '-t':
globvar['status'].test = True
else:
print("unknown argument : {0}".format(arg))
return None
|
the-stack_0_12897 | # -*- coding: utf-8 -*-
from PySide2.QtCore import Signal
from PySide2.QtWidgets import QWidget
from ......Classes.CondType21 import CondType21
from ......GUI import gui_option
from ......GUI.Dialog.DMachineSetup.SBar.PCondType21.Gen_PCondType21 import (
Gen_PCondType21,
)
class PCondType21(Gen_PCondType21, QWidget):
"""Page to setup Conductor Type 21"""
# Signal to DMachineSetup to know that the save popup is needed
saveNeeded = Signal()
# Information for SBar combobox
cond_name = "Rectangular bar"
cond_type = CondType21
def __init__(self, machine=None, material_dict=None):
"""Initialize the widget according to machine
Parameters
----------
self : PCondType21
A PCondType21 widget
machine : Machine
current machine to edit
material_dict: dict
Materials dictionary (library + machine)
"""
# Build the interface according to the .ui file
QWidget.__init__(self)
self.setupUi(self)
# Setup material combobox according to matlib names
self.material_dict = material_dict
self.w_mat.def_mat = "Copper1"
# Set FloatEdit unit
self.lf_Hbar.unit = "m"
self.lf_Wbar.unit = "m"
        # Set unit name (m or mm)
self.u = gui_option.unit
wid_list = [self.unit_Hbar, self.unit_Wbar]
for wid in wid_list:
wid.setText(self.u.get_m_name())
# Fill the fields with the machine values (if they're filled)
self.machine = machine
conductor = machine.rotor.winding.conductor
# Make sure that the rotor's conductor is a 2_1
if conductor is None or not isinstance(conductor, CondType21):
self.machine.rotor.winding.conductor = CondType21()
self.machine.rotor.winding.conductor._set_None()
# Make sure to re-set conductor with the new object
conductor = machine.rotor.winding.conductor
self.lf_Hbar.setValue(conductor.Hbar)
self.lf_Wbar.setValue(conductor.Wbar)
self.w_mat.update(conductor, "cond_mat", self.material_dict)
# Display the main output
self.w_out.comp_output()
# Connect the widget
self.lf_Hbar.editingFinished.connect(self.set_Hbar)
self.lf_Wbar.editingFinished.connect(self.set_Wbar)
self.w_mat.saveNeeded.connect(self.emit_save)
def emit_save(self):
"""Emit the saveNeeded signal"""
self.saveNeeded.emit()
def set_Hbar(self):
"""Signal to update the value of Hbar according to the line edit
Parameters
----------
self : PCondType21
A PCondType21 object
Returns
-------
"""
self.machine.rotor.winding.conductor.Hbar = self.lf_Hbar.value()
self.w_out.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_Wbar(self):
"""Signal to update the value of Wbar according to the line edit
Parameters
----------
self : PCondType21
A PCondType21 object
Returns
-------
"""
self.machine.rotor.winding.conductor.Wbar = self.lf_Wbar.value()
self.w_out.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
|
the-stack_0_12898 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
zmap.py - version and date, see below
Source code : https://github.com/nanshihui/python-zmap/
Author :
* Sherwel Nan - https://github.com/nanshihui/python-zmap/
Licence : Apache License 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights. Licensed works, modifications, and larger
works may be distributed under different terms and without source code.
"""
__author__ = 'Sherwel Nan'
__version__ = '0.1'
__last_modification__ = '2017.07.31'
import csv
import io
import os
import re
import shlex
import subprocess
import sys
try:
from multiprocessing import Process
except ImportError:
from threading import Thread as Process
############################################################################
class PortScanner(object):
"""
PortScanner class allows to use zmap from python
"""
def __init__(self, zmap_search_path=('zmap',
'/usr/bin/zmap',
'/usr/local/bin/zmap',
'/sw/bin/zmap',
'/opt/local/bin/zmap'),Async=False,call_back=None):
"""
Initialize PortScanner module
* detects zmap on the system and zmap version
* may raise PortScannerError exception if zmap is not found in the path
        :param zmap_search_path: tuple of strings where to search for the zmap executable. Change this if you want to use a specific version of zmap.
:returns: nothing
"""
self._zmap_path = '' # zmap path
self._scan_result = {}
self._zmap_version_number = 0 # zmap version number
self._zmap_subversion_number = 0 # zmap subversion number
self._zmap_last_output = '' # last full ascii zmap output
is_zmap_found = False # true if we have found zmap
self._all_host=None
self.__process = None
self._command=None
# regex used to detect zmap (http or https)
regex = re.compile(
'zmap [0-9]*\.[0-9]*\.[0-9].*'
)
# launch 'zmap -V', we wait after
# 'zmap version 5.0 ( http://zmap.org )'
# This is for Mac OSX. When idle3 is launched from the finder, PATH is not set so zmap was not found
for zmap_path in zmap_search_path:
try:
if sys.platform.startswith('freebsd') \
or sys.platform.startswith('linux') \
or sys.platform.startswith('darwin'):
p = subprocess.Popen([zmap_path, '-V'],
bufsize=10000,
stdout=subprocess.PIPE,
close_fds=True)
else:
p = subprocess.Popen([zmap_path, '-V'],
bufsize=10000,
stdout=subprocess.PIPE)
except OSError:
pass
else:
self._zmap_path = zmap_path # save path
break
else:
raise PortScannerError(
'zmap program was not found in path. PATH is : {0}'.format(
os.getenv('PATH')
)
)
        self._zmap_last_output = bytes.decode(p.communicate()[0])  # save stdout
for line in self._zmap_last_output.split(os.linesep):
if regex.match(line) is not None:
is_zmap_found = True
# Search for version number
regex_version = re.compile('[0-9]+')
regex_subversion = re.compile('\.[0-9]+')
rv = regex_version.search(line)
rsv = regex_subversion.search(line)
if rv is not None and rsv is not None:
# extract version/subversion
self._zmap_version_number = int(line[rv.start():rv.end()])
self._zmap_subversion_number = int(
line[rsv.start() + 1:rsv.end()]
)
break
if not is_zmap_found:
raise PortScannerError('zmap program was not found in path')
return
def get_zmap_last_output(self):
"""
Returns the last text output of zmap in raw text
this may be used for debugging purpose
:returns: string containing the last text output of zmap in raw text
"""
return self._zmap_last_output
def zmap_version(self):
"""
returns zmap version if detected (int version, int subversion)
or (0, 0) if unknown
:returns: (zmap_version_number, zmap_subversion_number)
"""
return (self._zmap_version_number, self._zmap_subversion_number)
def scanbyfile(self,path,ports):
pass
def scanbylist(self,lists,ports):
pass
def scan(self, hosts='127.0.0.1', ports=None, arguments='', sudo=False):
"""
Scan given hosts
May raise PortScannerError exception if zmap output was not xml
        Test existence of the following key to know
if something went wrong : ['zmap']['scaninfo']['error']
If not present, everything was ok.
:param hosts: string for hosts as zmap use it 'scanme.zmap.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
:param ports: int for ports as zmap use it '22'
:param arguments: string of arguments for zmap '-q'
:param sudo: launch zmap with sudo if True
        :returns: scan_result as dictionary
"""
# assert os.geteuid() == 0,'zmap should be running with root'
if sys.version_info[0] == 2:
assert type(hosts) in (str, unicode), 'Wrong type for [hosts], should be a string [was {0}]'.format(
type(hosts)) # noqa
            assert ports and type(ports) == (int), 'Wrong type for [ports], should be an int [was {0}]'.format(
type(ports)) # noqa
assert type(arguments) in (str, unicode), 'Wrong type for [arguments], should be a string [was {0}]'.format(
type(arguments)) # noqa
else:
assert type(hosts) in (str), 'Wrong type for [hosts], should be a string [was {0}]'.format(
type(hosts)) # noqa
            assert ports and type(ports) == (int), 'Wrong type for [ports], should be an int [was {0}]'.format(
type(ports)) # noqa
assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(
type(arguments)) # noqa
h_args = shlex.split(hosts)
f_args = shlex.split(arguments)
# Launch scan
args = [self._zmap_path] + h_args + ['-p', str(ports)] * (ports is not None) + f_args
if sudo:
args = ['sudo'] + args
self._command=args
p = subprocess.Popen(args, bufsize=100000,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# wait until finished
# get output
(self._zmap_last_output, zmap_err) = p.communicate()
self._zmap_last_output = bytes.decode(self._zmap_last_output)
zmap_err = bytes.decode(zmap_err)
# If there was something on stderr, there was a problem so abort... in
# fact not always. As stated by AlenLPeacock :
# This actually makes python-zmap mostly unusable on most real-life
# networks -- a particular subnet might have dozens of scannable hosts,
# but if a single one is unreachable or unroutable during the scan,
# zmap.scan() returns nothing. This behavior also diverges significantly
# from commandline zmap, which simply stderrs individual problems but
# keeps on trucking.
zmap_err_keep_trace = []
zmap_warn_keep_trace = []
zmap_info_keep_trace=[]
if len(zmap_err) > 0:
regex_warning = re.compile('\[WARN\].*', re.IGNORECASE)
regex_info = re.compile('\[INFO\].*', re.IGNORECASE)
regex_fatal = re.compile('\[FATAL\].*', re.IGNORECASE)
for line in zmap_err.split(os.linesep):
if len(line) > 0:
rgw = regex_warning.search(line)
rgi=regex_info.search(line)
rgf=regex_fatal.search(line)
if rgw is not None:
# sys.stderr.write(line+os.linesep)
zmap_warn_keep_trace.append(line + os.linesep)
elif rgi is not None:
zmap_info_keep_trace.append(line + os.linesep)
elif rgf is not None:
zmap_err_keep_trace.append(line + os.linesep)
# raise PortScannerError(zmap_err)
else:
zmap_info_keep_trace.append(line)
return self.analyse_zmap_scan(
zmap_output=self._zmap_last_output,
zmap_err=zmap_err,
zmap_err_keep_trace=zmap_err_keep_trace,
zmap_warn_keep_trace=zmap_warn_keep_trace,
port=ports
)
def analyse_zmap_scan(self,port=None, zmap_output=None, zmap_err='', zmap_err_keep_trace='', zmap_warn_keep_trace=''):
"""
        Analyses zmap scan output
May raise PortScannerError exception if zmap output was not xml
        Test existence of the following key to know if something went wrong : ['zmap']['scaninfo']['error']
If not present, everything was ok.
:param zmap_output: string to analyse
        :returns: scan_result as dictionary
"""
if zmap_output is not None:
self._zmap_last_output = zmap_output
scan_result = {}
scan_result['alive']=[]
scan_result['error_info']=[]
scan_result['warn_info']=[]
if len(self._zmap_last_output)>0:
scan_result['alive']=self._zmap_last_output.split()
if zmap_err_keep_trace:
scan_result['error_info']=zmap_err_keep_trace
if zmap_warn_keep_trace:
scan_result['warn_info']=zmap_warn_keep_trace
# zmap command line
scan_info={}
scan_info['scaninfo']={}
scan_info['scaninfo'][port]=scan_result
scan_info['command_line']=' '.join(i for i in self._command)
self._scan_result = scan_info # store for later use
return scan_info
def __getitem__(self,port=None):
"""
returns a port's detail
"""
if sys.version_info[0] == 2:
            assert port and type(port) == int, 'Wrong type for [port], should be an int [was {0}]'.format(
type(port))
else:
            assert port and type(port) == int, 'Wrong type for [port], should be an int [was {0}]'.format(type(port))
return self._scan_result['scaninfo'].get(port,{}).get('alive',None)
def all_hosts(self):
"""
returns a sorted list of all hosts
"""
if self._command:
if self._all_host:
return self._all_host
else:
args = self._command+['-d']+['-c 0']
p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# wait until finished
# get output
(msg, msg_err) = p.communicate()
if msg:
template=re.compile(r"""daddr: ((?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d]))""")
hosts=template.findall(msg)
self._all_host=hosts
return hosts
else:
return []
else:
return []
def command_line(self):
"""
returns command line used for the scan
may raise AssertionError exception if called before scanning
"""
assert 'command_line' in self._scan_result, 'Do a scan before trying to get result !'
return self._scan_result['command_line']
def scaninfo(self):
"""
returns scaninfo structure
        {80: {'alive': ['10.0.0.1', ...], 'error_info': [...], 'warn_info': [...]}}
may raise AssertionError exception if called before scanning
"""
assert 'scaninfo' in self._scan_result, 'Do a scan before trying to get result !'
return self._scan_result['scaninfo']
def has_port(self, port):
"""
returns True if port has result, False otherwise
"""
        assert type(port) is int, 'Wrong type for [port], should be an int [was {0}]'.format(type(port))
assert 'scaninfo' in self._scan_result, 'Do a scan before trying to get result !'
if self._scan_result['scaninfo'].get(port,{}).get('alive',None):
return True
return False
def csv(self):
"""
returns CSV output as text
Example :
        host;port;state
        127.0.0.1;80;open
"""
        assert 'scaninfo' in self._scan_result, 'Do a scan before trying to get result !'
if sys.version_info < (3, 0):
fd = io.BytesIO()
else:
fd = io.StringIO()
csv_ouput = csv.writer(fd, delimiter=';')
csv_header = [
'host',
'port',
'state',
]
csv_ouput.writerow(csv_header)
        # Walk the scaninfo structure built by analyse_zmap_scan(): one entry
        # per scanned port, each holding the list of responding hosts.
        for port in self._scan_result['scaninfo']:
            for host in self._scan_result['scaninfo'][port].get('alive', []):
                csv_row = [host, port, 'open']
                csv_ouput.writerow(csv_row)
return fd.getvalue()
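# Minimal usage sketch for the synchronous scanner (network range and port are
# assumptions, not taken from the original module):
#
#   zm = PortScanner()
#   result = zm.scan(hosts='192.168.0.0/24', ports=80, arguments='', sudo=True)
#   print(result['command_line'])
#   print(zm[80])           # hosts that answered on port 80, or None
#   print(zm.has_port(80))  # True if at least one host answered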
############################################################################
def __scan_progressive__(self, hosts, ports, arguments, callback, sudo):
"""
Used by PortScannerAsync for callback
"""
    # PortScanner defined above has no listscan(); treat hosts as a whitespace-separated list
    for host in hosts.split():
try:
scan_data = self._nm.scan(host, ports, arguments, sudo)
except PortScannerError:
scan_data = None
if callback is not None:
callback(host, scan_data)
return
############################################################################
class PortScannerAsync(object):
"""
PortScannerAsync allows to use zmap from python asynchronously
for each host scanned, callback is called with scan result for the host
"""
def __init__(self):
"""
Initialize the module
* detects zmap on the system and zmap version
* may raise PortScannerError exception if zmap is not found in the path
"""
self._process = None
self._nm = PortScanner()
return
def __del__(self):
"""
Cleanup when deleted
"""
if self._process is not None:
try:
if self._process.is_alive():
self._process.terminate()
except AssertionError:
# Happens on python3.4
# when using PortScannerAsync twice in a row
pass
self._process = None
return
def scan(self, hosts='127.0.0.1', ports=None, arguments='-sV', callback=None, sudo=False):
"""
Scan given hosts in a separate process and return host by host result using callback function
        PortScannerError exception from standard zmap is caught and you won't know about it, but scan_data will be None
:param hosts: string for hosts as zmap use it 'scanme.zmap.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
:param ports: string for ports as zmap use it '22,53,110,143-4564'
:param arguments: string of arguments for zmap '-sU -sX -sC'
:param callback: callback function which takes (host, scan_data) as arguments
:param sudo: launch zmap with sudo if true
"""
if sys.version_info[0] == 2:
assert type(hosts) in (str, unicode), 'Wrong type for [hosts], should be a string [was {0}]'.format(
type(hosts))
assert type(ports) in (
str, unicode, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(type(ports))
assert type(arguments) in (str, unicode), 'Wrong type for [arguments], should be a string [was {0}]'.format(
type(arguments))
else:
assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))
assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(
type(ports))
assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(
type(arguments))
assert callable(callback) or callback is None, 'The [callback] {0} should be callable or None.'.format(
str(callback))
for redirecting_output in ['-oX', '-oA']:
assert redirecting_output not in arguments, 'Xml output can\'t be redirected from command line.\nYou can access it after a scan using:\nzmap.nm.get_zmap_last_output()'
self._process = Process(
target=__scan_progressive__,
args=(self, hosts, ports, arguments, callback, sudo)
)
self._process.daemon = True
self._process.start()
return
def stop(self):
"""
Stop the current scan process
"""
if self._process is not None:
self._process.terminate()
return
def wait(self, timeout=None):
"""
Wait for the current scan process to finish, or timeout
:param timeout: default = None, wait timeout seconds
"""
assert type(timeout) in (
int, type(None)), 'Wrong type for [timeout], should be an int or None [was {0}]'.format(type(timeout))
self._process.join(timeout)
return
def still_scanning(self):
"""
:returns: True if a scan is currently running, False otherwise
"""
try:
return self._process.is_alive()
except:
return False
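# Illustrative asynchronous usage (values are assumptions).  Note that the type
# asserts in PortScannerAsync.scan() still expect python-nmap style string
# ports, while PortScanner.scan() called underneath expects a single int port,
# so this sketch only shows the intended call shape:
#
#   def on_result(host, scan_data):
#       print(host, scan_data)
#
#   zma = PortScannerAsync()
#   zma.scan(hosts='192.168.0.0/24', ports='80', callback=on_result)
#   zma.wait()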
############################################################################
class PortScannerYield(PortScannerAsync):
"""
PortScannerYield allows to use zmap from python with a generator
for each host scanned, yield is called with scan result for the host
"""
def __init__(self):
"""
Initialize the module
* detects zmap on the system and zmap version
* may raise PortScannerError exception if zmap is not found in the path
"""
PortScannerAsync.__init__(self)
return
def scan(self, hosts='127.0.0.1', ports=None, arguments='-sV', sudo=False):
"""
Scan given hosts in a separate process and return host by host result using callback function
        PortScannerError exception from standard zmap is caught and you won't know about it
:param hosts: string for hosts as zmap use it 'scanme.zmap.org' or '198.116.0-255.1-127' or '216.163.128.20/20'
:param ports: string for ports as zmap use it '22,53,110,143-4564'
:param arguments: string of arguments for zmap '-sU -sX -sC'
:param callback: callback function which takes (host, scan_data) as arguments
:param sudo: launch zmap with sudo if true
"""
assert type(hosts) is str, 'Wrong type for [hosts], should be a string [was {0}]'.format(type(hosts))
assert type(ports) in (str, type(None)), 'Wrong type for [ports], should be a string [was {0}]'.format(
type(ports))
assert type(arguments) is str, 'Wrong type for [arguments], should be a string [was {0}]'.format(
type(arguments))
for redirecting_output in ['-oX', '-oA']:
assert redirecting_output not in arguments, 'Xml output can\'t be redirected from command line.\nYou can access it after a scan using:\nzmap.nm.get_zmap_last_output()'
        # PortScanner defined above has no listscan(); treat hosts as a whitespace-separated list
        for host in hosts.split():
try:
scan_data = self._nm.scan(host, ports, arguments, sudo)
except PortScannerError:
scan_data = None
yield (host, scan_data)
return
def stop(self):
pass
def wait(self, timeout=None):
pass
def still_scanning(self):
pass
class PortScannerError(Exception):
"""
Exception error class for PortScanner class
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def __repr__(self):
return 'PortScannerError exception {0}'.format(self.value)
|
the-stack_0_12899 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
)
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
RobertaConfig, DistilBertConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
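# Illustrative invocation (script name, paths and task are assumptions; the
# flags themselves all appear in the argparse definitions above):
#
#   python run_glue.py \
#       --model_type bert \
#       --model_name_or_path bert-base-uncased \
#       --task_name MRPC \
#       --do_train --do_eval \
#       --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 \
#       --per_gpu_train_batch_size 8 \
#       --learning_rate 2e-5 \
#       --num_train_epochs 3.0 \
#       --output_dir ./output/mrpc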
|
the-stack_0_12900 | from graphbrain.meaning.corefs import main_coref
def is_actor(hg, edge):
"""Checks if the edge is a coreference to an actor."""
if edge.type()[0] == 'c':
return hg.exists(('actor/p/.', main_coref(hg, edge)))
else:
return False
def find_actors(hg, edge):
"""Returns set of all coreferences to actors found in the edge."""
actors = set()
if is_actor(hg, edge):
actors.add(main_coref(hg, edge))
if not edge.is_atom():
for item in edge:
actors |= find_actors(hg, item)
return actors
|
the-stack_0_12901 | '''
Kattis - jackpot
Simply get the LCM of all numbers. Note the property that LCM(a, b, c, ...) = LCM(LCM(a, b), c, ...)
GCD also has this property.
Time: O(n * log(INT_MAX)) Space O(n), Assuming Euclidean algorithm is O(log (INT_MAX))
'''
from math import gcd
def lcm(a, b):
return a * b // gcd(a, b)
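# Worked example of the folding property described above:
#   lcm(4, 6) == 12 and lcm(lcm(4, 6), 10) == lcm(12, 10) == 60,
#   which is exactly LCM(4, 6, 10).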
num_tc = int(input())
for _ in range(num_tc):
n = int(input())
arr = list(map(int, input().split()))
cur = arr[0]
if (n == 1):
print(cur)
continue
for i in range(1, n):
cur = lcm(cur, arr[i])
if (cur > 1e9):
print("More than a billion.")
break
if (i == n-1):
print(cur)
|
the-stack_0_12902 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coverage_data_utils.py"""
from unittest import mock
import pandas as pd
import pandas.testing as pd_test
from analysis import coverage_data_utils
FUZZER = 'afl'
BENCHMARK = 'libpng-1.2.56'
EXPERIMENT_FILESTORE_PATH = 'gs://fuzzbench-data/myexperiment'
SAMPLE_DF = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2',
'fuzzer': 'libfuzzer',
'benchmark': BENCHMARK
}])
def create_coverage_data():
"""Utility function to create test data."""
return {
'afl libpng-1.2.56': [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
'libfuzzer libpng-1.2.56': [[0, 0, 1, 1], [0, 0, 2, 3], [0, 0, 3, 3],
[0, 0, 4, 4]]
}
def test_get_unique_region_dict():
"""Tests get_unique_region_dict() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
expected_dict = {
(0, 0, 2, 2): ['afl'],
(0, 0, 2, 3): ['libfuzzer'],
(0, 0, 4, 4): ['libfuzzer']
}
assert expected_dict == unique_region_dict
def test_get_unique_region_cov_df():
"""Tests get_unique_region_cov_df() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
fuzzer_names = ['afl', 'libfuzzer']
unique_region_df = coverage_data_utils.get_unique_region_cov_df(
unique_region_dict, fuzzer_names)
unique_region_df = unique_region_df.sort_values(by=['fuzzer']).reset_index(
drop=True)
expected_df = pd.DataFrame([{
'fuzzer': 'afl',
'unique_regions_covered': 1
}, {
'fuzzer': 'libfuzzer',
'unique_regions_covered': 2
}])
assert unique_region_df.equals(expected_df)
def test_get_benchmark_cov_dict():
"""Tests that get_benchmark_cov_dict() returns correct dictionary."""
coverage_dict = create_coverage_data()
benchmark = 'libpng-1.2.56'
benchmark_cov_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, benchmark)
expected_cov_dict = {
"afl": {(0, 0, 3, 3), (0, 0, 2, 2), (0, 0, 1, 1)},
"libfuzzer": {(0, 0, 4, 4), (0, 0, 3, 3), (0, 0, 2, 3), (0, 0, 1, 1)}
}
assert expected_cov_dict == benchmark_cov_dict
def test_get_pairwise_unique_coverage_table():
"""Tests that get_pairwise_unique_coverage_table() gives the
correct dataframe."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
fuzzers = ['libfuzzer', 'afl']
table = coverage_data_utils.get_pairwise_unique_coverage_table(
benchmark_coverage_dict, fuzzers)
expected_table = pd.DataFrame([[0, 1], [2, 0]],
index=fuzzers,
columns=fuzzers)
pd_test.assert_frame_equal(table, expected_table)
def test_get_fuzzer_benchmark_covered_regions_filestore_path():
"""Tests that get_fuzzer_benchmark_covered_regions_filestore_path returns
the correct result."""
assert (
coverage_data_utils.get_fuzzer_benchmark_covered_regions_filestore_path(
FUZZER, BENCHMARK,
EXPERIMENT_FILESTORE_PATH) == ('gs://fuzzbench-data/myexperiment/'
'coverage/data/libpng-1.2.56/afl/'
'covered_regions.json'))
def test_fuzzer_and_benchmark_to_key():
"""Tests that fuzzer_and_benchmark_to_key returns the correct result."""
assert (coverage_data_utils.fuzzer_and_benchmark_to_key(
FUZZER, BENCHMARK) == 'afl libpng-1.2.56')
def test_key_to_fuzzer_and_benchmark():
"""Tests that key_to_fuzzer_and_benchmark returns the correct result."""
assert (coverage_data_utils.key_to_fuzzer_and_benchmark('afl libpng-1.2.56')
== (FUZZER, BENCHMARK))
def test_fuzzer_benchmark_key_roundtrip():
"""Tests that key_to_fuzzer_and_benchmark(fuzzer_and_benchmark_to_key(X, Y))
returns (X, Y)."""
assert (coverage_data_utils.key_to_fuzzer_and_benchmark(
coverage_data_utils.fuzzer_and_benchmark_to_key(
FUZZER, BENCHMARK)) == (FUZZER, BENCHMARK))
def test_get_experiment_filestore_path_for_fuzzer_benchmark():
"""Tests that get_experiment_filestore_path_for_fuzzer_benchmark returns the
right result."""
filestore_path = (
coverage_data_utils.get_experiment_filestore_path_for_fuzzer_benchmark(
FUZZER, BENCHMARK, SAMPLE_DF))
assert filestore_path == 'gs://fuzzbench-data/exp1'
@mock.patch('analysis.coverage_data_utils.logger.warning')
def test_get_experiment_filestore_path_for_fuzzer_benchmark_multiple(
mocked_warning):
"""Tests that get_experiment_filestore_path_for_fuzzer_benchmark returns the
right result when there are multiple filestores for a single pair and that a
warning is logged.."""
df = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2',
'fuzzer': FUZZER,
'benchmark': BENCHMARK
}])
filestore_path = (
coverage_data_utils.get_experiment_filestore_path_for_fuzzer_benchmark(
FUZZER, BENCHMARK, df))
assert filestore_path in ('gs://fuzzbench-data/exp1',
'gs://fuzzbench-data2/exp2')
assert mocked_warning.call_count == 1
def test_get_experiment_filestore_paths():
"""Tests that get_experiment_filestore_paths returns the right result."""
df = pd.DataFrame([{
'experiment_filestore': 'gs://fuzzbench-data',
'experiment': 'exp1'
}, {
'experiment_filestore': 'gs://fuzzbench-data2',
'experiment': 'exp2'
}])
assert sorted(coverage_data_utils.get_experiment_filestore_paths(df)) == [
'gs://fuzzbench-data/exp1', 'gs://fuzzbench-data2/exp2'
]
def test_coverage_report_filestore_path():
"""Tests that get_coverage_report_filestore_path returns the correct
result."""
expected_cov_report_url = ('gs://fuzzbench-data/exp1/coverage/reports/'
'libpng-1.2.56/afl/index.html')
assert coverage_data_utils.get_coverage_report_filestore_path(
FUZZER, BENCHMARK, SAMPLE_DF) == expected_cov_report_url
|
the-stack_0_12904 | import torch
from torch.nn.functional import softmax
import json
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
from transformers import AutoTokenizer, AutoModelForMaskedLM
import numpy as np
import sys
import random
import os
from os import path
def get_abstract(article):
return ' '.join([x['text'] for x in article['abstract']])
def get_pls(article):
return article['pls'] if article['pls_type'] == 'long' else ' '.join([x['text'] for x in article['pls']])
def mask_batch(tokens, tokenizer, num_mask):
indexed_tokens = []
mask_indices = []
for i in range(10):
cur_mask_indices = random.sample(list(range(1,len(tokens)-1)), num_mask)
masked_tokens = [tokens[index] for index in cur_mask_indices]
for index in cur_mask_indices:
tokens[index] = '[MASK]'
indexed_tokens.append(tokenizer.convert_tokens_to_ids(tokens))
mask_indices.append(cur_mask_indices)
for j in range(num_mask):
index = cur_mask_indices[j]
tokens[index] = masked_tokens[j]
return indexed_tokens, mask_indices
def run_model_sentence(tokens, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
(indexed_tokens, mask_indices) = mask_batch(tokens, tokenizer, num_mask)
predictions = []
model.eval()
model.to(device)
start_index = 0
while start_index < len(indexed_tokens):
end_index = min(start_index + batch_size, len(indexed_tokens))
cur_indexed_tokens = torch.tensor(indexed_tokens[start_index:end_index], dtype=torch.long).to(device)
segment_ids = torch.ones((end_index-start_index, len(tokens)), dtype=torch.long).to(device)
with torch.no_grad():
outputs = model.forward(cur_indexed_tokens, token_type_ids=segment_ids)
predictions.append(outputs[0].to('cpu'))
start_index = end_index
predictions = torch.cat(predictions, dim=0)
return predictions, mask_indices
def eval_sentence(sentence, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
tokens = tokenizer.tokenize(sentence)
if len(tokens) > 510:
tokens = tokens[:510]
tokens = ['[CLS]'] + tokens + ['[SEP]']
#if num_mask is a float, treat as a percentage of tokens to mask,
#of course excluding the CLS and SEP tokens
if type(num_mask) == float:
num_mask = int(num_mask * (len(tokens)-2))
(predictions, mask_indices) = run_model_sentence(tokens, tokenizer, model, num_mask, batch_size, device)
probabilities = []
for i in range(len(predictions)):
for mask_index in mask_indices[i]:
distribution = softmax(predictions[i, mask_index], dim=0)
masked_token_index = tokenizer.convert_tokens_to_ids(tokens[mask_index])
prob = distribution[masked_token_index].item()
probabilities.append(prob)
return probabilities
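# Illustrative usage (model name and sentence are assumptions):
#
#   tok = AutoTokenizer.from_pretrained('bert-base-uncased')
#   mlm = AutoModelForMaskedLM.from_pretrained('bert-base-uncased')
#   probs = eval_sentence('The cat sat on the mat.', tok, mlm,
#                         num_mask=2, batch_size=2, device='cpu')
#   print(sum(probs) / len(probs))  # mean probability of the masked tokens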
def eval_paragraph(paragraph, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
probabilities = []
for sent in sent_tokenize(paragraph):
if type(num_mask) == int and len(tokenizer.tokenize(sent)) < num_mask:
print('skipping sentence...')
continue
probabilities += eval_sentence(sent, tokenizer, model, num_mask, batch_size, device)
return probabilities
def eval_article(article, tokenizer, model, num_mask=5, batch_size=1, device='cuda'):
abstract_probs = eval_paragraph(article['abstract'], tokenizer, model, num_mask, batch_size, device)
pls_probs = eval_paragraph(article['pls'], tokenizer, model, num_mask, batch_size, device)
gen_probs = eval_paragraph(article['gen'], tokenizer, model, num_mask, batch_size, device)
return abstract_probs, pls_probs, gen_probs
def probability_results(data, input_file_name, tokenizer, model, file_name, num_mask=5, batch_size=1, device='cuda'):
prefix = path.split(input_file_name)[-2]
#read in the dois already processed (if the file_name exists) so that they
#can be ignored in this run
already = set()
if path.isfile(path.join(prefix, file_name)):
with open(path.join(prefix, file_name)) as f:
for l in f.readlines():
if len(l) > 0:
already.add(l.split(', ')[0])
for index,article in enumerate(data):
if article['doi'] in already:
continue
print(index)
(abstract_probs, pls_probs, gen_probs) = eval_article(article, tokenizer, model, num_mask, batch_size, device)
abstract_avg = np.mean(abstract_probs)
pls_avg = np.mean(pls_probs)
gen_avg = np.mean(gen_probs)
with open(path.join(prefix, file_name), 'a+', 1) as f:
f.write(f'{article["doi"]} {abstract_avg} {pls_avg} {gen_avg}\n')
f.flush()
model_name = sys.argv[1]
input_file_name = sys.argv[2]
file_name = sys.argv[3]
num_mask = float(sys.argv[4]) if '.' in sys.argv[4] else int(sys.argv[4])
batch_size = int(sys.argv[5])
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)
start_index = int(sys.argv[6])
end_index = int(sys.argv[7])
print(input_file_name)
data = json.load(open(input_file_name))
probability_results(data[start_index:end_index], input_file_name, tokenizer, model, file_name, num_mask, batch_size, device='cuda')
|
the-stack_0_12906 | #-*- coding: utf-8 -*-
import re
import pickle
# say-as 기본 규칙: 참고 논문 <기술문에서 우리말 숫자 쓰기, 권성규>
_mandarin_num = {"0": "공", "1": "일", "2": "이", "3": "삼", "4": "사", "5": "오", "6": "육", "7": "칠",
"8": "팔", "9": "구", "10": "십", "100": "백", "1000": "천", "10000": "만", "100000000": "억",
"1000000000000": "조"}
_korean_num = {"1": "한", "2": "두", "3": "세", "4": "네", "5": "다섯", "6": "여섯", "7": "일곱",
'8': "여덟", "9": "아홉", "10": "열", "20": "스물", "30": "서른", "40": "마흔", "50": "쉰",
"60": "예순", "70": "일흔", "80": "여든", "90": "아흔"}
_korean_end_word = ['개', '돈', '마리', '벌', '살', '손', '자루', '발',
'죽', '채', '켤레', '쾌', '시', '포기', '번째', '가지', '곳',
'살', '척', '캔', '배', '그루', '명', '번', '달', '겹', '건', '대']
_exception_korean_end_word = ['개국', '달러', '개월']
_english_word = {'a': '에이', 'b': '비', 'c': '씨', 'd': '디', 'e': '이', 'f': '에프', 'g': '쥐', 'h': '에이치',
'i': '아이', 'j': '제이', 'k': '케이', 'l': '엘', 'n': '엔', 'm': '엠', 'o': '오', 'p': '피',
'q': '큐', 'r':'얼', 's': '에스', 't': '티', 'u':'유', 'v':'브이', 'w':'더블유', 'x': '엑스',
'y': '와이', 'z': '지'}
_special_num_sub = {'A4': '에이포', 'G20': '지이십', 'G2': '지투', 'U2': '유투',
'2PM': '투피엠', '88올림픽': '팔팔올림픽',
'119에': '일일구에', '112신고': '일일이신고', '빅3': '빅쓰리', '4대강': '사대강'}
# lexicon base rules: see the reference dictionary <국립국어원 표준국어대사전> (NIKL Standard Korean Language Dictionary)
with open('./tts/text/dictionary/lexicon.pickle', 'rb') as handle:
_lexicon = pickle.load(handle)
# sub base rules
with open('./tts/text/dictionary/sub.pickle', 'rb') as handle:
_sub = pickle.load(handle)
with open('./tts/text/dictionary/num_sub.pickle', 'rb') as handle:
_num_sub = pickle.load(handle)
_num_sub['㎜'] = '밀리미터'
def read1to999(n):
units = [''] + list('십백천')
nums = '일이삼사오육칠팔구'
result = []
i = 0
while n > 0:
n, r = divmod(n, 10)
if r > 0:
if units[i] == '':
result.append(nums[r - 1] + units[i])
else:
if r == 1:
result.append(units[i])
else:
result.append(nums[r - 1] + units[i])
i += 1
return ''.join(result[::-1])
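# Illustrative output (traced by hand from the digit/unit tables above):
#   read1to999(305) -> '삼백오'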
def readNumM(n):
"""
    Read a number using Sino-Korean (Chinese-derived) numerals.
"""
result = ''
if n >= 1000000000000:
r, n = divmod(n, 10000000000000)
tmp = read1to999(r)
if len(tmp) == 1 and tmp[-1] == '일':
result += '조'
else:
result += tmp + "조"
if n >= 100000000:
r, n = divmod(n, 100000000)
tmp = read1to999(r)
if len(tmp) == 1 and tmp[-1] == '일':
result += '억'
else:
result += tmp + "억"
if n >= 10000:
r, n = divmod(n, 10000)
tmp = read1to999(r)
if len(tmp) == 1 and tmp[-1] == '일':
result += '만'
else:
result += tmp + "만"
result += read1to999(n)
return result
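# Illustrative outputs (traced by hand from the rules above):
#   readNumM(2021)  -> '이천이십일'
#   readNumM(10500) -> '만오백'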
def readNumK(intNum):
"""
    Read a number using native Korean numerals.
"""
tmp_list = list(_korean_num.keys())
num_list = list()
for num in tmp_list:
num_list.append(int(num))
num_list.sort(reverse=True)
result = ""
for num in num_list:
if intNum >= num:
intNum -= num
result += _korean_num[str(num)]
return result
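# Illustrative outputs (traced by hand from the rules above):
#   readNumK(21) -> '스물한'
#   readNumK(8)  -> '여덟'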
def txt_preprocessing(txt):
word_list = txt.split(' ') # for tts
for k, word in enumerate(word_list):
        # replace pronunciations using the lexicon & sub dictionaries
english = re.sub('[^a-zA-Z]', '', word)
not_checked = 1
if english != '' and not re.findall('\d', word):
# lexicon 처리
for key, value in _lexicon.items():
if key.lower() == english.lower():
word_list[k] = word_list[k].replace(english, value)
not_checked = 0
# sub 처리
for key, value in _sub.items():
if key.lower() == english.lower():
word_list[k] = word_list[k].replace(english, value)
not_checked = 0
elif re.findall('\d+', word):
# num_sub 처리
for key, value in _num_sub.items():
if key in word:
word_list[k] = word_list[k].replace(key, value)
not_checked = 0
# say-as 발음 교체
seperated_num = 0
if '-' in word:
seperated_num = 1
if '.' in word:
if word[-1] != '.':
                    word_list[k] = word_list[k].replace('.', '점')
if ',' in word:
if word[-1] != ',':
                    word_list[k] = word_list[k].replace(',', '')
                    word = word.replace(',', '')
strNum_list = re.findall('\d+', word) # 값 중복 시 제거해 나가면서 처리 필요
prev = -1
for strNum in strNum_list:
pos = word.index(strNum)
if prev == pos: # 약식 값 중복 처리
continue
wList = [word[0:pos], word[pos: pos + len(strNum)], word[pos + len(strNum):]]
wList = [w for w in wList if not w == '']
check = ""
# 처음이 0으로 시작하면 한문-분리
if strNum[0] == '0' or seperated_num:
check = "한문-분리"
if word_list[k-1] == '카드번호는':
word_list[k]= word_list[k].replace('-', '다시')
else:
word_list[k]= word_list[k].replace('-', '에')
else:
for i, w in enumerate(wList):
# 숫자 뒤에 붙는 것이 없을 때, 한문
if len(wList) == (i + 1):
if k > 1:
if word_list[k - 1][0] == '-':
check = "한문-분리"
break
if k + 1 < len(word_list):
if word_list[k + 1] == '':
check = "한문"
elif word_list[k + 1][0] == '-':
check = "한문-분리"
elif word_list[k + 1][0] in _korean_end_word:
check = "한글"
else:
check = "한문"
else:
check = "한문"
break
elif w == strNum:
# 숫자 뒤에 붙는 것에 따라 한글, 한문 선택
if wList[i + 1][0] in _korean_end_word:
check = "한글"
else:
check = "한문"
break
tmpNum = ""
intNum = int(strNum)
if check == "한문-분리":
for s in strNum:
# 한글자씩 읽기 (0 == 공)
tmpNum += _mandarin_num[s]
elif check == "한문":
# 숫자 한문 읽기
tmpNum = readNumM(intNum)
else: # check == "한글"
# 100이상 한문 읽기 + 이하 한글 읽기
tmpNum = readNumM(intNum // 100 * 100) + readNumK(intNum % 100)
word_list[k] = word_list[k].replace(strNum, tmpNum)
elif '-' in word:
word_list[k] = word_list[k].replace('-', '에')
if not_checked:
tmp = ''
for char in word_list[k]:
if char.lower() in _english_word.keys():
not_checked = 0
tmp += _english_word[char.lower()]
else:
tmp += char
word_list[k] = tmp
tts_sentence = word_list[0]
for word in word_list[1:]: # 길이 1 예외처리 필요
tts_sentence += ' ' + word
return tts_sentence
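# Illustrative call (assumes none of the pickled lexicon/sub/num_sub entries match
# the input, so the actual output depends on those dictionaries):
#   txt_preprocessing('3시에 만나요')  ->  '세시에 만나요'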
def txt_preprocessing_only_num(txt):
word_list = txt.split(' ')
for k, word in enumerate(word_list):
strNum_list = re.findall('\d+', word)
not_special_case = True
for key, value in _special_num_sub.items():
if key in word:
not_special_case = False
word_list[k] = word_list[k].replace(key, value)
if not_special_case and strNum_list:
# num_sub 처리
for key, value in _num_sub.items():
if key in word:
if 'k' + key in word:
key = 'k' + key
value = '킬로' + value
elif 'm' + key in word:
key = 'm' + key
value = '밀리' + value
elif 'c' + key in word:
key = 'c' + key
value = '센티' + value
word_list[k] = word_list[k].replace(key, value)
break
# say-as 발음 교체
seperated_num = 0
if '-' in word:
seperated_num = 1
if '.' in word:
if word[-1] != '.':
word_list[k] = word_list[k].replace('.', '점')
if ',' in word:
if word[-1] != ',':
word_list[k] = word_list[k].replace(',', '')
if '·' in word:
word_list[k] = word_list[k].replace('·', '')
prev = -1
for strNum in sorted(strNum_list, key=lambda x:len(x), reverse=True):
pos = word.index(strNum)
if prev == pos: # 약식 값 중복 처리
continue
wList = [word[0:pos], word[pos: pos + len(strNum)], word[pos + len(strNum):]]
wList = [w for w in wList if not w == '']
check = ""
one_change = False
if '·' in word:
check = '한문-분리'
one_change = True
elif re.findall('(\d+)-(\d+)', word):
check = "한문-분리"
if word_list[k-1] == '카드번호는':
word_list[k] = word_list[k].replace('-','다시')
else:
word_list[k] = word_list[k].replace('-','에')
elif strNum[0] == '0': # 처음이 0으로 시작하면 한문-분리
if len(strNum) == 1:
word_list[k] = word_list[k].replace('0', '영')
continue
elif '00' in strNum:
key = ''
value = ''
for _ in range(strNum.count('0')):
key += '0'
value += '땡'
word_list[k] = word_list[k].replace(key, value)
continue
check = "한문-분리"
else:
for i, w in enumerate(wList):
# 숫자 뒤에 붙는 것이 없을 때, 한문
if len(wList) == (i + 1):
if k > 1:
if word_list[k - 1][0] == '-':
check = "한문-분리"
break
if k + 1 < len(word_list):
if word_list[k + 1][0] == '-':
check = "한문-분리"
elif len(word_list[k+1]) >= 2:
if word_list[k+1][:2] in _korean_end_word:
check = "한글"
break
elif word_list[k + 1][0] in _korean_end_word:
check = "한글"
for e in _exception_korean_end_word:
if e in word_list[k+1]:
check = '한문'
break
else:
check = "한문"
else:
check = "한문"
break
elif w == strNum:
# 숫자 뒤에 붙는 것에 따라 한글, 한문 선택
if len(wList[i+1]) >= 2:
if wList[i+1][:2] in _korean_end_word:
check = '한글'
break
if wList[i + 1][0] in _korean_end_word:
check = "한글"
for e in _exception_korean_end_word:
if e in wList[i+1]:
check = '한문'
break
else:
check = "한문"
break
tmpNum = ""
intNum = int(strNum)
if check == "한문-분리":
for s in strNum:
# 한글자씩 읽기 (0 == 공)
tmpNum += _mandarin_num[s]
elif check == "한문":
# 숫자 한문 읽기
tmpNum = readNumM(intNum)
else: # check == "한글"
# 100이상 한문 읽기 + 이하 한글 읽기
if intNum > 99:
tmpNum = readNumM(intNum)
else:
tmpNum = readNumK(intNum)
# tmpNum = readNumM(intNum // 100 * 100) + readNumK(intNum % 100)
word_list[k] = word_list[k].replace(strNum, tmpNum)
if word_list:
word_list = [' ' + w for w in word_list]
tts_sentence = ''.join(word_list)
tts_sentence = tts_sentence[1:]
return tts_sentence
else:
return ' ' |
the-stack_0_12907 | import pathlib
import random
import sys
import panel as pn
import param
_COLORS = [
("#00A170", "white"),
("#DAA520", "white"),
("#2F4F4F", "white"),
("#F08080", "white"),
("#4099da", "white"), # lightblue
]
_LOGOS = {
"default": "https://panel.holoviz.org/_static/logo_stacked.png",
"dark": "https://raw.githubusercontent.com/holoviz/panel/98389a8dead125bcb7c60dc2c1564e112d89d3fa/doc/_static/logo_stacked_dark_theme.png",
}
_MENU_FILE = pathlib.Path(__file__).parent / "menu.html"
_MENU_TEXT = _MENU_FILE.read_text()
_ACE_THEMES={
"default": "chrome",
"dark": "tomorrow_night_eighties"
}
RAW_CSS = """
.sidenav .menu-item-active a {
background: var(--accent-fill-active);
color: white;
}
"""
if RAW_CSS not in pn.config.raw_css:
pn.config.raw_css.append(RAW_CSS)
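# `_mock_panel` below patches panel's private autoreload hooks
# (pn.io.reload._modules / _callbacks) so that a reload clears the cached modules,
# stops pending autoreload callbacks, and forces every connected session to refresh.
# Because it relies on private APIs, it is tied to the panel version in use here.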
def _mock_panel():
def _reload(module=None):
if module is not None:
for module in pn.io.reload._modules:
if module in sys.modules:
del sys.modules[module]
for cb in pn.io.reload._callbacks.values():
cb.stop()
pn.io.reload._callbacks.clear()
if pn.state.location:
pn.state.location.reload = True
for loc in pn.state._locations.values():
loc.reload = True
pn.io.reload._reload = _reload
_mock_panel()
#tests
class Configuration(param.Parameterized):
theme = param.String()
site = param.String(default="Panel@PyData 2021")
title = param.String()
url = param.String()
logo = param.String()
accent_base_color = param.Color()
header_color = param.Color()
header_accent_base_color = param.Color("white")
header_background = param.Color()
main_max_width = param.String("95%")
sidebar_width = param.Integer(400)
ace_theme=param.String()
def __init__(self, random=False, **params):
"""Configuration for your (Fast) Template
Args:
random (bool, optional): Whether or not to provide randomized values. Defaults to False.
"""
super().__init__(**params)
self.theme = self._get_theme()
if random:
color_index = self._get_random_color_index()
else:
color_index=0
self.accent_base_color = _COLORS[color_index][0]
self.header_color = _COLORS[color_index][1]
self.header_background = self.accent_base_color
self.logo=_LOGOS[self.theme]
self.ace_theme=_ACE_THEMES[self.theme]
def _get_theme(self):
if pn.template.FastListTemplate().theme==pn.template.DarkTheme:
return "dark"
return "default"
def _get_random_color_index(self):
if not "color" in pn.state.cache:
pn.state.cache["color"]=-1
color = pn.state.cache["color"]+1
if color==len(_COLORS):
color=0
pn.state.cache["color"]=color
return color
@property
def _collapsed_icon(self) -> str:
return f"""<svg style="stroke: { self.accent_base_color }" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
<path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
</svg>"""
@property
def _expanded_icon(self) -> str:
return f"""<svg style="stroke: { self.accent_base_color }" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
<path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
<path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
</svg>
"""
@property
def menu(self) -> str:
"""Returns a HTML Menu"""
        menu_item = f'<li><a href="{ self.url }">{ self.title }</a></li>'
        return (
            _MENU_TEXT
            .replace("{ COLLAPSED_ICON }", self._collapsed_icon)
            .replace("{ EXPANDED_ICON }", self._expanded_icon)
            .replace(menu_item, f'<li class="menu-item-active"><a href="{ self.url }">{ self.title }</a></li>')
        )
def get_logo_pane(self, **params):
return pn.pane.PNG(
self.logo,
link_url="https://panel.holoviz.org",
embed=False,
sizing_mode="fixed",
align="center",
**params
)
if __name__.startswith("bokeh"):
config = Configuration(title="Works in your Notebook and IDE", url="works_in_your_notebook_and_ide", random=True)
pn.template.FastListTemplate(
title="Test Configuration",
site=config.site,
header_accent_base_color=config.header_accent_base_color,
header_background=config.header_background,
header_color=config.header_color,
sidebar_footer=config.menu,
accent_base_color=config.accent_base_color,
main=[pn.pane.PNG(config.logo)],
).servable()
|
the-stack_0_12908 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions for dealing with files"""
import pkg_resources
import pathlib
EXAMPLE_AUDIO = "example_data/Kevin_MacLeod_-_Vibe_Ace.ogg"
__all__ = ["example_audio_file", "find_files"]
def example_audio_file():
"""Get the path to an included audio example file.
.. raw:: html
<div xmlns:cc="http://creativecommons.org/ns#"
xmlns:dct="http://purl.org/dc/terms/"
about="http://freemusicarchive.org/music/Kevin_MacLeod/Jazz_Sampler/Vibe_Ace_1278">
<span property="dct:title">Vibe Ace</span>
(<a rel="cc:attributionURL" property="cc:attributionName"
href="http://freemusicarchive.org/music/Kevin_MacLeod/">Kevin MacLeod</a>)
/ <a rel="license" href="http://creativecommons.org/licenses/by/3.0/">CC BY 3.0</a>
</div>
Examples
--------
>>> # Load the waveform from the example track
>>> y, sr = librosa.load(librosa.util.example_audio_file())
Returns
-------
filename : str
Path to the audio example file included with librosa
"""
return pkg_resources.resource_filename(__name__, EXAMPLE_AUDIO)
def find_files(
directory, ext=None, recurse=True, case_sensitive=False, limit=None, offset=0
):
"""Get a sorted list of (audio) files in a directory or directory sub-tree.
Examples
--------
>>> # Get all audio files in a directory sub-tree
>>> files = librosa.util.find_files('~/Music')
>>> # Look only within a specific directory, not the sub-tree
>>> files = librosa.util.find_files('~/Music', recurse=False)
>>> # Only look for mp3 files
>>> files = librosa.util.find_files('~/Music', ext='mp3')
>>> # Or just mp3 and ogg
>>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])
>>> # Only get the first 10 files
>>> files = librosa.util.find_files('~/Music', limit=10)
>>> # Or last 10 files
>>> files = librosa.util.find_files('~/Music', offset=-10)
Parameters
----------
    directory : str or pathlib.Path
Path to look for files
ext : str or list of str
A file extension or list of file extensions to include in the search.
Default: `['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']`
recurse : boolean
If `True`, then all subfolders of `directory` will be searched.
Otherwise, only `directory` will be searched.
case_sensitive : boolean
If `False`, files matching upper-case version of
extensions will be included.
limit : int > 0 or None
Return at most `limit` files. If `None`, all files are returned.
offset : int
Return files starting at `offset` within the list.
Use negative values to offset from the end of the list.
Returns
-------
files : list of str
The list of audio files.
"""
directory = pathlib.Path(directory)
if ext is None:
ext = ["aac", "au", "flac", "m4a", "mp3", "ogg", "wav"]
elif isinstance(ext, str):
ext = [ext]
# Cast into a set
ext = set(ext)
# Generate upper-case versions
if not case_sensitive:
# Force to lower-case
ext = set([e.lower() for e in ext])
# Add in upper-case versions
ext |= set([e.upper() for e in ext])
    files = __get_files(directory, ext, recurse)
files = list(files)
files.sort()
files = files[offset:]
if limit is not None:
files = files[:limit]
return files
def __get_files(dir_name: pathlib.Path, extensions: set, recur: bool):
"""Helper function to get files in a single directory"""
# Expand out the directory
dir_name = dir_name.expanduser().absolute()
my_files = set()
if recur:
for sub_ext in extensions:
my_files |= set(dir_name.rglob("*." + sub_ext))
else:
for sub_ext in extensions:
my_files |= set(dir_name.glob("*." + sub_ext))
return my_files
|
the-stack_0_12911 | '''
This module was downloaded from the pycroscopy github page: https://github.com/pycroscopy/pycroscopy
It was edited slightly with contributor Jessica Kong @kongjy to accomodate the new format
in which PiFM data is taken with a polarizer installed.
'''
import os
import numpy as np
from pyUSID.io.translator import Translator
from pyUSID.io import write_utils
from pyUSID import USIDataset
import pyUSID as usid
import h5py
class PiFMTranslator(Translator):
"""
Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
structure.
"""
def translate(self, path, append_path='', grp_name='Measurement'):
"""
Parameters
----------
        path : str
            Absolute path of the ANFATEC parameter (.txt) file that describes the scan
        append_path : str, optional
            Path to an existing .h5 file on disk to which these data should be added
        grp_name : str, optional
            Name of the HDF5 group the data are written into. Default 'Measurement'
        Returns
        -------
        h5_f : h5py.File
            Handle to the HDF5 file containing the translated data
"""
self.get_path(path)
self.read_anfatec_params()
self.read_file_desc()
self.read_spectrograms()
self.read_imgs()
self.read_spectra()
self.make_pos_vals_inds_dims()
self.create_hdf5_file(append_path, grp_name)
self.write_spectrograms()
self.write_images()
self.write_spectra()
self.write_ps_spectra()
return self.h5_f
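    # Minimal usage sketch (the path below is hypothetical; it should point at the
    # ANFATEC parameter .txt file that sits next to the exported .int data files):
    #   h5_f = PiFMTranslator().translate('/path/to/scan0001.txt')
    #   ...  # work with the pyUSID-formatted HDF5 file
    #   h5_f.close()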
def get_path(self, path):
"""writes full path, directory, and file name as attributes to class"""
# get paths/get params dictionary, img/spectrogram/spectrum descriptions
self.path = path
full_path = os.path.realpath(self.path)
directory = os.path.dirname(full_path)
# file name
basename = os.path.basename(self.path)
self.full_path = full_path
self.directory = directory
self.basename = basename
#these dictionary parameters will be written to hdf5 file under measurement attributes
def read_anfatec_params(self):
"""reads the scan parameters and writes them to a dictionary"""
params_dictionary = {}
params = True
with open(self.path, 'r', encoding="ISO-8859-1") as f:
for line in f:
if params:
sline = [val.strip() for val in line.split(':')]
if len(sline) == 2 and sline[0][0] != ';':
params_dictionary[sline[0]] = sline[1]
#in ANFATEC parameter files, all attributes are written before file references.
if sline[0].startswith('FileDesc'):
params = False
f.close()
self.params_dictionary = params_dictionary
self.x_len, self.y_len = int(params_dictionary['xPixel']), int(params_dictionary['yPixel'])
def read_file_desc(self):
"""reads spectrogram, image, and spectra file descriptions and stores all to dictionary where
the key:value pairs are filename:[all descriptors]"""
spectrogram_desc = {}
img_desc = {}
spectrum_desc = {}
pspectrum_desc = {}
with open(self.path,'r', encoding="ISO-8859-1") as f:
## can be made more concise...by incorporating conditons with loop control
lines = f.readlines()
for index, line in enumerate(lines):
sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#img_desc['filename'] = caption, scale, physical unit, offset
img_desc[file_desc[0]] = file_desc[1:]
#if true, file describes spectrogram (ie hyperspectral image)
if sline[0].startswith('FileDesc2Begin'):
no_descriptors = 10
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#caption, bytes perpixel, scale, physical unit, offset, offset, datatype, bytes per reading
#filename wavelengths, phys units wavelengths.
spectrogram_desc[file_desc[0]] = file_desc[1:]
if sline[0].startswith('AFMSpectrumDescBegin'):
file_desc = []
line_desc = [val.strip() for val in lines[index+1].split(':')][1]
if 'powerspectrum' in line_desc:
no_descriptors = 2
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
pspectrum_desc[file_desc[0]] = file_desc[1:]
else:
no_descriptors = 7
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
spectrum_desc[file_desc[0]] = file_desc[1:]
f.close()
self.img_desc = img_desc
self.spectrogram_desc = spectrogram_desc
self.spectrum_desc = spectrum_desc
self.pspectrum_desc = pspectrum_desc
def read_spectrograms(self):
"""reads spectrograms, associated spectral values, and saves them in two dictionaries"""
spectrograms = {}
spectrogram_spec_vals = {}
for file_name, descriptors in self.spectrogram_desc.items():
# load and save spectroscopic values
spec_vals_i = np.loadtxt(os.path.join(self.directory, file_name.strip('.int') + 'Wavelengths.txt'))
#if true, data is acquired with polarizer, with an attenuation data column
if np.array(spec_vals_i).ndim == 2:
spectrogram_spec_vals[file_name] = spec_vals_i[:,0]
attenuation = {}
attenuation[file_name] = spec_vals_i[:,1]
self.attenuation = attenuation
else:
spectrogram_spec_vals[file_name] = spec_vals_i
# load and save spectrograms
spectrogram_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
spectrograms[file_name] = np.zeros((self.x_len, self.y_len, len(spec_vals_i)))
for y, line in enumerate(np.split(spectrogram_i, self.y_len)):
for x, pt_spectrum in enumerate(np.split(line, self.x_len)):
spectrograms[file_name][x, y, :] = pt_spectrum * float(descriptors[2])
self.spectrograms = spectrograms
self.spectrogram_spec_vals = spectrogram_spec_vals
def read_imgs(self):
"""reads images and saves to dictionary"""
imgs = {}
for file_name, descriptors in self.img_desc.items():
img_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
imgs[file_name] = np.zeros((self.x_len, self.y_len))
for y, line in enumerate(np.split(img_i, self.y_len)):
for x, pixel in enumerate(np.split(line, self.x_len)):
imgs[file_name][x, y] = pixel * float(descriptors[1])
self.imgs = imgs
def read_spectra(self):
"""reads all point spectra and saves to dictionary"""
spectra = {}
spectra_spec_vals = {}
spectra_x_y_dim_name = {}
for file_name, descriptors in self.spectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
for file_name, descriptors in self.pspectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
self.spectra = spectra
self.spectra_spec_vals = spectra_spec_vals
self.spectra_x_y_dim_name = spectra_x_y_dim_name
def make_pos_vals_inds_dims(self):
x_range = float(self.params_dictionary['XScanRange'])
y_range = float(self.params_dictionary['YScanRange'])
x_center = float(self.params_dictionary['xCenter'])
y_center = float(self.params_dictionary['yCenter'])
x_start = x_center-(x_range/2); x_end = x_center+(x_range/2)
y_start = y_center-(y_range/2); y_end = y_center+(y_range/2)
dx = x_range/self.x_len
dy = y_range/self.y_len
#assumes y scan direction:down; scan angle: 0 deg
y_linspace = -np.arange(y_start, y_end, step=dy)
x_linspace = np.arange(x_start, x_end, step=dx)
pos_ind, pos_val = write_utils.build_ind_val_matrices(unit_values=(x_linspace, y_linspace), is_spectral=False)
#usid.write_utils.Dimension uses ascii encoding, which can not encode
# micron symbol, so we replace it, if present, with the letter u.
pos_dims = [usid.write_utils.Dimension('X', self.params_dictionary['XPhysUnit'].replace('\xb5', 'u'), self.x_len),
usid.write_utils.Dimension('Y', self.params_dictionary['YPhysUnit'].replace('\xb5', 'u'), self.y_len)]
self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims
def create_hdf5_file(self, append_path='', grp_name='Measurement'):
if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
raise FileExistsError
#if file already exists. (maybe there is a better way to check for this)
else:
self.h5_f = h5py.File(h5_path, mode='w')
else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, grp_name)
usid.hdf_utils.write_simple_attrs(self.h5_meas_grp, self.params_dictionary)
return
def write_spectrograms(self):
if bool(self.spectrogram_desc):
for spectrogram_f, descriptors in self.spectrogram_desc.items():
channel_i = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'Channel_')
spec_vals_i = self.spectrogram_spec_vals[spectrogram_f]
spectrogram_spec_dims = usid.write_utils.Dimension('Wavelength', descriptors[8], spec_vals_i)
h5_raw = usid.hdf_utils.write_main_dataset(channel_i, # parent HDF5 group
(self.x_len *
self.y_len, len(spec_vals_i)), # shape of Main dataset
'Raw_Data', # Name of main dataset
'Spectrogram', # Physical quantity contained in Main dataset
descriptors[3], # Units for the physical quantity
self.pos_dims, # Position dimensions
spectrogram_spec_dims, # Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'Caption': descriptors[0],
'Bytes_Per_Pixel': descriptors[1],
'Scale': descriptors[2],
'Physical_Units': descriptors[3],
'Offset': descriptors[4],
'Datatype': descriptors[5],
'Bytes_Per_Reading': descriptors[6],
'Wavelength_File': descriptors[7],
'Wavelength_Units': descriptors[8]})
h5_raw.h5_pos_vals[:, :] = self.pos_val
h5_raw[:, :] = self.spectrograms[spectrogram_f].reshape(h5_raw.shape)
def write_images(self):
if bool(self.img_desc):
for img_f, descriptors in self.img_desc.items():
#check for existing spectrogram or image and link position/spec inds/vals
#at most two channels worth of need to be checked
try:
str_main = str(usid.hdf_utils.get_all_main(self.h5_f['Measurement_000/Channel_000']))
i_beg = str_main.find('located at: \n\t') + 14
i_end = str_main.find('\nData contains') - 1
data_loc = str_main[i_beg:i_end]
channel_data = USIDataset(self.h5_f[data_loc])
h5_pos_inds = channel_data.h5_pos_inds
h5_pos_vals = channel_data.h5_pos_vals
pos_dims = None
write_pos_vals = False
if channel_data.spec_dim_sizes[0] == 1:
h5_spec_inds = channel_data.h5_spec_inds
h5_spec_vals = channel_data.h5_spec_vals
spec_dims = None
#if channel 000 is spectrogram, check next dataset
elif channel_data.spec_dim_sizes[0] !=1:
str_main = str(usid.hdf_utils.get_all_main(self.h5_f['Measurement_000/Channel_001']))
i_beg = str_main.find('located at: \n\t') + 14
i_end = str_main.find('\nData contains') - 1
data_loc = str_main[i_beg:i_end]
channel_data = USIDataset(self.h5_f[data_loc])
#channel data is an image, & we link their spec inds/vals
if channel_data.spec_dim_sizes[0] == 1:
h5_spec_inds = channel_data.h5_spec_inds
h5_spec_vals = channel_data.h5_spec_vals
spec_dims = None
#in case where channel does not exist, we make new spec/pos inds/vals
except KeyError:
#pos dims
h5_pos_inds = None
h5_pos_vals = None
pos_dims = self.pos_dims
write_pos_vals = True
#spec dims
h5_spec_inds = None
h5_spec_vals = None
spec_dims = usid.write_utils.Dimension('arb', 'a.u', 1)
channel_i = usid.hdf_utils.create_indexed_group(self.h5_meas_grp,'Channel_')
h5_raw = usid.hdf_utils.write_main_dataset(channel_i, #parent HDF5 group
(self.x_len * self.y_len, 1), # shape of Main dataset
'Raw_' + descriptors[0].replace('-', '_'),
# Name of main dataset
descriptors[0],
# Physical quantity contained in Main dataset
descriptors[2], # Units for the physical quantity
h5_pos_inds=h5_pos_inds,
h5_pos_vals=h5_pos_vals,
# Position dimensions
pos_dims=pos_dims,
# Spectroscopic dimensions
h5_spec_inds=h5_spec_inds,
h5_spec_vals=h5_spec_vals,
spec_dims=spec_dims,
dtype=np.float32, # data type / precision
main_dset_attrs={'Caption': descriptors[0],
'Scale': descriptors[1],
'Physical_Units': descriptors[2],
'Offset': descriptors[3]})
h5_raw[:, :] = self.imgs[img_f].reshape(h5_raw.shape)
if write_pos_vals:
h5_raw.h5_pos_vals[:, :] = self.pos_val
def write_spectra(self):
if bool(self.spectrum_desc):
for spec_f, descriptors in self.spectrum_desc.items():
#create new measurement group for ea spectrum
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, 'Measurement_')
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
spec_i_spec_dims = usid.write_utils.Dimension(x_name, x_unit, self.spectra_spec_vals[spec_f])
spec_i_pos_dims = [usid.write_utils.Dimension('X',
self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
np.array([float(descriptors[1])])),
usid.write_utils.Dimension('Y',
self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
np.array([float(descriptors[1])]))]
#write data to a channel in the measurement group
spec_i_ch = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'Spectrum_')
h5_raw = usid.hdf_utils.write_main_dataset(spec_i_ch, # parent HDF5 group
(1, len(self.spectra_spec_vals[spec_f])), # shape of Main dataset
'Raw_Spectrum',
# Name of main dataset
y_name,
# Physical quantity contained in Main dataset
y_unit, # Units for the physical quantity
# Position dimensions
pos_dims=spec_i_pos_dims, spec_dims=spec_i_spec_dims,
# Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'XLoc': descriptors[0],
'YLoc': descriptors[1]})
h5_raw[:, :] = self.spectra[spec_f].reshape(h5_raw.shape)
def write_ps_spectra(self):
if bool(self.pspectrum_desc):
for spec_f, descriptors in self.pspectrum_desc.items():
# create new measurement group for ea spectrum
self.h5_meas_grp = usid.hdf_utils.create_indexed_group(self.h5_f, 'Measurement_')
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
spec_i_spec_dims = usid.write_utils.Dimension(x_name, x_unit, self.spectra_spec_vals[spec_f])
spec_i_pos_dims = [usid.write_utils.Dimension('X',
self.params_dictionary['XPhysUnit'].replace(
'\xb5', 'u'),
np.array([0])),
usid.write_utils.Dimension('Y',
self.params_dictionary['YPhysUnit'].replace(
'\xb5', 'u'),
np.array([0]))]
# write data to a channel in the measurement group
spec_i_ch = usid.hdf_utils.create_indexed_group(self.h5_meas_grp, 'PowerSpectrum_')
h5_raw = usid.hdf_utils.write_main_dataset(spec_i_ch, # parent HDF5 group
(1, len(self.spectra_spec_vals[spec_f])),
# shape of Main dataset
'Raw_Spectrum',
# Name of main dataset
y_name,
# Physical quantity contained in Main dataset
y_unit, # Units for the physical quantity
# Position dimensions
pos_dims=spec_i_pos_dims, spec_dims=spec_i_spec_dims,
# Spectroscopic dimensions
dtype=np.float32, # data type / precision
main_dset_attrs={'XLoc': 0,
'YLoc': 0})
h5_raw[:, :] = self.spectra[spec_f].reshape(h5_raw.shape)
|
the-stack_0_12912 | from django.urls import path,re_path
from measure import views
from rest_framework.routers import DefaultRouter
app_name = 'measure'
router = DefaultRouter()
urlpatterns = [
re_path(r'api/MeasureRecordCreate/', views.MeasureRecordCreate.as_view()),
re_path(r'api/MeasureRecordList/(?P<userid>[a-zA-Z0-9]+)/(?P<incident_category>[0-9]+)$', views.MeasureRecordList.as_view()),
re_path(r'api/MeasureRecordListByUser/(?P<userid>[a-zA-Z0-9]+)$', views.MeasureRecordListByUser.as_view())
]
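# Note: `router` is instantiated above but its generated routes are not exposed; if
# any ViewSets were registered on it, the URLs would still need to be added, e.g.
# `urlpatterns += router.urls`.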
|
the-stack_0_12914 | from colorama import Fore
from time import sleep
from config import token
from codecs import open
from requests import get
import os
logo = """
__ _ _ _ _
/ _| | | | | (_) |
__ _| |_ ___| |_ ___| |__ _| |_
\ \/ / _/ _ \ __/ __| '_ \| | __|
> <| || __/ || (__| | | | | |_
/_/\_\_| \___|\__\___|_| |_|_|\__|
v1.3 xstratumm
"""
intro = """
xfetchit uses public VKontakte API (https://vk.com/dev/methods).
Only you are responsible for your actions.
"""
LINK = "https://api.vk.com/method/"
def cls():
os.system("cls" if os.name == "nt" else "clear")
def donate():
wallets = """
I will be very grateful for any crypto from you,
thx! :3
BTC 1HzA8mZxksDGNuTMu5sKUottp9S8bv9NKA
ETH 0xe9a30E9c2aa2D72c224e771c316aE9a7F4fdE36A
LTC LKeWWBWSN7JQxBKDx32WQnJYPD77LdNSrx
ZEC t1HvDeXHFtoTBYHbzNpVH5ocLgnannmdhhc
Dash XrFaQBuBK7GKtPyWWEy8vsTguer4qRqNCX
ETC 0x6d5644C78CBB78542c6219E3815ffE7EbEBd88bf
QTUM QeQ9SaJEHJ9uR2Apa9ymonfpAudnamBUuY
TRX TKojkeYBDY74ghqrFrj9dTWziw6y2Mh1CN
"""
cls()
print(wallets)
def fetch(offset, group_id):
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "offset": offset, "fields": "contacts"}).json()
return r
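# groups.getMembers returns at most 1000 members per call, so `fetch` is used to
# page through larger groups by passing an increasing offset.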
def parse(user, parsed):
if not "mobile_phone" in user or not user["mobile_phone"]:
pass
else:
user = user["mobile_phone"]
if user[0] in ["7", "8", "+"]:
parsed.append(user)
def groupParse(group_id):
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "fields": "contacts"}).json()
if not "response" in r:
print("\nInvalid group ID or screen name (or group is private).")
print("Please check it and try one more time.")
else:
cls()
print("Number of members: " + str(r["response"]["count"]))
print("\nStarting parsing in 5 seconds.")
sleep(5)
cls()
print("Parsing started.")
print("It can take some time according to amount of group members.\n")
print("Wait...")
users = r["response"]["items"]
count = r["response"]["count"]
parsed = []
for user in users:
parse(user, parsed)
if count >= 1000:
left = count - len(users)
if left <= 1000:
r = get(LINK + "groups.getMembers",
params={"access_token": token, "v": 5.9, "group_id": group_id, "offset": 1000, "fields": "contacts"}).json()
for user in r["response"]["items"]:
parse(user, parsed)
            else:
                # remaining members, fetched 1000 at a time
                offset = 1000
                while offset < count:
                    r = fetch(offset, group_id)
                    for user in r["response"]["items"]:
                        parse(user, parsed)
                    offset += 1000
cls()
if len(parsed) == 0:
print("Parsing ended, but " + Fore.RED + "nothing found" + Fore.RESET + ".\nTry another group.")
else:
print("Parsing ended. Found: " + str(len(parsed)) + " numbers")
print("\nSaving results to \"found.txt\"")
if os.path.isfile("found.txt"):
f = open("found.txt", 'a', "utf-8")
else:
f = open("found.txt", "w", "utf-8")
for user in parsed:
f.write(user + "\r\n")
f.close()
def main():
cls()
print(Fore.RED + logo + Fore.RESET + intro + "\n")
print("Choose:\n\n1) Parse phone numbers\n" + "2) Exit\n" +
Fore.YELLOW + "3) Donate\n" + Fore.RESET)
choice = input("> ")
if choice == "1":
cls()
print("Choose:\n\n" + Fore.BLUE + "1) Group" + Fore.RESET + "\n*parses" +
" all users' phone numbers from specified group\n\n" +
"2) Exit\n")
choice = input("> ")
if choice == "1":
cls()
group_id = input(Fore.BLUE + "Enter group ID or its screen name\n" + Fore.RESET + "> ")
groupParse(group_id)
elif choice == "2":
exit(0)
else:
print("\nInvalid choice.\nPlease read one more time.")
elif choice == "2":
exit(0)
elif choice == "3":
donate()
exit(0)
else:
print("\nInvalid choice.\nPlease read one more time.")
if __name__ == "__main__":
if len(token) < 85:
print("\nInvalid token.\n\nPlease configure it in\n\"config.py\"")
else:
main()
|
the-stack_0_12918 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2018, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import deque
from datetime import datetime
from time import sleep
from warnings import warn
from py2neo.cypher import cypher_escape
from py2neo.data import Table, Record
from py2neo.internal.addressing import get_connection_data
from py2neo.internal.caching import ThreadLocalEntityCache
from py2neo.internal.compat import string_types, xstr
from py2neo.internal.util import version_tuple, title_case, snake_case
from py2neo.matching import NodeMatcher, RelationshipMatcher
update_stats_keys = [
"constraints_added",
"constraints_removed",
"indexes_added",
"indexes_removed",
"labels_added",
"labels_removed",
"nodes_created",
"nodes_deleted",
"properties_set",
"relationships_deleted",
"relationships_created",
]
class Database(object):
""" Accessor for an entire Neo4j graph database installation over
Bolt or HTTP. Within the py2neo object hierarchy, a :class:`.Database`
contains a :class:`.Graph` in which most activity occurs. Currently,
Neo4j only supports one `Graph` per `Database`.
An explicit URI can be passed to the constructor::
>>> from py2neo import Database
>>> db = Database("bolt://camelot.example.com:7687")
Alternatively, the default value of ``bolt://localhost:7687`` is
used::
>>> default_db = Database()
>>> default_db
<Database uri='bolt://localhost:7687'>
"""
_instances = {}
_driver = None
_graphs = None
@classmethod
def forget_all(cls):
""" Forget all cached :class:`.Database` details.
"""
for _, db in cls._instances.items():
db._driver.close()
db._driver = None
cls._instances.clear()
def __new__(cls, uri=None, **settings):
connection_data = get_connection_data(uri, **settings)
key = connection_data["hash"]
try:
inst = cls._instances[key]
except KeyError:
inst = super(Database, cls).__new__(cls)
inst._connection_data = connection_data
from py2neo.internal.http import HTTPDriver, HTTPSDriver
from neo4j.v1 import Driver
inst._driver = Driver(connection_data["uri"],
auth=connection_data["auth"],
encrypted=connection_data["secure"],
user_agent=connection_data["user_agent"])
inst._graphs = {}
cls._instances[key] = inst
return inst
def __repr__(self):
class_name = self.__class__.__name__
data = self._connection_data
return "<%s uri=%r secure=%r user_agent=%r>" % (
class_name, data["uri"], data["secure"], data["user_agent"])
def __eq__(self, other):
try:
return self.uri == other.uri
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._connection_data["hash"])
def __contains__(self, database):
return database in self._graphs
def __getitem__(self, database):
if database == "data" and database not in self._graphs:
self._graphs[database] = Graph(**self._connection_data)
return self._graphs[database]
def __setitem__(self, database, graph):
self._graphs[database] = graph
def __iter__(self):
yield "data"
@property
def driver(self):
return self._driver
@property
def uri(self):
""" The URI to which this `Database` is connected.
"""
return self._connection_data["uri"]
@property
def default_graph(self):
""" The default graph exposed by this database.
:rtype: :class:`.Graph`
"""
return self["data"]
def keys(self):
return list(self)
def query_jmx(self, namespace, instance=None, name=None, type=None):
""" Query the JMX service attached to this database.
"""
d = {}
for nom, _, attributes in self.default_graph.run("CALL dbms.queryJmx('')"):
ns, _, terms = nom.partition(":")
if ns != namespace:
continue
terms = dict(tuple(term.partition("=")[0::2]) for term in terms.split(","))
if instance is not None and instance != terms["instance"]:
continue
if name is not None and name != terms["name"]:
continue
if type is not None and type != terms["type"]:
continue
for attr_name, attr_data in attributes.items():
attr_value = attr_data.get("value")
if attr_value == "true":
d[attr_name] = True
elif attr_value == "false":
d[attr_name] = False
elif isinstance(attr_value, string_types) and "." in attr_value:
try:
d[attr_name] = float(attr_value)
except (TypeError, ValueError):
d[attr_name] = attr_value
else:
try:
d[attr_name] = int(attr_value)
except (TypeError, ValueError):
d[attr_name] = attr_value
return d
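    # Illustrative use (exact attribute names vary between Neo4j versions):
    #   >>> db = Database()
    #   >>> kernel = db.query_jmx("org.neo4j", name="Kernel")
    #   >>> kernel["DatabaseName"]
    #   'graph.db'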
@property
def name(self):
""" Return the name of the active Neo4j database.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info.get("DatabaseName")
@property
def kernel_start_time(self):
""" Return the time from which this Neo4j instance was in operational mode.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return datetime.fromtimestamp(info["KernelStartTime"] / 1000.0)
@property
def kernel_version(self):
""" Return the version of Neo4j.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
version_string = info["KernelVersion"].partition("version:")[-1].partition(",")[0].strip()
return version_tuple(version_string)
@property
def product(self):
""" Return the product name.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info["KernelVersion"]
@property
def store_creation_time(self):
""" Return the time when this Neo4j graph store was created.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return datetime.fromtimestamp(info["StoreCreationDate"] / 1000.0)
@property
def store_id(self):
""" Return an identifier that, together with store creation time,
uniquely identifies this Neo4j graph store.
"""
info = self.query_jmx("org.neo4j", name="Kernel")
return info["StoreId"]
@property
def primitive_counts(self):
""" Return a dictionary of estimates of the numbers of different
kinds of Neo4j primitives.
"""
return self.query_jmx("org.neo4j", name="Primitive count")
@property
def store_file_sizes(self):
""" Return a dictionary of file sizes for each file in the Neo4j
graph store.
"""
return self.query_jmx("org.neo4j", name="Store file sizes")
@property
def config(self):
""" Return a dictionary of the configuration parameters used to
configure Neo4j.
"""
return self.query_jmx("org.neo4j", name="Configuration")
class Graph(object):
""" The `Graph` class represents the graph data storage space within
a Neo4j graph database. Connection details are provided using URIs
and/or individual settings.
Supported URI schemes are:
- ``http``
- ``https``
- ``bolt``
- ``bolt+routing``
The full set of supported `settings` are:
============== ============================================= ============== =============
Keyword Description Type Default
============== ============================================= ============== =============
``auth`` A 2-tuple of (user, password) tuple ``('neo4j', 'password')``
``host`` Database server host name str ``'localhost'``
``password`` Password to use for authentication str ``'password'``
``port`` Database server port int ``7687``
``scheme`` Use a specific URI scheme str ``'bolt'``
``secure`` Use a secure connection (TLS) bool ``False``
``user`` User to authenticate as str ``'neo4j'``
``user_agent`` User agent to send for all connections str `(depends on URI scheme)`
============== ============================================= ============== =============
Each setting can be provided as a keyword argument or as part of
an ``http:``, ``https:``, ``bolt:`` or ``bolt+routing:`` URI. Therefore, the examples
below are equivalent::
>>> from py2neo import Graph
>>> graph_1 = Graph()
>>> graph_2 = Graph(host="localhost")
>>> graph_3 = Graph("bolt://localhost:7687")
Once obtained, the `Graph` instance provides direct or indirect
access to most of the functionality available within py2neo.
"""
#: The :class:`.Database` to which this :class:`.Graph` belongs.
database = None
#: The :class:`.Schema` resource for this :class:`.Graph`.
schema = None
node_cache = ThreadLocalEntityCache()
relationship_cache = ThreadLocalEntityCache()
def __new__(cls, uri=None, **settings):
name = settings.pop("name", "data")
database = Database(uri, **settings)
if name in database:
inst = database[name]
else:
inst = object.__new__(cls)
inst.database = database
inst.schema = Schema(inst)
inst.__name__ = name
database[name] = inst
return inst
def __repr__(self):
return "<Graph database=%r name=%r>" % (self.database, self.__name__)
def __eq__(self, other):
try:
return self.database == other.database and self.__name__ == other.__name__
except (AttributeError, TypeError):
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.relationships)
def __bool__(self):
return True
__nonzero__ = __bool__
def begin(self, autocommit=False):
""" Begin a new :class:`.Transaction`.
:param autocommit: if :py:const:`True`, the transaction will
automatically commit after the first operation
"""
return Transaction(self, autocommit)
def create(self, subgraph):
""" Run a :meth:`.Transaction.create` operation within a
:class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
with self.begin() as tx:
tx.create(subgraph)
def delete(self, subgraph):
""" Run a :meth:`.Transaction.delete` operation within an
`autocommit` :class:`.Transaction`. To delete only the
relationships, use the :meth:`.separate` method.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
"""
self.begin(autocommit=True).delete(subgraph)
def delete_all(self):
""" Delete all nodes and relationships from this :class:`.Graph`.
.. warning::
This method will permanently remove **all** nodes and relationships
from the graph and cannot be undone.
"""
self.run("MATCH (a) DETACH DELETE a")
self.node_cache.clear()
self.relationship_cache.clear()
def evaluate(self, cypher, parameters=None, **kwparameters):
""" Run a :meth:`.Transaction.evaluate` operation within an
`autocommit` :class:`.Transaction`.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:return: first value from the first record returned or
:py:const:`None`.
"""
return self.begin(autocommit=True).evaluate(cypher, parameters, **kwparameters)
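    # e.g. (sketch; the label, property and value below are hypothetical):
    #   >>> graph.evaluate("MATCH (p:Person {name: $name}) RETURN p.age", name="Alice")
    #   33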
def exists(self, subgraph):
""" Run a :meth:`.Transaction.exists` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:return:
"""
return self.begin(autocommit=True).exists(subgraph)
def match(self, nodes=None, r_type=None, limit=None):
""" Match and return all relationships with specific criteria.
For example, to find all of Alice's friends::
for rel in graph.match((alice, ), r_type="FRIEND"):
print(rel.end_node["name"])
:param nodes: Sequence or Set of start and end nodes (:const:`None` means any node);
a Set implies a match in any direction
:param r_type: type of relationships to match (:const:`None` means any type)
:param limit: maximum number of relationships to match (:const:`None` means unlimited)
"""
return RelationshipMatcher(self).match(nodes=nodes, r_type=r_type).limit(limit)
def match_one(self, nodes=None, r_type=None):
""" Match and return one relationship with specific criteria.
:param nodes: Sequence or Set of start and end nodes (:const:`None` means any node);
a Set implies a match in any direction
:param r_type: type of relationships to match (:const:`None` means any type)
"""
rels = list(self.match(nodes=nodes, r_type=r_type, limit=1))
if rels:
return rels[0]
else:
return None
def merge(self, subgraph, label=None, *property_keys):
""" Run a :meth:`.Transaction.merge` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:param label: label on which to match any existing nodes
:param property_keys: property keys on which to match any existing nodes
"""
with self.begin() as tx:
tx.merge(subgraph, label, *property_keys)
@property
def name(self):
return self.__name__
@property
def nodes(self):
""" Obtain a :class:`.NodeMatcher` for this graph.
This can be used to find nodes that match given criteria:
>>> graph = Graph()
>>> graph.nodes[1234]
(_1234:Person {name: 'Alice'})
>>> graph.nodes.get(1234)
(_1234:Person {name: 'Alice'})
>>> graph.nodes.match("Person", name="Alice").first()
(_1234:Person {name: 'Alice'})
Nodes can also be efficiently counted using this attribute:
>>> len(graph.nodes)
55691
>>> len(graph.nodes.match("Person", age=33))
12
"""
return NodeMatcher(self)
def pull(self, subgraph):
""" Pull data to one or more entities from their remote counterparts.
:param subgraph: the collection of nodes and relationships to pull
"""
with self.begin() as tx:
tx.pull(subgraph)
def push(self, subgraph):
""" Push data from one or more entities to their remote counterparts.
:param subgraph: the collection of nodes and relationships to push
"""
with self.begin() as tx:
tx.push(subgraph)
@property
def relationships(self):
""" Obtain a :class:`.RelationshipMatcher` for this graph.
This can be used to find relationships that match given criteria
as well as efficiently count relationships.
"""
return RelationshipMatcher(self)
def run(self, cypher, parameters=None, **kwparameters):
""" Run a :meth:`.Transaction.run` operation within an
`autocommit` :class:`.Transaction`.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:param kwparameters: extra keyword parameters
:return:
"""
return self.begin(autocommit=True).run(cypher, parameters, **kwparameters)
def separate(self, subgraph):
""" Run a :meth:`.Transaction.separate` operation within an
`autocommit` :class:`.Transaction`.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
self.begin(autocommit=True).separate(subgraph)
class Schema(object):
""" The schema resource attached to a `Graph` instance.
"""
def __init__(self, graph):
self.graph = graph
@property
def node_labels(self):
""" The set of node labels currently defined within the graph.
"""
return frozenset(record[0] for record in self.graph.run("CALL db.labels"))
@property
def relationship_types(self):
""" The set of relationship types currently defined within the graph.
"""
return frozenset(record[0] for record in self.graph.run("CALL db.relationshipTypes"))
def create_index(self, label, *property_keys):
""" Create a schema index for a label and property
key combination.
"""
self.graph.run("CREATE INDEX ON :%s(%s)" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
while property_keys not in self.get_indexes(label):
sleep(0.1)
def create_uniqueness_constraint(self, label, *property_keys):
""" Create a uniqueness constraint for a label.
"""
self.graph.run("CREATE CONSTRAINT ON (a:%s) "
"ASSERT a.%s IS UNIQUE" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
while property_keys not in self.get_uniqueness_constraints(label):
sleep(0.1)
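    # Illustrative use (label and property key are hypothetical):
    #   >>> graph.schema.create_uniqueness_constraint("Person", "email")
    #   >>> graph.schema.get_uniqueness_constraints("Person")
    #   [('email',)]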
def drop_index(self, label, *property_keys):
""" Remove label index for a given property key.
"""
self.graph.run("DROP INDEX ON :%s(%s)" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
def drop_uniqueness_constraint(self, label, *property_keys):
""" Remove the uniqueness constraint for a given property key.
"""
self.graph.run("DROP CONSTRAINT ON (a:%s) "
"ASSERT a.%s IS UNIQUE" %
(cypher_escape(label), ",".join(map(cypher_escape, property_keys)))).close()
def _get_indexes(self, label, t=None):
indexes = []
for record in self.graph.run("CALL db.indexes"):
description = record['description'] if 'description' in record.keys() else None
lbl = record['label'] if 'label' in record.keys() else None
properties = record['properties'] if 'properties' in record.keys() else []
state = record['state'] if 'state' in record.keys() else None
typ = record['type'] if 'type' in record.keys() else None
provider = record['provider'] if 'provider' in record.keys() else None
# minimum requirements are values for description, state, and type
if description is None or state is None or typ is None:
raise RuntimeError("Unexpected response from procedure "
"db.indexes (%d fields)" % len(record))
if state not in (u"ONLINE", u"online"):
continue
if t and typ != t:
continue
if not lbl or not properties:
from py2neo.cypher.lexer import CypherLexer
from pygments.token import Token
tokens = list(CypherLexer().get_tokens(description))
for token_type, token_value in tokens:
if token_type is Token.Name.Label:
lbl = token_value.strip("`")
elif token_type is Token.Name.Variable:
properties.append(token_value.strip("`"))
if not lbl or not properties:
continue
if lbl == label:
indexes.append(tuple(properties))
return indexes
def get_indexes(self, label):
""" Fetch a list of indexed property keys for a label.
"""
return self._get_indexes(label)
def get_uniqueness_constraints(self, label):
""" Fetch a list of unique constraints for a label.
"""
return self._get_indexes(label, "node_unique_property")
class Result(object):
""" Wraps a BoltStatementResult
"""
def __init__(self, graph, entities, result):
from neo4j.v1 import BoltStatementResult
from py2neo.internal.http import HTTPStatementResult
from py2neo.internal.packstream import PackStreamHydrator
self.result = result
self.result.error_class = GraphError.hydrate
# TODO: un-yuk this
if isinstance(result, HTTPStatementResult):
self.result._hydrant.entities = entities
self.result_iterator = iter(self.result)
elif isinstance(result, BoltStatementResult):
self.result._hydrant = PackStreamHydrator(graph, result.keys(), entities)
self.result_iterator = iter(map(Record, self.result))
else:
raise RuntimeError("Unexpected statement result class %r" % result.__class__.__name__)
def keys(self):
""" Return the keys for the whole data set.
"""
return self.result.keys()
def summary(self):
""" Return the summary.
"""
return self.result.summary()
def plan(self):
""" Return the query plan, if available.
"""
metadata = self.result.summary().metadata
plan = {}
if "plan" in metadata:
plan.update(metadata["plan"])
if "profile" in metadata:
plan.update(metadata["profile"])
if "http_plan" in metadata:
plan.update(metadata["http_plan"]["root"])
def collapse_args(data):
if "args" in data:
for key in data["args"]:
data[key] = data["args"][key]
del data["args"]
if "children" in data:
for child in data["children"]:
collapse_args(child)
def snake_keys(data):
if isinstance(data, list):
for item in data:
snake_keys(item)
return
if not isinstance(data, dict):
return
for key, value in list(data.items()):
new_key = snake_case(key)
if new_key != key:
data[new_key] = value
del data[key]
if isinstance(value, (list, dict)):
snake_keys(value)
collapse_args(plan)
snake_keys(plan)
return plan
def stats(self):
""" Return the query statistics.
"""
return vars(self.result.summary().counters)
def fetch(self):
""" Fetch and return the next item.
"""
try:
return next(self.result_iterator)
except StopIteration:
return None
class GraphError(Exception):
"""
"""
__cause__ = None
http_status_code = None
code = None
message = None
@classmethod
def hydrate(cls, data):
code = data["code"]
message = data["message"]
_, classification, category, title = code.split(".")
if classification == "ClientError":
try:
error_cls = ClientError.get_mapped_class(code)
except KeyError:
error_cls = ClientError
message = "%s: %s" % (title_case(title), message)
elif classification == "DatabaseError":
error_cls = DatabaseError
elif classification == "TransientError":
error_cls = TransientError
else:
error_cls = cls
inst = error_cls(message)
inst.code = code
inst.message = message
return inst
def __new__(cls, *args, **kwargs):
try:
exception = kwargs["exception"]
error_cls = type(xstr(exception), (cls,), {})
except KeyError:
error_cls = cls
return Exception.__new__(error_cls, *args)
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
for key, value in kwargs.items():
setattr(self, key.lower(), value)
class ClientError(GraphError):
""" The Client sent a bad request - changing the request might yield a successful outcome.
"""
@classmethod
def get_mapped_class(cls, status):
from neo4j.exceptions import ConstraintError, CypherSyntaxError, CypherTypeError, Forbidden, AuthError
return {
# ConstraintError
"Neo.ClientError.Schema.ConstraintValidationFailed": ConstraintError,
"Neo.ClientError.Schema.ConstraintViolation": ConstraintError,
"Neo.ClientError.Statement.ConstraintVerificationFailed": ConstraintError,
"Neo.ClientError.Statement.ConstraintViolation": ConstraintError,
# CypherSyntaxError
"Neo.ClientError.Statement.InvalidSyntax": CypherSyntaxError,
"Neo.ClientError.Statement.SyntaxError": CypherSyntaxError,
# CypherTypeError
"Neo.ClientError.Procedure.TypeError": CypherTypeError,
"Neo.ClientError.Statement.InvalidType": CypherTypeError,
"Neo.ClientError.Statement.TypeError": CypherTypeError,
# Forbidden
"Neo.ClientError.General.ForbiddenOnReadOnlyDatabase": Forbidden,
"Neo.ClientError.General.ReadOnly": Forbidden,
"Neo.ClientError.Schema.ForbiddenOnConstraintIndex": Forbidden,
"Neo.ClientError.Schema.IndexBelongsToConstrain": Forbidden,
"Neo.ClientError.Security.Forbidden": Forbidden,
"Neo.ClientError.Transaction.ForbiddenDueToTransactionType": Forbidden,
# Unauthorized
"Neo.ClientError.Security.AuthorizationFailed": AuthError,
"Neo.ClientError.Security.Unauthorized": AuthError,
}[status]
class DatabaseError(GraphError):
""" The database failed to service the request.
"""
class TransientError(GraphError):
""" The database cannot service the request right now, retrying later might yield a successful outcome.
"""
class TransactionFinished(GraphError):
""" Raised when actions are attempted against a :class:`.Transaction`
that is no longer available for use.
"""
class Transaction(object):
""" A transaction is a logical container for multiple Cypher statements.
"""
session = None
_finished = False
def __init__(self, graph, autocommit=False):
self.graph = graph
self.autocommit = autocommit
self.entities = deque()
self.driver = driver = self.graph.database.driver
self.session = driver.session()
self.results = []
if autocommit:
self.transaction = None
else:
self.transaction = self.session.begin_transaction()
def __del__(self):
if self.session:
self.session.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.commit()
else:
self.rollback()
def _assert_unfinished(self):
if self._finished:
raise TransactionFinished(self)
def finished(self):
""" Indicates whether or not this transaction has been completed
or is still open.
"""
return self._finished
def run(self, cypher, parameters=None, **kwparameters):
""" Send a Cypher statement to the server for execution and return
a :py:class:`.Cursor` for navigating its result.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:returns: :py:class:`.Cursor` object
"""
from neo4j.v1 import CypherError
self._assert_unfinished()
try:
entities = self.entities.popleft()
except IndexError:
entities = {}
try:
if self.transaction:
result = self.transaction.run(cypher, parameters, **kwparameters)
else:
result = self.session.run(cypher, parameters, **kwparameters)
except CypherError as error:
raise GraphError.hydrate({"code": error.code, "message": error.message})
else:
r = Result(self.graph, entities, result)
self.results.append(r)
return Cursor(r)
finally:
if not self.transaction:
self.finish()
def process(self):
""" Send all pending statements to the server for processing.
"""
self._assert_unfinished()
self.session.sync()
def finish(self):
self.process()
if self.transaction:
self.transaction.close()
self._assert_unfinished()
self._finished = True
self.session.close()
self.session = None
def commit(self):
""" Commit the transaction.
"""
if self.transaction:
self.transaction.success = True
self.finish()
def rollback(self):
""" Roll back the current transaction, undoing all actions previously taken.
"""
self._assert_unfinished()
if self.transaction:
self.transaction.success = False
self.finish()
def evaluate(self, cypher, parameters=None, **kwparameters):
""" Execute a single Cypher statement and return the value from
the first column of the first record.
:param cypher: Cypher statement
:param parameters: dictionary of parameters
:returns: single return value or :const:`None`
"""
return self.run(cypher, parameters, **kwparameters).evaluate(0)
def create(self, subgraph):
""" Create remote nodes and relationships that correspond to those in a
local subgraph. Any entities in *subgraph* that are already bound to
remote entities will remain unchanged, those which are not will become
bound to their newly-created counterparts.
For example::
>>> from py2neo import Graph, Node, Relationship
>>> g = Graph()
>>> tx = g.begin()
>>> a = Node("Person", name="Alice")
>>> tx.create(a)
>>> b = Node("Person", name="Bob")
>>> ab = Relationship(a, "KNOWS", b)
>>> tx.create(ab)
>>> tx.commit()
>>> g.exists(ab)
True
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
creatable object
"""
try:
create = subgraph.__db_create__
except AttributeError:
raise TypeError("No method defined to create object %r" % subgraph)
else:
create(self)
def delete(self, subgraph):
""" Delete the remote nodes and relationships that correspond to
those in a local subgraph. To delete only the relationships, use
the :meth:`.separate` method.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
delete = subgraph.__db_delete__
except AttributeError:
raise TypeError("No method defined to delete object %r" % subgraph)
else:
delete(self)
def exists(self, subgraph):
""" Determine whether one or more graph entities all exist within the
database. Note that if any nodes or relationships in *subgraph* are not
bound to remote counterparts, this method will return ``False``.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
:returns: ``True`` if all entities exist remotely, ``False`` otherwise
"""
try:
exists = subgraph.__db_exists__
except AttributeError:
raise TypeError("No method defined to check existence of object %r" % subgraph)
else:
return exists(self)
def merge(self, subgraph, primary_label=None, primary_key=None):
""" Merge nodes and relationships from a local subgraph into the
database. Each node and relationship is merged independently, with
nodes merged first and relationships merged second.
For each node, the merge is carried out by comparing that node with a
potential remote equivalent on the basis of a label and property value.
If no remote match is found, a new node is created. The label and
property to use for comparison are determined by `primary_label` and
`primary_key` but may be overridden for individual nodes by the
presence of `__primarylabel__` and `__primarykey__` attributes on
the node itself. Note that multiple property keys may be specified by
using a tuple.
For each relationship, the merge is carried out by comparing that
relationship with a potential remote equivalent on the basis of matching
start and end nodes plus relationship type. If no remote match is found,
a new relationship is created.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph` object
:param primary_label: label on which to match any existing nodes
:param primary_key: property key(s) on which to match any existing
nodes
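Example (an illustrative sketch; assumes an open transaction ``tx``)::
>>> from py2neo import Node
>>> alice = Node("Person", name="Alice")
>>> tx.merge(alice, primary_label="Person", primary_key="name")
>>> tx.commit()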
"""
try:
merge = subgraph.__db_merge__
except AttributeError:
raise TypeError("No method defined to merge object %r" % subgraph)
else:
merge(self, primary_label, primary_key)
def pull(self, subgraph):
""" Update local entities from their remote counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, pull properties
and node labels into the local copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
pull = subgraph.__db_pull__
except AttributeError:
raise TypeError("No method defined to pull object %r" % subgraph)
else:
return pull(self)
def push(self, subgraph):
""" Update remote entities from their local counterparts.
For any nodes and relationships that exist in both the local
:class:`.Subgraph` and the remote :class:`.Graph`, push properties
and node labels into the remote copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
push = subgraph.__db_push__
except AttributeError:
raise TypeError("No method defined to push object %r" % subgraph)
else:
return push(self)
def separate(self, subgraph):
""" Delete the remote relationships that correspond to those in a local
subgraph. This leaves any nodes untouched.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
separate = subgraph.__db_separate__
except AttributeError:
raise TypeError("No method defined to separate object %r" % subgraph)
else:
separate(self)
class Cursor(object):
""" A `Cursor` is a navigator for a stream of records.
A cursor can be thought of as a window onto an underlying data
stream. All cursors in py2neo are "forward-only", meaning that
navigation starts before the first record and may proceed only in a
forward direction.
It is not generally necessary for application code to instantiate a
cursor directly as one will be returned by any Cypher execution method.
However, cursor creation requires only a :class:`.DataSource` object
which contains the logic for how to access the source data that the
cursor navigates.
Many simple cursor use cases require only the :meth:`.forward` method
and the :attr:`.current` attribute. To navigate through all available
records, a `while` loop can be used::
while cursor.forward():
print(cursor.current["name"])
If only the first record is of interest, a similar `if` structure will
do the job::
if cursor.forward():
print(cursor.current["name"])
To combine `forward` and `current` into a single step, use the built-in
:py:func:`next` function::
print(next(cursor)["name"])
Cursors are also iterable, so can be used in a loop::
for record in cursor:
print(record["name"])
For queries that are expected to return only a single value within a
single record, use the :meth:`.evaluate` method. This will return the
first value from the next record or :py:const:`None` if neither the
field nor the record is present::
print(cursor.evaluate())
"""
def __init__(self, result):
self._result = result
self._current = None
def __next__(self):
if self.forward():
return self._current
else:
raise StopIteration()
# Exists only for Python 2 iteration compatibility
next = __next__
def __iter__(self):
while self.forward():
yield self._current
def __getitem__(self, key):
return self._current[key]
@property
def current(self):
""" Returns the current record or :py:const:`None` if no record
has yet been selected.
"""
return self._current
def close(self):
""" Close this cursor and free up all associated resources.
"""
self._result = None
self._current = None
def keys(self):
""" Return the field names for the records in the stream.
"""
return self._result.keys()
def summary(self):
""" Return the result summary.
"""
return self._result.summary()
def plan(self):
""" Return the plan returned with this result, if any.
"""
return self._result.plan()
def stats(self):
""" Return the query statistics.
"""
s = dict.fromkeys(update_stats_keys, 0)
s.update(self._result.stats())
s["contains_updates"] = bool(sum(s.get(k, 0) for k in update_stats_keys))
return s
def forward(self, amount=1):
""" Attempt to move the cursor one position forward (or by
another amount if explicitly specified). The cursor will move
position by up to, but never more than, the amount specified.
If not enough scope for movement remains, only that remainder
will be consumed. The total amount moved is returned.
:param amount: the amount to move the cursor
:returns: the amount that the cursor was able to move
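Example (illustrative; ``cursor`` is any :class:`.Cursor` instance)::
>>> moved = cursor.forward(3)  # 0 <= moved <= 3, depending on records remaining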
"""
if amount == 0:
return 0
assert amount > 0
amount = int(amount)
moved = 0
fetch = self._result.fetch
while moved != amount:
new_current = fetch()
if new_current is None:
break
else:
self._current = new_current
moved += 1
return moved
def evaluate(self, field=0):
""" Return the value of the first field from the next record
(or the value of another field if explicitly specified).
This method attempts to move the cursor one step forward and,
if successful, selects and returns an individual value from
the new current record. By default, this value will be taken
from the first value in that record but this can be overridden
with the `field` argument, which can represent either a
positional index or a textual key.
If the cursor cannot be moved forward or if the record contains
no values, :py:const:`None` will be returned instead.
This method is particularly useful when it is known that a
Cypher query returns only a single value.
:param field: field to select value from (optional)
:returns: value of the field or :py:const:`None`
Example:
>>> from py2neo import Graph
>>> g = Graph()
>>> g.run("MATCH (a) WHERE a.email={x} RETURN a.name", x="[email protected]").evaluate()
'Bob Robertson'
"""
if self.forward():
try:
return self[field]
except IndexError:
return None
else:
return None
def data(self):
""" Consume and extract the entire result as a list of
dictionaries.
::
>>> from py2neo import Graph
>>> graph = Graph()
>>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").data()
[{'a.born': 1964, 'a.name': 'Keanu Reeves'},
{'a.born': 1967, 'a.name': 'Carrie-Anne Moss'},
{'a.born': 1961, 'a.name': 'Laurence Fishburne'},
{'a.born': 1960, 'a.name': 'Hugo Weaving'}]
:return: the full query result
:rtype: `list` of `dict`
"""
return [record.data() for record in self]
def to_table(self):
""" Consume and extract the entire result as a :class:`.Table`
object.
:return: the full query result
"""
return Table(self)
def to_subgraph(self):
""" Consume and extract the entire result as a :class:`.Subgraph`
containing the union of all the graph structures within.
:return: :class:`.Subgraph` object
"""
s = None
for record in self:
s_ = record.to_subgraph()
if s_ is not None:
if s is None:
s = s_
else:
s |= s_
return s
def to_ndarray(self, dtype=None, order='K'):
""" Consume and extract the entire result as a
`numpy.ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`_.
.. note::
This method requires `numpy` to be installed.
:param dtype:
:param order:
:warns: If `numpy` is not installed
:returns: `ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__ object.
"""
try:
from numpy import array
except ImportError:
warn("Numpy is not installed.")
raise
else:
return array(list(map(list, self)), dtype=dtype, order=order)
def to_series(self, field=0, index=None, dtype=None):
""" Consume and extract one field of the entire result as a
`pandas.Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`_.
.. note::
This method requires `pandas` to be installed.
:param field:
:param index:
:param dtype:
:warns: If `pandas` is not installed
:returns: `Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`__ object.
"""
try:
from pandas import Series
except ImportError:
warn("Pandas is not installed.")
raise
else:
return Series([record[field] for record in self], index=index, dtype=dtype)
def to_data_frame(self, index=None, columns=None, dtype=None):
""" Consume and extract the entire result as a
`pandas.DataFrame <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe>`_.
::
>>> from py2neo import Graph
>>> graph = Graph()
>>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").to_data_frame()
a.born a.name
0 1964 Keanu Reeves
1 1967 Carrie-Anne Moss
2 1961 Laurence Fishburne
3 1960 Hugo Weaving
.. note::
This method requires `pandas` to be installed.
:param index: Index to use for resulting frame.
:param columns: Column labels to use for resulting frame.
:param dtype: Data type to force.
:warns: If `pandas` is not installed
:returns: `DataFrame <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe>`__ object.
"""
try:
from pandas import DataFrame
except ImportError:
warn("Pandas is not installed.")
raise
else:
return DataFrame(list(map(dict, self)), index=index, columns=columns, dtype=dtype)
def to_matrix(self, mutable=False):
""" Consume and extract the entire result as a
`sympy.Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_.
.. note::
This method requires `sympy` to be installed.
:param mutable:
:returns: `Matrix <http://docs.sympy.org/latest/tutorial/matrices.html>`_ object.
"""
try:
from sympy import MutableMatrix, ImmutableMatrix
except ImportError:
warn("Sympy is not installed.")
raise
else:
if mutable:
return MutableMatrix(list(map(list, self)))
else:
return ImmutableMatrix(list(map(list, self)))
|
the-stack_0_12919 | """
Display number of ongoing tickets from RT queues.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 300)
db: database to use (default '')
format: see placeholders below (default 'general: {General}')
host: database host to connect to (default '')
password: login password (default '')
threshold_critical: set bad color above this threshold (default 20)
threshold_warning: set degraded color above this threshold (default 10)
timeout: timeout for database connection (default 5)
user: login user (default '')
Format placeholders:
{YOUR_QUEUE_NAME} number of ongoing RT tickets (open+new+stalled)
Color options:
color_bad: Exceeded threshold_critical
color_degraded: Exceeded threshold_warning
Requires:
PyMySQL: https://pypi.org/project/PyMySQL/
or
MySQL-python: https://pypi.org/project/MySQL-python/
It features thresholds to colorize the output and forces a low timeout to
limit the impact of a server connectivity problem on your i3bar freshness.
@author ultrabug
SAMPLE OUTPUT
{'full_text': 'general: 24'}
"""
try:
import pymysql as mysql
except: # noqa e722 // (ImportError, ModuleNotFoundError): # py2/py3
import MySQLdb as mysql
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 300
db = ""
format = "general: {General}"
host = ""
password = ""
threshold_critical = 20
threshold_warning = 10
timeout = 5
user = ""
def rt(self):
has_one_queue_formatted = False
response = {"full_text": ""}
tickets = {}
mydb = mysql.connect(
host=self.host,
user=self.user,
passwd=self.password,
db=self.db,
connect_timeout=self.timeout,
)
mycr = mydb.cursor()
mycr.execute(
"""select q.Name as queue, coalesce(total,0) as total
from Queues as q
left join (
select t.Queue as queue, count(t.id) as total
from Tickets as t
where Status = 'new' or Status = 'open' or Status = 'stalled'
group by t.Queue)
as s on s.Queue = q.id
group by q.Name;"""
)
for row in mycr.fetchall():
queue, nb_tickets = row
if queue == "___Approvals":
continue
tickets[queue] = nb_tickets
if queue in self.format:
has_one_queue_formatted = True
if nb_tickets > self.threshold_critical:
response.update({"color": self.py3.COLOR_BAD})
elif nb_tickets > self.threshold_warning and "color" not in response:
response.update({"color": self.py3.COLOR_DEGRADED})
if has_one_queue_formatted:
response["full_text"] = self.py3.safe_format(self.format, tickets)
else:
response["full_text"] = f"queue(s) not found ({self.format})"
mydb.close()
response["cached_until"] = self.py3.time_in(self.cache_timeout)
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
the-stack_0_12920 | from __future__ import division
import inspect
import os
from collections import OrderedDict, namedtuple
from copy import copy
from distutils.version import LooseVersion
from itertools import product
import corner
import json
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import lines as mpllines
import numpy as np
import pandas as pd
import scipy.stats
from scipy.special import logsumexp
from . import utils
from .utils import (
logger, infer_parameters_from_function,
check_directory_exists_and_if_not_mkdir,
latex_plot_format, safe_save_figure,
BilbyJsonEncoder, load_json,
move_old_file, get_version_information,
decode_bilby_json,
)
from .prior import Prior, PriorDict, DeltaFunction
def result_file_name(outdir, label, extension='json', gzip=False):
""" Returns the standard filename used for a result file
Parameters
----------
outdir: str
Name of the output directory
label: str
Naming scheme of the output file
extension: str, optional
Whether to save as `hdf5` or `json`
gzip: bool, optional
Set to True to append `.gz` to the extension for saving in gzipped format
Returns
-------
str: File name of the output file
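Example (a minimal sketch; the label is hypothetical and a POSIX path
separator is assumed)::
>>> result_file_name('outdir', 'mylabel')
'outdir/mylabel_result.json'
>>> result_file_name('outdir', 'mylabel', gzip=True)
'outdir/mylabel_result.json.gz'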
"""
if extension in ['json', 'hdf5']:
if extension == 'json' and gzip:
return os.path.join(outdir, '{}_result.{}.gz'.format(label, extension))
else:
return os.path.join(outdir, '{}_result.{}'.format(label, extension))
else:
raise ValueError("Extension type {} not understood".format(extension))
def _determine_file_name(filename, outdir, label, extension, gzip):
""" Helper method to determine the filename """
if filename is not None:
return filename
else:
if (outdir is None) and (label is None):
raise ValueError("No information given to load file")
else:
return result_file_name(outdir, label, extension, gzip)
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
""" Reads in a stored bilby result object
Parameters
----------
filename: str
Path to the file to be read (alternative to giving the outdir and label)
outdir, label, extension: str
Name of the output directory, label and extension used for the default
naming scheme.
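Example (illustrative; the paths are hypothetical)::
>>> result = read_in_result(filename='outdir/mylabel_result.json')
>>> result = read_in_result(outdir='outdir', label='mylabel')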
"""
filename = _determine_file_name(filename, outdir, label, extension, gzip)
# Get the actual extension (may differ from the default extension if the filename is given)
extension = os.path.splitext(filename)[1].lstrip('.')
if extension == 'gz': # gzipped file
extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
if 'json' in extension:
result = Result.from_json(filename=filename)
elif ('hdf5' in extension) or ('h5' in extension):
result = Result.from_hdf5(filename=filename)
elif extension is None:
raise ValueError("No filetype extension provided")
else:
raise ValueError("Filetype {} not understood".format(extension))
return result
def get_weights_for_reweighting(
result, new_likelihood=None, new_prior=None, old_likelihood=None,
old_prior=None):
""" Calculate the weights for reweight()
See bilby.core.result.reweight() for help with the inputs
Returns
-------
ln_weights: array
An array of the natural-log weights
new_log_likelihood_array: array
An array of the natural-log likelihoods
new_log_prior_array: array
An array of the natural-log priors
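Notes
-----
For each posterior sample, the natural-log weight is computed as
``ln_w = new_log_likelihood + new_log_prior - old_log_likelihood - old_log_prior``,
with the old values read from the stored posterior whenever no old
likelihood or prior object is supplied.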
"""
nposterior = len(result.posterior)
old_log_likelihood_array = np.zeros(nposterior)
old_log_prior_array = np.zeros(nposterior)
new_log_likelihood_array = np.zeros(nposterior)
new_log_prior_array = np.zeros(nposterior)
for ii, sample in result.posterior.iterrows():
# Convert sample to dictionary
par_sample = {key: sample[key] for key in result.search_parameter_keys}
if old_likelihood is not None:
old_likelihood.parameters.update(par_sample)
old_log_likelihood_array[ii] = old_likelihood.log_likelihood()
else:
old_log_likelihood_array[ii] = sample["log_likelihood"]
if new_likelihood is not None:
new_likelihood.parameters.update(par_sample)
new_log_likelihood_array[ii] = new_likelihood.log_likelihood()
else:
# Don't perform likelihood reweighting (i.e. likelihood isn't updated)
new_log_likelihood_array[ii] = old_log_likelihood_array[ii]
if old_prior is not None:
old_log_prior_array[ii] = old_prior.ln_prob(par_sample)
else:
old_log_prior_array[ii] = sample["log_prior"]
if new_prior is not None:
new_log_prior_array[ii] = new_prior.ln_prob(par_sample)
else:
# Don't perform prior reweighting (i.e. prior isn't updated)
new_log_prior_array[ii] = old_log_prior_array[ii]
ln_weights = (
new_log_likelihood_array + new_log_prior_array - old_log_likelihood_array - old_log_prior_array)
return ln_weights, new_log_likelihood_array, new_log_prior_array
def rejection_sample(posterior, weights):
""" Perform rejection sampling on a posterior using weights
Parameters
----------
posterior: pd.DataFrame or np.ndarray of shape (nsamples, nparameters)
The dataframe or array containing posterior samples
weights: np.ndarray
An array of weights
Returns
-------
reweighted_posterior: pd.DataFrame
The posterior resampled using rejection sampling
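Example (a minimal sketch; assumes an existing ``result`` and the
``ln_weights`` returned by :func:`get_weights_for_reweighting`)::
>>> import numpy as np
>>> weights = np.exp(ln_weights)
>>> resampled = rejection_sample(result.posterior, weights)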
"""
keep = weights > np.random.uniform(0, max(weights), weights.shape)
return posterior[keep]
def reweight(result, label=None, new_likelihood=None, new_prior=None,
old_likelihood=None, old_prior=None):
""" Reweight a result to a new likelihood/prior using rejection sampling
Parameters
----------
label: str, optional
An updated label to apply to the result object
new_likelihood: bilby.core.likelihood.Likelihood, (optional)
If given, the new likelihood to reweight to. If not given, likelihood
reweighting is not applied
new_prior: bilby.core.prior.PriorDict, (optional)
If given, the new prior to reweight to. If not given, prior
reweighting is not applied
old_likelihood: bilby.core.likelihood.Likelihood, (optional)
If given, calculate the old likelihoods from this object. If not given,
the values stored in the posterior are used.
old_prior: bilby.core.prior.PriorDict, (optional)
If given, calculate the old prior from this object. If not given,
the values stored in the posterior are used.
Returns
-------
result: bilby.core.result.Result
A copy of the result object with a reweighted posterior
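Example (a minimal sketch; assumes an existing ``result`` and a new
:class:`.PriorDict` called ``new_priors``)::
>>> reweighted = reweight(result, label='reweighted_run', new_prior=new_priors)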
"""
result = copy(result)
nposterior = len(result.posterior)
logger.info("Reweighting posterior with {} samples".format(nposterior))
ln_weights, new_log_likelihood_array, new_log_prior_array = get_weights_for_reweighting(
result, new_likelihood=new_likelihood, new_prior=new_prior,
old_likelihood=old_likelihood, old_prior=old_prior)
# Overwrite the likelihood and prior evaluations
result.posterior["log_likelihood"] = new_log_likelihood_array
result.posterior["log_prior"] = new_log_prior_array
weights = np.exp(ln_weights)
result.posterior = rejection_sample(result.posterior, weights=weights)
logger.info("Rejection sampling resulted in {} samples".format(len(result.posterior)))
result.meta_data["reweighted_using_rejection_sampling"] = True
result.log_evidence += logsumexp(ln_weights) - np.log(nposterior)
result.priors = new_prior
if label:
result.label = label
else:
result.label += "_reweighted"
return result
class Result(object):
def __init__(self, label='no_label', outdir='.', sampler=None,
search_parameter_keys=None, fixed_parameter_keys=None,
constraint_parameter_keys=None, priors=None,
sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
log_evidence_err=np.nan, log_noise_evidence=np.nan,
log_bayes_factor=np.nan, log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
parameter_labels=None, parameter_labels_with_unit=None,
gzip=False, version=None):
""" A class to store the results of the sampling run
Parameters
----------
label, outdir, sampler: str
The label, output directory, and sampler used
search_parameter_keys, fixed_parameter_keys, constraint_parameter_keys: list
Lists of the search, constraint, and fixed parameter keys.
Elements of the list should be of type `str` and match the keys
of the `prior`
priors: dict, bilby.core.prior.PriorDict
A dictionary of the priors used in the run
sampler_kwargs: dict
Key word arguments passed to the sampler
injection_parameters: dict
A dictionary of the injection parameters
meta_data: dict
A dictionary of meta data to store about the run
posterior: pandas.DataFrame
A pandas data frame of the posterior
samples, nested_samples: array_like
An array of the output posterior samples and the unweighted samples
log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
Natural log evidences
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
num_likelihood_evaluations: int
The number of times the likelihood function is called
log_prior_evaluations: array_like
The evaluations of the prior for each sample point
sampling_time: float
The time taken to complete the sampling
nburn: int
The number of burn-in steps discarded for MCMC samplers
walkers: array_like
The samples taken by an ensemble MCMC sampler
max_autocorrelation_time: float
The estimated maximum autocorrelation time for MCMC samplers
use_ratio: bool
A boolean stating whether the likelihood ratio, as opposed to the
likelihood was used during sampling
parameter_labels, parameter_labels_with_unit: list
Lists of the latex-formatted parameter labels
gzip: bool
Set to True to gzip the results file (if using json format)
version: str,
Version information for software used to generate the result. Note,
this information is generated when the result object is initialized
Note
---------
All sampling output parameters, e.g. the samples themselves are
typically not given at initialisation, but set at a later stage.
"""
self.label = label
self.outdir = os.path.abspath(outdir)
self.sampler = sampler
self.search_parameter_keys = search_parameter_keys
self.fixed_parameter_keys = fixed_parameter_keys
self.constraint_parameter_keys = constraint_parameter_keys
self.parameter_labels = parameter_labels
self.parameter_labels_with_unit = parameter_labels_with_unit
self.priors = priors
self.sampler_kwargs = sampler_kwargs
self.meta_data = meta_data
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
self.use_ratio = use_ratio
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
self.prior_values = None
self._kde = None
@classmethod
def from_hdf5(cls, filename=None, outdir=None, label=None):
""" Read in a saved .h5 data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
import deepdish
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
if os.path.isfile(filename):
dictionary = deepdish.io.load(filename)
# Some versions of deepdish/pytables return the dictionary as
# a dictionary with a key 'data'
if len(dictionary) == 1 and 'data' in dictionary:
dictionary = dictionary['data']
if "priors" in dictionary:
# parse priors from JSON string (allowing for backwards
# compatibility)
if not isinstance(dictionary["priors"], PriorDict):
try:
priordict = PriorDict()
for key, value in dictionary["priors"].items():
if key not in ["__module__", "__name__", "__prior_dict__"]:
priordict[key] = decode_bilby_json(value)
dictionary["priors"] = priordict
except Exception as e:
raise IOError(
"Unable to parse priors from '{}':\n{}".format(
filename, e,
)
)
try:
if isinstance(dictionary.get('posterior', None), dict):
dictionary['posterior'] = pd.DataFrame(dictionary['posterior'])
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
@classmethod
def from_json(cls, filename=None, outdir=None, label=None, gzip=False):
""" Read in a saved .json data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
filename = _determine_file_name(filename, outdir, label, 'json', gzip)
if os.path.isfile(filename):
dictionary = load_json(filename, gzip)
try:
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
def __str__(self):
"""Print a summary """
if getattr(self, 'posterior', None) is not None:
if getattr(self, 'log_noise_evidence', None) is not None:
return ("nsamples: {:d}\n"
"ln_noise_evidence: {:6.3f}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
"ln_bayes_factor: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_noise_evidence, self.log_evidence,
self.log_evidence_err, self.log_bayes_factor,
self.log_evidence_err))
else:
return ("nsamples: {:d}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_evidence, self.log_evidence_err))
else:
return ''
@property
def priors(self):
if self._priors is not None:
return self._priors
else:
raise ValueError('Result object has no priors')
@priors.setter
def priors(self, priors):
if isinstance(priors, dict):
if isinstance(priors, PriorDict):
self._priors = priors
else:
self._priors = PriorDict(priors)
if self.parameter_labels is None:
if 'H_eff5' in self.search_parameter_keys:
self.priors['H_eff5'].latex_label = '$H_{eff5}$'
if 'chi_1' in self.search_parameter_keys:
self.priors['chi_1'].latex_label = '$\\chi_1$'
if 'chi_2' in self.search_parameter_keys:
self.priors['chi_2'].latex_label = '$\\chi_2$'
if 'Q_tilde' in self.search_parameter_keys:
self.priors['Q_tilde'].latex_label = '$\\tilde{Q}$'
self.parameter_labels = [self.priors[k].latex_label for k in
self.search_parameter_keys]
if self.parameter_labels_with_unit is None:
self.parameter_labels_with_unit = [
self.priors[k].latex_label_with_unit for k in
self.search_parameter_keys]
elif priors is None:
self._priors = priors
self.parameter_labels = self.search_parameter_keys
self.parameter_labels_with_unit = self.search_parameter_keys
else:
raise ValueError("Input priors not understood")
@property
def samples(self):
""" An array of samples """
if self._samples is not None:
return self._samples
else:
raise ValueError("Result object has no stored samples")
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def num_likelihood_evaluations(self):
""" number of likelihood evaluations """
if self._num_likelihood_evaluations is not None:
return self._num_likelihood_evaluations
else:
raise ValueError("Result object has no stored likelihood evaluations")
@num_likelihood_evaluations.setter
def num_likelihood_evaluations(self, num_likelihood_evaluations):
self._num_likelihood_evaluations = num_likelihood_evaluations
@property
def nested_samples(self):
"""" An array of unweighted samples """
if self._nested_samples is not None:
return self._nested_samples
else:
raise ValueError("Result object has no stored nested samples")
@nested_samples.setter
def nested_samples(self, nested_samples):
self._nested_samples = nested_samples
@property
def walkers(self):
"""" An array of the ensemble walkers """
if self._walkers is not None:
return self._walkers
else:
raise ValueError("Result object has no stored walkers")
@walkers.setter
def walkers(self, walkers):
self._walkers = walkers
@property
def nburn(self):
"""" An array of the ensemble walkers """
if self._nburn is not None:
return self._nburn
else:
raise ValueError("Result object has no stored nburn")
@nburn.setter
def nburn(self, nburn):
self._nburn = nburn
@property
def posterior(self):
""" A pandas data frame of the posterior """
if self._posterior is not None:
return self._posterior
else:
raise ValueError("Result object has no stored posterior")
@posterior.setter
def posterior(self, posterior):
self._posterior = posterior
@property
def log_10_bayes_factor(self):
return self.log_bayes_factor / np.log(10)
@property
def log_10_evidence(self):
return self.log_evidence / np.log(10)
@property
def log_10_evidence_err(self):
return self.log_evidence_err / np.log(10)
@property
def log_10_noise_evidence(self):
return self.log_noise_evidence / np.log(10)
@property
def version(self):
return self._version
@version.setter
def version(self, version):
if version is None:
self._version = 'bilby={}'.format(utils.get_version_information())
else:
self._version = version
def _get_save_data_dictionary(self):
# This list defines all the parameters saved in the result object
save_attrs = [
'label', 'outdir', 'sampler', 'log_evidence', 'log_evidence_err',
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'constraint_parameter_keys',
'sampling_time', 'sampler_kwargs', 'use_ratio',
'log_likelihood_evaluations', 'log_prior_evaluations',
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
'version']
dictionary = OrderedDict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
except ValueError as e:
logger.debug("Unable to save {}, message: {}".format(attr, e))
pass
return dictionary
def save_to_file(self, filename=None, overwrite=False, outdir=None,
extension='json', gzip=False):
"""
Writes the Result to a json or deepdish h5 file
Parameters
----------
filename: optional,
Filename to write to (overwrites the default)
overwrite: bool, optional
Whether or not to overwrite an existing result file.
default=False
outdir: str, optional
Path to the outdir. Default is the one stored in the result object.
extension: str, optional {json, hdf5, True}
Determines the method to use to store the data (if True defaults
to json)
gzip: bool, optional
If true, and outputting to a json file, this will gzip the resulting
file and add '.gz' to the file extension.
"""
if extension is True:
extension = "json"
outdir = self._safe_outdir_creation(outdir, self.save_to_file)
if filename is None:
filename = result_file_name(outdir, self.label, extension, gzip)
move_old_file(filename, overwrite)
# Convert the prior to a string representation for saving on disk
dictionary = self._get_save_data_dictionary()
# Convert callable sampler_kwargs to strings
if dictionary.get('sampler_kwargs', None) is not None:
for key in dictionary['sampler_kwargs']:
if hasattr(dictionary['sampler_kwargs'][key], '__call__'):
dictionary['sampler_kwargs'][key] = str(dictionary['sampler_kwargs'][key])
try:
# convert priors to JSON dictionary for both JSON and hdf5 files
dictionary["priors"] = dictionary["priors"]._get_json_dict()
if extension == 'json':
if gzip:
import gzip
# encode to a string
json_str = json.dumps(dictionary, cls=BilbyJsonEncoder).encode('utf-8')
with gzip.GzipFile(filename, 'w') as file:
file.write(json_str)
else:
with open(filename, 'w') as file:
json.dump(dictionary, file, indent=2, cls=BilbyJsonEncoder)
elif extension == 'hdf5':
import deepdish
for key in dictionary:
if isinstance(dictionary[key], pd.DataFrame):
dictionary[key] = dictionary[key].to_dict()
deepdish.io.save(filename, dictionary)
else:
raise ValueError("Extension type {} not understood".format(extension))
except Exception as e:
logger.error("\n\n Saving the data has failed with the "
"following message:\n {} \n\n".format(e))
def save_posterior_samples(self, filename=None, outdir=None, label=None):
""" Saves posterior samples to a file
Generates a .dat file containing the posterior samples and auxiliary
data saved in the posterior. Note, non-numeric columns are dropped,
while complex numbers are split into their absolute value and angle,
with ``_abs`` and ``_angle`` appended to the column name
Parameters
----------
filename: str
Alternative filename to use. Defaults to
outdir/label_posterior_samples.dat
outdir, label: str
Alternative outdir and label to use
"""
if filename is None:
if label is None:
label = self.label
outdir = self._safe_outdir_creation(outdir, self.save_posterior_samples)
filename = '{}/{}_posterior_samples.dat'.format(outdir, label)
else:
outdir = os.path.dirname(filename)
self._safe_outdir_creation(outdir, self.save_posterior_samples)
# Drop non-numeric columns
df = self.posterior.select_dtypes([np.number]).copy()
# Convert complex columns to abs
for key in df.keys():
if np.any(np.iscomplex(df[key])):
complex_term = df.pop(key)
df.loc[:, key + "_abs"] = np.abs(complex_term)
df.loc[:, key + "_angle"] = np.angle(complex_term)
logger.info("Writing samples file to {}".format(filename))
df.to_csv(filename, index=False, header=True, sep=' ')
def get_latex_labels_from_parameter_keys(self, keys):
""" Returns a list of latex_labels corresponding to the given keys
Parameters
----------
keys: list
List of strings corresponding to the desired latex_labels
Returns
-------
list: The desired latex_labels
"""
latex_labels = []
for key in keys:
if key in self.search_parameter_keys:
idx = self.search_parameter_keys.index(key)
label = self.parameter_labels_with_unit[idx]
elif key in self.parameter_labels:
label = key
else:
label = None
logger.debug(
'key {} not a parameter label or latex label'.format(key)
)
if label is None:
label = key.replace("_", " ")
latex_labels.append(label)
return latex_labels
@property
def covariance_matrix(self):
""" The covariance matrix of the samples the posterior """
samples = self.posterior[self.search_parameter_keys].values
return np.cov(samples.T)
@property
def posterior_volume(self):
""" The posterior volume """
if self.covariance_matrix.ndim == 0:
return np.sqrt(self.covariance_matrix)
else:
return 1 / np.sqrt(np.abs(np.linalg.det(
1 / self.covariance_matrix)))
@staticmethod
def prior_volume(priors):
""" The prior volume, given a set of priors """
return np.prod([priors[k].maximum - priors[k].minimum for k in priors])
def occam_factor(self, priors):
""" The Occam factor,
See Chapter 28, `Mackay "Information Theory, Inference, and Learning
Algorithms" <http://www.inference.org.uk/itprnn/book.html>`_ Cambridge
University Press (2003).
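Computed here as ``posterior_volume / prior_volume(priors)``, i.e. the
fraction of the prior volume occupied by the posterior; values much
smaller than one indicate tightly constrained parameters.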
"""
return self.posterior_volume / self.prior_volume(priors)
@property
def bayesian_model_dimensionality(self):
""" Characterises how many parameters are effectively constraint by the data
See <https://arxiv.org/abs/1903.06682>
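As implemented here this is twice the posterior variance of the
log-likelihood, i.e. ``d = 2 * (<ln L**2> - <ln L>**2)``.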
Returns
-------
float: The model dimensionality
"""
return 2 * (np.mean(self.posterior['log_likelihood']**2) -
np.mean(self.posterior['log_likelihood'])**2)
def get_one_dimensional_median_and_error_bar(self, key, fmt='.2f',
quantiles=(0.16, 0.84)):
""" Calculate the median and error bar for a given key
Parameters
----------
key: str
The parameter key for which to calculate the median and error bar
fmt: str, ('.2f')
A format string
quantiles: list, tuple
A length-2 tuple of the lower and upper-quantiles to calculate
the errors bars for.
Returns
-------
summary: namedtuple
An object with attributes median, minus, plus and string
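Example (illustrative; the parameter key is hypothetical)::
>>> summary = result.get_one_dimensional_median_and_error_bar('mass')
>>> summary.string  # latex string of the form median_{-minus}^{+plus}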
"""
summary = namedtuple('summary', ['median', 'lower', 'upper', 'string'])
if len(quantiles) != 2:
raise ValueError("quantiles must be of length 2")
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(self.posterior[key], quants_to_compute * 100)
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
fmt = "{{0:{0}}}".format(fmt).format
string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
summary.string = string_template.format(
fmt(summary.median), fmt(summary.minus), fmt(summary.plus))
return summary
@latex_plot_format
def plot_single_density(self, key, prior=None, cumulative=False,
title=None, truth=None, save=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=(0.16, 0.84), dpi=300):
""" Plot a 1D marginal density, either probability or cumulative.
Parameters
----------
key: str
Name of the parameter to plot
prior: {bool (True), bilby.core.prior.Prior}
If true, add the stored prior probability density function to the
one-dimensional marginal distributions. If instead a Prior
is provided, this will be plotted.
cumulative: bool
If true plot the CDF
title: bool
If true, add 1D title of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
details). If `quantiles=None` is passed in, no title is added.
truth: {bool, float}
If true, plot self.injection_parameters[parameter].
If float, plot this value.
save: bool:
If true, save plot to disk.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The fontsizes for the labels and titles
quantiles: tuple
A length-2 tuple of the lower and upper-quantiles to calculate
the errors bars for.
dpi: int
Dots per inch resolution of the plot
Returns
-------
figure: matplotlib.pyplot.figure
A matplotlib figure object
"""
logger.info('Plotting {} marginal distribution'.format(key))
label = self.get_latex_labels_from_parameter_keys([key])[0]
fig, ax = plt.subplots()
try:
ax.hist(self.posterior[key].values, bins=bins, density=True,
histtype='step', cumulative=cumulative)
except ValueError as e:
logger.info(
'Failed to generate 1d plot for {}, error message: {}'
.format(key, e))
return
ax.set_xlabel(label, fontsize=label_fontsize)
if truth is not None:
ax.axvline(truth, ls='-', color='orange')
summary = self.get_one_dimensional_median_and_error_bar(
key, quantiles=quantiles)
ax.axvline(summary.median - summary.minus, ls='--', color='C0')
ax.axvline(summary.median + summary.plus, ls='--', color='C0')
if title:
ax.set_title(summary.string, fontsize=title_fontsize)
if isinstance(prior, Prior):
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
if cumulative is False:
ax.plot(theta, prior.prob(theta), color='C2')
else:
ax.plot(theta, prior.cdf(theta), color='C2')
if save:
fig.tight_layout()
if cumulative:
file_name = file_base_name + key + '_cdf'
else:
file_name = file_base_name + key + '_pdf'
safe_save_figure(fig=fig, filename=file_name, dpi=dpi)
plt.close(fig)
else:
return fig
def plot_marginals(self, parameters=None, priors=None, titles=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=(0.16, 0.84), dpi=300,
outdir=None):
""" Plot 1D marginal distributions
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
details). If `quantiles=None` is passed in, no title is added.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The font sizes for the labels and titles
quantiles: tuple
A length-2 tuple of the lower and upper-quantiles to calculate
the errors bars for.
dpi: int
Dots per inch resolution of the plot
outdir: str, optional
Path to the outdir. Default is the one stored in the result object.
Returns
-------
"""
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
truths = parameters
elif parameters is None:
plot_parameter_keys = self.posterior.keys()
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
else:
plot_parameter_keys = list(parameters)
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
if file_base_name is None:
outdir = self._safe_outdir_creation(outdir, self.plot_marginals)
file_base_name = '{}/{}_1d/'.format(outdir, self.label)
check_directory_exists_and_if_not_mkdir(file_base_name)
if priors is True:
priors = getattr(self, 'priors', dict())
elif isinstance(priors, dict):
pass
elif priors in [False, None]:
priors = dict()
else:
raise ValueError('Input priors={} not understood'.format(priors))
for i, key in enumerate(plot_parameter_keys):
if not isinstance(self.posterior[key].values[0], float):
continue
prior = priors.get(key, None)
truth = truths.get(key, None)
for cumulative in [False, True]:
self.plot_single_density(
key, prior=prior, cumulative=cumulative, title=titles,
truth=truth, save=True, file_base_name=file_base_name,
bins=bins, label_fontsize=label_fontsize, dpi=dpi,
title_fontsize=title_fontsize, quantiles=quantiles)
@latex_plot_format
def plot_corner(self, parameters=None, priors=None, titles=True, save=True,
filename=None, dpi=300, **kwargs):
""" Plot a corner-plot
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
details). If `quantiles=None` is passed in, no title is added.
save: bool, optional
If true, save the image using the given label and outdir
filename: str, optional
If given, overwrite the default filename
dpi: int, optional
Dots per inch resolution of the plot
**kwargs:
Other keyword arguments are passed to `corner.corner`. We set some
defaults to improve the basic look and feel, but these can all be
overridden. Also optional an 'outdir' argument which can be used
to override the outdir set by the absolute path of the result object.
Notes
-----
The generation of the corner plot themselves is done by the corner
python module, see https://corner.readthedocs.io for more
information.
Truth-lines can be passed in in several ways. Either as the values
of the parameters dict, or a list via the `truths` kwarg. If
injection_parameters were given to run_sampler, these will
automatically be added to the plot. This behaviour can be stopped by
adding truths=False.
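Example (illustrative; the parameter names are hypothetical)::
>>> fig = result.plot_corner(parameters=['mass_1', 'mass_2'], save=False)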
Returns
-------
fig:
A matplotlib figure instance
"""
# If in testing mode, not corner plots are generated
if utils.command_line_args.bilby_test_mode:
return
# bilby default corner kwargs. Overwritten by anything passed to kwargs
defaults_kwargs = dict(
bins=50, smooth=0.9, label_kwargs=dict(fontsize=16),
title_kwargs=dict(fontsize=16), color='#0072C1',
truth_color='tab:orange', quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
plot_density=False, plot_datapoints=True, fill_contours=True,
max_n_ticks=3)
if LooseVersion(matplotlib.__version__) < "2.1":
defaults_kwargs['hist_kwargs'] = dict(normed=True)
else:
defaults_kwargs['hist_kwargs'] = dict(density=True)
if 'lionize' in kwargs and kwargs['lionize'] is True:
defaults_kwargs['truth_color'] = 'tab:blue'
defaults_kwargs['color'] = '#FF8C00'
defaults_kwargs.update(kwargs)
kwargs = defaults_kwargs
# Handle if truths was passed in
if 'truth' in kwargs:
kwargs['truths'] = kwargs.pop('truth')
if "truths" in kwargs:
truths = kwargs.get('truths')
if isinstance(parameters, list) and isinstance(truths, list):
if len(parameters) != len(truths):
raise ValueError(
"Length of parameters and truths don't match")
elif isinstance(truths, dict) and parameters is None:
parameters = kwargs.pop('truths')
elif isinstance(truths, bool):
pass
elif truths is None:
kwargs["truths"] = False
else:
raise ValueError(
"Combination of parameters and truths not understood")
# If injection parameters where stored, use these as parameter values
# but do not overwrite input parameters (or truths)
cond1 = getattr(self, 'injection_parameters', None) is not None
cond2 = parameters is None
cond3 = bool(kwargs.get("truths", True))
if cond1 and cond2 and cond3:
parameters = {
key: self.injection_parameters.get(key, np.nan)
for key in self.search_parameter_keys
}
# If parameters is a dictionary, use the keys to determine which
# parameters to plot and the values as truths.
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
kwargs['truths'] = list(parameters.values())
elif parameters is None:
plot_parameter_keys = self.search_parameter_keys
else:
plot_parameter_keys = list(parameters)
# Get latex formatted strings for the plot labels
kwargs['labels'] = kwargs.get(
'labels', self.get_latex_labels_from_parameter_keys(
plot_parameter_keys))
kwargs["labels"] = sanity_check_labels(kwargs["labels"])
# Unless already set, set the range to include all samples
# This prevents ValueErrors being raised for parameters with no range
kwargs['range'] = kwargs.get('range', [1] * len(plot_parameter_keys))
# Remove truths if it is a bool
if isinstance(kwargs.get('truths'), bool):
kwargs.pop('truths')
# Create the data array to plot and pass everything to corner
xs = self.posterior[plot_parameter_keys].values
if len(plot_parameter_keys) > 1:
fig = corner.corner(xs, **kwargs)
else:
ax = kwargs.get("ax", plt.subplot())
ax.hist(xs, bins=kwargs["bins"], color=kwargs["color"],
histtype="step", **kwargs["hist_kwargs"])
ax.set_xlabel(kwargs["labels"][0])
fig = plt.gcf()
axes = fig.get_axes()
# Add the titles
if titles and kwargs.get('quantiles', None) is not None:
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
if ax.title.get_text() == '':
ax.set_title(self.get_one_dimensional_median_and_error_bar(
par, quantiles=kwargs['quantiles']).string,
**kwargs['title_kwargs'])
# Add priors to the 1D plots
if priors is True:
priors = getattr(self, 'priors', False)
if isinstance(priors, dict):
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
ax.plot(theta, priors[par].prob(theta), color='C2')
elif priors in [False, None]:
pass
else:
raise ValueError('Input priors={} not understood'.format(priors))
if save:
if filename is None:
outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_corner)
filename = '{}/{}_corner.png'.format(outdir, self.label)
logger.debug('Saving corner plot to {}'.format(filename))
safe_save_figure(fig=fig, filename=filename, dpi=dpi)
plt.close(fig)
return fig
@latex_plot_format
def plot_walkers(self, **kwargs):
""" Method to plot the trace of the walkers in an ensemble MCMC plot """
if hasattr(self, 'walkers') is False:
logger.warning("Cannot plot_walkers as no walkers are saved")
return
if utils.command_line_args.bilby_test_mode:
return
nwalkers, nsteps, ndim = self.walkers.shape
idxs = np.arange(nsteps)
fig, axes = plt.subplots(nrows=ndim, figsize=(6, 3 * ndim))
walkers = self.walkers[:, :, :]
parameter_labels = sanity_check_labels(self.parameter_labels)
for i, ax in enumerate(axes):
ax.plot(idxs[:self.nburn + 1], walkers[:, :self.nburn + 1, i].T,
lw=0.1, color='r')
ax.set_ylabel(parameter_labels[i])
for i, ax in enumerate(axes):
ax.plot(idxs[self.nburn:], walkers[:, self.nburn:, i].T, lw=0.1,
color='k')
ax.set_ylabel(parameter_labels[i])
fig.tight_layout()
outdir = self._safe_outdir_creation(kwargs.get('outdir'), self.plot_walkers)
filename = '{}/{}_walkers.png'.format(outdir, self.label)
logger.debug('Saving walkers plot to {}'.format(filename))
safe_save_figure(fig=fig, filename=filename)
plt.close(fig)
@latex_plot_format
def plot_with_data(self, model, x, y, ndraws=1000, npoints=1000,
xlabel=None, ylabel=None, data_label='data',
data_fmt='o', draws_label=None, filename=None,
maxl_label='max likelihood', dpi=300, outdir=None):
""" Generate a figure showing the data and fits to the data
Parameters
----------
model: function
A python function which when called as `model(x, **kwargs)` returns
the model prediction (here `kwargs` is a dictionary of key-value
pairs of the model parameters.
x, y: np.ndarray
The independent and dependent data to plot
ndraws: int
Number of draws from the posterior to plot
npoints: int
Number of points used to plot the smoothed fit to the data
xlabel, ylabel: str
Labels for the axes
data_label, draws_label, maxl_label: str
Label for the data, draws, and max likelihood legend
data_fmt: str
Matplotlib fmt code, defaults to `'o'`
dpi: int
Passed to `plt.savefig`
filename: str
If given, the filename to use. Otherwise, the filename is generated
from the outdir and label attributes.
outdir: str, optional
Path to the outdir. Default is the one stored in the result object.
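Example (a minimal sketch; assumes the posterior contains columns
``m`` and ``c`` and that data arrays ``x`` and ``y`` exist)::
>>> def model(x, m, c):
...     return m * x + c
>>> result.plot_with_data(model, x, y, ndraws=500, xlabel='x', ylabel='y')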
"""
# Determine model_posterior, the subset of the full posterior which
# should be passed into the model
model_keys = infer_parameters_from_function(model)
model_posterior = self.posterior[model_keys]
xsmooth = np.linspace(np.min(x), np.max(x), npoints)
fig, ax = plt.subplots()
logger.info('Plotting {} draws'.format(ndraws))
for _ in range(ndraws):
s = model_posterior.sample().to_dict('records')[0]
ax.plot(xsmooth, model(xsmooth, **s), alpha=0.25, lw=0.1, color='r',
label=draws_label)
try:
if all(~np.isnan(self.posterior.log_likelihood)):
logger.info('Plotting maximum likelihood')
s = model_posterior.iloc[self.posterior.log_likelihood.idxmax()]
ax.plot(xsmooth, model(xsmooth, **s), lw=1, color='k',
label=maxl_label)
except (AttributeError, TypeError):
logger.debug(
"No log likelihood values stored, unable to plot max")
ax.plot(x, y, data_fmt, markersize=2, label=data_label)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
ax.legend(numpoints=3)
fig.tight_layout()
if filename is None:
outdir = self._safe_outdir_creation(outdir, self.plot_with_data)
filename = '{}/{}_plot_with_data'.format(outdir, self.label)
safe_save_figure(fig=fig, filename=filename, dpi=dpi)
plt.close(fig)
@staticmethod
def _add_prior_fixed_values_to_posterior(posterior, priors):
if priors is None:
return posterior
for key in priors:
if isinstance(priors[key], DeltaFunction):
posterior[key] = priors[key].peak
elif isinstance(priors[key], float):
posterior[key] = priors[key]
return posterior
def samples_to_posterior(self, likelihood=None, priors=None,
conversion_function=None, npool=1):
"""
Convert array of samples to posterior (a Pandas data frame)
Also applies the conversion function to any stored posterior
Parameters
----------
likelihood: bilby.likelihood.GravitationalWaveTransient, optional
GravitationalWaveTransient likelihood used for sampling.
priors: bilby.prior.PriorDict, optional
Dictionary of prior object, used to fill in delta function priors.
conversion_function: function, optional
Function which adds in extra parameters to the data frame,
should take the data_frame, likelihood and prior as arguments.
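Example (a minimal sketch of a compatible ``conversion_function``; the
column names are hypothetical)::
>>> def add_total_mass(data_frame, likelihood, priors):
...     data_frame['total_mass'] = data_frame['mass_1'] + data_frame['mass_2']
...     return data_frame
>>> result.samples_to_posterior(likelihood, priors, conversion_function=add_total_mass)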
"""
try:
data_frame = self.posterior
except ValueError:
data_frame = pd.DataFrame(
self.samples, columns=self.search_parameter_keys)
data_frame = self._add_prior_fixed_values_to_posterior(
data_frame, priors)
data_frame['log_likelihood'] = getattr(
self, 'log_likelihood_evaluations', np.nan)
if self.log_prior_evaluations is None and priors is not None:
data_frame['log_prior'] = priors.ln_prob(
dict(data_frame[self.search_parameter_keys]), axis=0)
else:
data_frame['log_prior'] = self.log_prior_evaluations
if conversion_function is not None:
            if "npool" in inspect.getfullargspec(conversion_function).args:
data_frame = conversion_function(data_frame, likelihood, priors, npool=npool)
else:
data_frame = conversion_function(data_frame, likelihood, priors)
self.posterior = data_frame
def calculate_prior_values(self, priors):
"""
Evaluate prior probability for each parameter for each sample.
Parameters
----------
priors: dict, PriorDict
Prior distributions
"""
self.prior_values = pd.DataFrame()
for key in priors:
if key in self.posterior.keys():
if isinstance(priors[key], DeltaFunction):
continue
else:
self.prior_values[key]\
= priors[key].prob(self.posterior[key].values)
def get_all_injection_credible_levels(self, keys=None):
"""
Get credible levels for all parameters
Parameters
----------
keys: list, optional
            A list of keys for which to return the credible levels; if None,
            defaults to search_parameter_keys
Returns
-------
credible_levels: dict
The credible levels at which the injected parameters are found.
"""
if keys is None:
keys = self.search_parameter_keys
if self.injection_parameters is None:
            raise TypeError("Result object has no 'injection_parameters'. "
                            "Cannot compute credible levels.")
credible_levels = {key: self.get_injection_credible_level(key)
for key in keys
if isinstance(self.injection_parameters.get(key, None), float)}
return credible_levels
def get_injection_credible_level(self, parameter):
"""
Get the credible level of the injected parameter
Calculated as CDF(injection value)
Parameters
----------
parameter: str
Parameter to get credible level for
Returns
-------
float: credible level
"""
if self.injection_parameters is None:
            raise TypeError("Result object has no 'injection_parameters'. "
                            "Cannot compute credible levels.")
if parameter in self.posterior and\
parameter in self.injection_parameters:
credible_level =\
sum(self.posterior[parameter].values <
self.injection_parameters[parameter]) / len(self.posterior)
return credible_level
else:
return np.nan
def _check_attribute_match_to_other_object(self, name, other_object):
""" Check attribute name exists in other_object and is the same
Parameters
----------
name: str
Name of the attribute in this instance
other_object: object
Other object with attributes to compare with
Returns
-------
bool: True if attribute name matches with an attribute of other_object, False otherwise
"""
a = getattr(self, name, False)
b = getattr(other_object, name, False)
logger.debug('Checking {} value: {}=={}'.format(name, a, b))
if (a is not False) and (b is not False):
type_a = type(a)
type_b = type(b)
if type_a == type_b:
if type_a in [str, float, int, dict, list]:
try:
return a == b
except ValueError:
return False
elif type_a in [np.ndarray]:
return np.all(a == b)
return False
@property
def kde(self):
""" Kernel density estimate built from the stored posterior
Uses `scipy.stats.gaussian_kde` to generate the kernel density
"""
if self._kde:
return self._kde
else:
self._kde = scipy.stats.gaussian_kde(
self.posterior[self.search_parameter_keys].values.T)
return self._kde
def posterior_probability(self, sample):
""" Calculate the posterior probability for a new sample
This queries a Kernel Density Estimate of the posterior to calculate
the posterior probability density for the new sample.
Parameters
----------
sample: dict, or list of dictionaries
A dictionary containing all the keys from
self.search_parameter_keys and corresponding values at which to
calculate the posterior probability
Returns
-------
p: array-like,
The posterior probability of the sample
"""
if isinstance(sample, dict):
sample = [sample]
ordered_sample = [[s[key] for key in self.search_parameter_keys]
for s in sample]
return self.kde(ordered_sample)
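    # Illustrative usage sketch (not part of bilby): querying the KDE built from
    # the stored posterior. The file name and the parameter names 'mu'/'sigma'
    # are hypothetical and assume they match result.search_parameter_keys.
    #
    # result = read_in_result(filename='outdir/label_result.json')
    # density = result.posterior_probability({'mu': 0.1, 'sigma': 1.2})
    # print(density)  # one posterior density value per supplied sample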
def _safe_outdir_creation(self, outdir=None, caller_func=None):
if outdir is None:
outdir = self.outdir
try:
utils.check_directory_exists_and_if_not_mkdir(outdir)
except PermissionError:
            raise FileMovedError("Cannot write in the out directory.\n"
                                 "Did you move the file here from another system?\n"
"Try calling " + caller_func.__name__ + " with the 'outdir' "
"keyword argument, e.g. " + caller_func.__name__ + "(outdir='.')")
return outdir
def get_weights_by_new_prior(self, old_prior, new_prior, prior_names=None):
""" Calculate a list of sample weights based on the ratio of new to old priors
Parameters
----------
old_prior: PriorDict,
The prior used in the generation of the original samples.
new_prior: PriorDict,
The prior to use to reweight the samples.
prior_names: list
A list of the priors to include in the ratio during reweighting.
Returns
-------
weights: array-like,
A list of sample weights.
"""
weights = []
# Shared priors - these will form a ratio
if prior_names is not None:
shared_parameters = {key: self.posterior[key] for key in new_prior if
key in old_prior and key in prior_names}
else:
shared_parameters = {key: self.posterior[key] for key in new_prior if key in old_prior}
parameters = [{key: self.posterior[key][i] for key in shared_parameters.keys()}
for i in range(len(self.posterior))]
for i in range(len(self.posterior)):
weight = 1
for prior_key in shared_parameters.keys():
val = self.posterior[prior_key][i]
weight *= new_prior.evaluate_constraints(parameters[i])
weight *= new_prior[prior_key].prob(val) / old_prior[prior_key].prob(val)
weights.append(weight)
return weights
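    # Illustrative usage sketch (not part of bilby): importance weights for a
    # prior change. `old_priors`/`new_priors` are hypothetical PriorDict objects
    # sharing the key 'mass'; only that key enters the prior ratio.
    #
    # weights = result.get_weights_by_new_prior(old_priors, new_priors,
    #                                           prior_names=['mass'])
    # reweighted = result.posterior.sample(n=1000, weights=weights, replace=True)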
def to_arviz(self, prior=None):
""" Convert the Result object to an ArviZ InferenceData object.
Parameters
----------
prior: int
If a positive integer is given then that number of prior
samples will be drawn and stored in the ArviZ InferenceData
object.
Returns
-------
azdata: InferenceData
The ArviZ InferenceData object.
"""
try:
import arviz as az
except ImportError:
logger.debug(
"ArviZ is not installed, so cannot convert to InferenceData"
)
posdict = {}
for key in self.posterior:
posdict[key] = self.posterior[key].values
if "log_likelihood" in posdict:
loglikedict = {
"log_likelihood": posdict.pop("log_likelihood")
}
else:
if self.log_likelihood_evaluations is not None:
loglikedict = {
"log_likelihood": self.log_likelihood_evaluations
}
else:
loglikedict = None
priorsamples = None
if prior is not None:
if self.priors is None:
logger.warning(
"No priors are in the Result object, so prior samples "
"will not be included in the output."
)
else:
priorsamples = self.priors.sample(size=prior)
azdata = az.from_dict(
posterior=posdict,
log_likelihood=loglikedict,
prior=priorsamples,
)
# add attributes
version = {
"inference_library": "bilby: {}".format(self.sampler),
"inference_library_version": get_version_information()
}
azdata.posterior.attrs.update(version)
if "log_likelihood" in azdata._groups:
azdata.log_likelihood.attrs.update(version)
if "prior" in azdata._groups:
azdata.prior.attrs.update(version)
return azdata
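    # Illustrative usage sketch (not part of bilby): exporting to ArviZ for
    # convergence checks and plotting. Assumes the optional `arviz` dependency
    # is installed and that the Result has stored priors.
    #
    # import arviz as az
    # azdata = result.to_arviz(prior=1000)
    # az.plot_posterior(azdata)
    # print(az.summary(azdata))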
class ResultList(list):
def __init__(self, results=None):
""" A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for
        outputting combined results.
Parameters
----------
results: list
A list of `:class:`bilby.core.result.Result`.
"""
super(ResultList, self).__init__()
for result in results:
self.append(result)
def append(self, result):
"""
Append a :class:`bilby.core.result.Result`, or set of results, to the
list.
Parameters
----------
result: :class:`bilby.core.result.Result` or filename
pointing to a result object, to append to the list.
"""
if isinstance(result, Result):
super(ResultList, self).append(result)
elif isinstance(result, str):
super(ResultList, self).append(read_in_result(result))
else:
raise TypeError("Could not append a non-Result type")
def combine(self):
"""
        Return the combined results in a :class:`bilby.core.result.Result`
object.
"""
if len(self) == 0:
return Result()
elif len(self) == 1:
return copy(self[0])
else:
result = copy(self[0])
if result.label is not None:
result.label += '_combined'
self.check_consistent_sampler()
self.check_consistent_data()
self.check_consistent_parameters()
self.check_consistent_priors()
# check which kind of sampler was used: MCMC or Nested Sampling
if result._nested_samples is not None:
posteriors, result = self._combine_nested_sampled_runs(result)
else:
posteriors = [res.posterior for res in self]
combined_posteriors = pd.concat(posteriors, ignore_index=True)
result.posterior = combined_posteriors.sample(len(combined_posteriors)) # shuffle
return result
def _combine_nested_sampled_runs(self, result):
"""
Combine multiple nested sampling runs.
Currently this keeps posterior samples from each run in proportion with
the evidence for each individual run
Parameters
----------
result: bilby.core.result.Result
The result object to put the new samples in.
Returns
-------
posteriors: list
A list of pandas DataFrames containing the reduced sample set from
each run.
result: bilby.core.result.Result
The result object with the combined evidences.
"""
self.check_nested_samples()
# Combine evidences
log_evidences = np.array([res.log_evidence for res in self])
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
        # Propagate uncertainty in the combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
else:
result.log_evidence_err = np.nan
# Combined posteriors with a weighting
result_weights = np.exp(log_evidences - np.max(log_evidences))
posteriors = list()
for res, frac in zip(self, result_weights):
selected_samples = (np.random.uniform(size=len(res.posterior)) < frac)
posteriors.append(res.posterior[selected_samples])
# remove original nested_samples
result.nested_samples = None
result.sampler_kwargs = None
return posteriors, result
def check_nested_samples(self):
for res in self:
try:
res.nested_samples
except ValueError:
raise ResultListError("Not all results contain nested samples")
def check_consistent_priors(self):
for res in self:
for p in self[0].priors.keys():
if not self[0].priors[p] == res.priors[p] or len(self[0].priors) != len(res.priors):
raise ResultListError("Inconsistent priors between results")
def check_consistent_parameters(self):
if not np.all([set(self[0].search_parameter_keys) == set(res.search_parameter_keys) for res in self]):
raise ResultListError("Inconsistent parameters between results")
def check_consistent_data(self):
if not np.all([res.log_noise_evidence == self[0].log_noise_evidence for res in self])\
and not np.all([np.isnan(res.log_noise_evidence) for res in self]):
raise ResultListError("Inconsistent data between results")
def check_consistent_sampler(self):
if not np.all([res.sampler == self[0].sampler for res in self]):
raise ResultListError("Inconsistent samplers between results")
@latex_plot_format
def plot_multiple(results, filename=None, labels=None, colours=None,
save=True, evidences=False, corner_labels=None, **kwargs):
""" Generate a corner plot overlaying two sets of results
Parameters
----------
results: list
A list of `bilby.core.result.Result` objects containing the samples to
plot.
filename: str
File name to save the figure to. If None (default), a filename is
constructed from the outdir of the first element of results and then
the labels for all the result files.
labels: list
List of strings to use when generating a legend. If None (default), the
`label` attribute of each result in `results` is used.
colours: list
The colours for each result. If None, default styles are applied.
save: bool
If true, save the figure
kwargs: dict
All other keyword arguments are passed to `result.plot_corner` (except
for the keyword `labels` for which you should use the dedicated
`corner_labels` input).
However, `show_titles` and `truths` are ignored since they would be
ambiguous on such a plot.
evidences: bool, optional
Add the log-evidence calculations to the legend. If available, the
Bayes factor will be used instead.
corner_labels: list, optional
List of strings to be passed to the input `labels` to `result.plot_corner`.
Returns
-------
fig:
A matplotlib figure instance
"""
kwargs['show_titles'] = False
kwargs['truths'] = None
if corner_labels is not None:
kwargs['labels'] = corner_labels
fig = results[0].plot_corner(save=False, **kwargs)
default_filename = '{}/{}'.format(results[0].outdir, 'combined')
lines = []
default_labels = []
for i, result in enumerate(results):
if colours:
c = colours[i]
else:
c = 'C{}'.format(i)
hist_kwargs = kwargs.get('hist_kwargs', dict())
hist_kwargs['color'] = c
fig = result.plot_corner(fig=fig, save=False, color=c, **kwargs)
default_filename += '_{}'.format(result.label)
lines.append(mpllines.Line2D([0], [0], color=c))
default_labels.append(result.label)
# Rescale the axes
for i, ax in enumerate(fig.axes):
ax.autoscale()
plt.draw()
if labels is None:
labels = default_labels
labels = sanity_check_labels(labels)
if evidences:
if np.isnan(results[0].log_bayes_factor):
            template = r' $\mathrm{{ln}}(Z)={lnz:1.3g}$'
        else:
            template = r' $\mathrm{{ln}}(B)={lnbf:1.3g}$'
labels = [template.format(lnz=result.log_evidence,
lnbf=result.log_bayes_factor)
for ii, result in enumerate(results)]
axes = fig.get_axes()
ndim = int(np.sqrt(len(axes)))
axes[ndim - 1].legend(lines, labels)
if filename is None:
filename = default_filename
if save:
safe_save_figure(fig=fig, filename=filename)
return fig
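# Illustrative usage sketch (not part of bilby): overlaying two runs on a single
# corner plot. The result file names and legend labels are hypothetical.
#
# result_a = read_in_result(filename='outdir/run_a_result.json')
# result_b = read_in_result(filename='outdir/run_b_result.json')
# fig = plot_multiple([result_a, result_b], labels=['run A', 'run B'],
#                     evidences=True, filename='outdir/run_a_vs_run_b.png')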
@latex_plot_format
def make_pp_plot(results, filename=None, save=True, confidence_interval=[0.68, 0.95, 0.997],
lines=None, legend_fontsize='x-small', keys=None, title=True,
confidence_interval_alpha=0.1,
**kwargs):
"""
Make a P-P plot for a set of runs with injected signals.
Parameters
----------
results: list
A list of Result objects, each of these should have injected_parameters
filename: str, optional
The name of the file to save, the default is "outdir/pp.png"
save: bool, optional
Whether to save the file, default=True
confidence_interval: (float, list), optional
The confidence interval to be plotted, defaulting to 1-2-3 sigma
lines: list
        If given, a list of matplotlib line format strings to use; it must
        contain at least as many entries as there are parameters.
legend_fontsize: float
The font size for the legend
keys: list
A list of keys to use, if None defaults to search_parameter_keys
confidence_interval_alpha: float, list, optional
        The transparency for the background confidence interval
kwargs:
Additional kwargs to pass to matplotlib.pyplot.plot
Returns
-------
fig, pvals:
matplotlib figure and a NamedTuple with attributes `combined_pvalue`,
`pvalues`, and `names`.
"""
if keys is None:
keys = results[0].search_parameter_keys
credible_levels = pd.DataFrame()
for result in results:
credible_levels = credible_levels.append(
result.get_all_injection_credible_levels(keys), ignore_index=True)
if lines is None:
colors = ["C{}".format(i) for i in range(8)]
linestyles = ["-", "--", ":"]
lines = ["{}{}".format(a, b) for a, b in product(linestyles, colors)]
if len(lines) < len(credible_levels.keys()):
raise ValueError("Larger number of parameters than unique linestyles")
x_values = np.linspace(0, 1, 1001)
N = len(credible_levels)
fig, ax = plt.subplots()
if isinstance(confidence_interval, float):
confidence_interval = [confidence_interval]
if isinstance(confidence_interval_alpha, float):
confidence_interval_alpha = [confidence_interval_alpha] * len(confidence_interval)
elif len(confidence_interval_alpha) != len(confidence_interval):
raise ValueError(
"confidence_interval_alpha must have the same length as confidence_interval")
for ci, alpha in zip(confidence_interval, confidence_interval_alpha):
edge_of_bound = (1. - ci) / 2.
lower = scipy.stats.binom.ppf(1 - edge_of_bound, N, x_values) / N
upper = scipy.stats.binom.ppf(edge_of_bound, N, x_values) / N
# The binomial point percent function doesn't always return 0 @ 0,
# so set those bounds explicitly to be sure
lower[0] = 0
upper[0] = 0
ax.fill_between(x_values, lower, upper, alpha=alpha, color='k')
pvalues = []
logger.info("Key: KS-test p-value")
for ii, key in enumerate(credible_levels):
pp = np.array([sum(credible_levels[key].values < xx) /
len(credible_levels) for xx in x_values])
pvalue = scipy.stats.kstest(credible_levels[key], 'uniform').pvalue
pvalues.append(pvalue)
logger.info("{}: {}".format(key, pvalue))
try:
name = results[0].priors[key].latex_label
except AttributeError:
name = key
if name == 'H_eff5':
name = '$H_{eff5}$'
if name == 'chi_1':
name = '$\\chi_1$'
if name == 'chi_2':
name = '$\\chi_2$'
if name == 'Q_tilde':
name = '$\\tilde{Q}$'
label = "{} ({:2.3f})".format(name, pvalue)
plt.plot(x_values, pp, lines[ii], label=label, **kwargs)
Pvals = namedtuple('pvals', ['combined_pvalue', 'pvalues', 'names'])
pvals = Pvals(combined_pvalue=scipy.stats.combine_pvalues(pvalues)[1],
pvalues=pvalues,
names=list(credible_levels.keys()))
logger.info(
"Combined p-value: {}".format(pvals.combined_pvalue))
if title:
ax.set_title("N={}, p-value={:2.4f}".format(
len(results), pvals.combined_pvalue))
ax.set_xlabel("C.I.")
ax.set_ylabel("Fraction of events in C.I.")
ax.legend(handlelength=2, labelspacing=0.25, fontsize=legend_fontsize)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
fig.tight_layout()
if save:
if filename is None:
filename = 'outdir/pp.png'
safe_save_figure(fig=fig, filename=filename, dpi=500)
return fig, pvals
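# Illustrative usage sketch (not part of bilby): a P-P test over a set of
# injection runs. The glob pattern is hypothetical.
#
# import glob
# results = [read_in_result(filename=f) for f in glob.glob('outdir/*_result.json')]
# fig, pvals = make_pp_plot(results, filename='outdir/pp.png')
# print(pvals.combined_pvalue)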
def sanity_check_labels(labels):
""" Check labels for plotting to remove matplotlib errors """
for ii, lab in enumerate(labels):
if "_" in lab and "$" not in lab:
labels[ii] = lab.replace("_", "-")
return labels
class ResultError(Exception):
""" Base exception for all Result related errors """
class ResultListError(ResultError):
    """ For errors occurring when combining results. """
class FileMovedError(ResultError):
""" Exceptions that occur when files have been moved """
|
the-stack_0_12921 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import datetime
import json
import math
import os
import time
import sys
import unittest
from artists.miro.constraints import (
DatasetConstraints,
Fields,
FieldConstraints,
MinConstraint,
MaxConstraint,
SignConstraint,
TypeConstraint,
MaxNullsConstraint,
NoDuplicatesConstraint,
AllowedValuesConstraint,
MinLengthConstraint,
MaxLengthConstraint,
constraint_class,
strip_lines,
)
TESTDATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'testdata')
class TestConstraints(unittest.TestCase):
def test_constraint_repr(self):
self.assertEqual(repr(MinConstraint(7)),
'MinConstraint(value=7, precision=None)')
self.assertEqual(repr(MinConstraint('a')),
"MinConstraint(value='a', precision=None)")
self.assertEqual(repr(MinConstraint('a', precision='closed')),
"MinConstraint(value='a', precision='closed')")
self.assertEqual(repr(MinLengthConstraint(3)),
"MinLengthConstraint(value=3)")
self.assertEqual(repr(MaxConstraint(-3)),
'MaxConstraint(value=-3, precision=None)')
self.assertEqual(repr(MaxConstraint('KJ')),
"MaxConstraint(value='KJ', precision=None)")
self.assertEqual(repr(MaxConstraint(4.2, precision='closed')),
"MaxConstraint(value=4.2, precision='closed')")
self.assertEqual(repr(MaxLengthConstraint(0)),
"MaxLengthConstraint(value=0)")
self.assertEqual(repr(SignConstraint('positive')),
"SignConstraint(value='positive')")
self.assertEqual(repr(MaxNullsConstraint(0)),
"MaxNullsConstraint(value=0)")
self.assertEqual(repr(NoDuplicatesConstraint()),
"NoDuplicatesConstraint(value=True)")
self.assertEqual(repr(TypeConstraint('int')),
"TypeConstraint(value='int')")
self.assertEqual(repr(TypeConstraint(['int', 'real'])),
"TypeConstraint(value=['int', 'real'])")
self.assertEqual(repr(AllowedValuesConstraint(['a', 'b'])),
"AllowedValuesConstraint(value=['a', 'b'])")
def test_constraint_class(self):
goods = {
'type': 'TypeConstraint',
'min': 'MinConstraint',
'min_length': 'MinLengthConstraint',
'max': 'MaxConstraint',
'max_length': 'MaxLengthConstraint',
'sign': 'SignConstraint',
'max_nulls': 'MaxNullsConstraint',
'no_duplicates': 'NoDuplicatesConstraint',
'allowed_values': 'AllowedValuesConstraint',
}
for k,v in goods.items():
self.assertEqual(constraint_class(k), v)
def testBadConstraints(self):
self.assertRaisesRegex(TypeError, 'unexpected keyword',
SignConstraint, precision='closed')
self.assertRaises(AssertionError,
MinConstraint, 3, precision='unknown')
self.assertRaises(AssertionError,
SignConstraint, 'not too positive')
self.assertRaises(AssertionError,
TypeConstraint, 'float')
self.assertRaises(AssertionError,
TypeConstraint, ['int', 'float'])
self.assertRaises(AssertionError,
TypeConstraint, ['int', None])
def testFieldConstraintsDict(self):
c = FieldConstraints('one', [TypeConstraint('int'),
MinConstraint(3),
MaxConstraint(7),
SignConstraint('positive'),
MaxNullsConstraint(0),
NoDuplicatesConstraint()])
dfc = Fields([c])
self.assertEqual(strip_lines(json.dumps(dfc.to_dict_value(),
indent=4)),
'''{
"one": {
"type": "int",
"min": 3,
"max": 7,
"sign": "positive",
"max_nulls": 0,
"no_duplicates": true
}
}''')
c = FieldConstraints('one', [TypeConstraint('int'),
MinConstraint(3, precision='closed'),
MaxConstraint(7, precision='fuzzy'),
SignConstraint('positive'),
MaxNullsConstraint(0),
NoDuplicatesConstraint()])
dfc = Fields([c])
self.assertEqual(strip_lines(json.dumps(dfc.to_dict_value(),
indent=4)),
'''{
"one": {
"type": "int",
"min": {
"value": 3,
"precision": "closed"
},
"max": {
"value": 7,
"precision": "fuzzy"
},
"sign": "positive",
"max_nulls": 0,
"no_duplicates": true
}
}''')
def testload(self):
path = os.path.join(TESTDATA_DIR, 'ddd.tdda')
constraints = DatasetConstraints(loadpath=path)
# print(constraints)
if sys.version_info.major < 3:
# Quieten down Python3's vexatious complaining
TestConstraints.assertRaisesRegex = TestConstraints.assertRaisesRegexp
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12925 | """
Open3d visualization tool box
Written by Jihan YANG
All rights preserved from 2021 - present.
"""
import open3d
import torch
import matplotlib
import numpy as np
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def get_coor_colors(obj_labels):
"""
Args:
obj_labels: 1 is ground, labels > 1 indicates different instance cluster
Returns:
rgb: [N, 3]. color for each point.
"""
colors = matplotlib.colors.XKCD_COLORS.values()
max_color_num = obj_labels.max()
color_list = list(colors)[:max_color_num+1]
colors_rgba = [matplotlib.colors.to_rgba_array(color) for color in color_list]
label_rgba = np.array(colors_rgba)[obj_labels]
label_rgba = label_rgba.squeeze()[:, :3]
return label_rgba
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True):
"""
    Draw a point-cloud scene with optional ground-truth and predicted boxes.
    Args:
        points: point cloud (N, 3+)
        gt_boxes: ground-truth boxes (N, 7)
        ref_boxes: predicted boxes (M, 7)
        ref_scores: prediction scores (M,)
        ref_labels: predicted class labels (M,)
"""
    # 1. Check the input types and convert any torch tensors to numpy arrays
if isinstance(points, torch.Tensor):
points = points.cpu().numpy()
if isinstance(gt_boxes, torch.Tensor):
gt_boxes = gt_boxes.cpu().numpy()
if isinstance(ref_boxes, torch.Tensor):
ref_boxes = ref_boxes.cpu().numpy()
vis = open3d.visualization.Visualizer()
vis.create_window()
vis.get_render_option().point_size = 1.0
vis.get_render_option().background_color = np.zeros(3)
# draw origin
if draw_origin:
axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
vis.add_geometry(axis_pcd)
pts = open3d.geometry.PointCloud()
pts.points = open3d.utility.Vector3dVector(points[:, :3])
vis.add_geometry(pts)
if point_colors is None:
pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3)))
else:
pts.colors = open3d.utility.Vector3dVector(point_colors)
if gt_boxes is not None:
vis = draw_box(vis, gt_boxes, (0, 0, 1))
if ref_boxes is not None:
vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores)
vis.run()
vis.destroy_window()
def translate_boxes_to_open3d_instance(gt_boxes):
"""
4-------- 6
/| /|
5 -------- 3 .
| | | |
. 7 -------- 1
|/ |/
2 -------- 0
"""
center = gt_boxes[0:3]
lwh = gt_boxes[3:6]
axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10])
rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles)
box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh)
line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d)
# import ipdb; ipdb.set_trace(context=20)
lines = np.asarray(line_set.lines)
lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0)
line_set.lines = open3d.utility.Vector2iVector(lines)
return line_set, box3d
def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None):
for i in range(gt_boxes.shape[0]):
line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[i])
if ref_labels is None:
line_set.paint_uniform_color(color)
else:
line_set.paint_uniform_color(box_colormap[ref_labels[i]])
vis.add_geometry(line_set)
# if score is not None:
# corners = box3d.get_box_points()
# vis.add_3d_label(corners[5], '%.2f' % score[i])
return vis
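# Illustrative usage sketch (not part of this module): drawing a random point
# cloud with one hypothetical ground-truth box; all values below are made up.
#
# if __name__ == '__main__':
#     pts = np.random.rand(1000, 3) * 10
#     boxes = np.array([[5.0, 5.0, 0.5, 4.0, 2.0, 1.5, 0.3]])  # x, y, z, l, w, h, heading
#     draw_scenes(pts, gt_boxes=boxes)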
|
the-stack_0_12927 | # vim:ts=4:sts=4:sw=4:expandtab
import copy
import datetime
import dateutil.parser
import glob
import json
import logging
import math
from multiprocessing import Process
import os
import random
import shutil
import subprocess
import sys
import tempfile
import traceback
from threading import Thread
import time
import uuid
from kolejka.common import kolejka_config, foreman_config
from kolejka.common import KolejkaTask, KolejkaResult, KolejkaLimits
from kolejka.common import MemoryAction, TimeAction, parse_memory
from kolejka.client import KolejkaClient
from kolejka.common.gpu import gpu_stats
from kolejka.common.images import (
pull_docker_image,
get_docker_image_size,
check_docker_image_existance,
list_docker_images,
remove_docker_image
)
from kolejka.worker.stage0 import stage0
from kolejka.worker.volume import check_python_volume
def manage_images(pull, size, necessary_images, priority_images):
necessary_size = sum(necessary_images.values(), 0)
free_size = size - necessary_size
assert free_size >= 0
docker_images = list_docker_images()
p_images = dict()
for image in priority_images:
if image in docker_images:
p_images[image] = docker_images[image]
priority_images = p_images
keep_images = set()
for image in necessary_images:
keep_images.add(image)
list_images = list(priority_images.items())
random.shuffle(list_images)
li = list(docker_images.items())
random.shuffle(li)
list_images += li
for image,size in list_images:
if image in keep_images:
continue
if size <= free_size:
free_size -= size
keep_images.add(image)
for image in docker_images:
if image not in keep_images:
remove_docker_image(image)
for image, size in necessary_images.items():
pull_image = pull
if not pull_image:
if not check_docker_image_existance(image):
pull_image = True
if pull_image:
pull_docker_image(image)
image_size = get_docker_image_size(image)
assert image_size <= size
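# Illustrative usage sketch (not part of kolejka): how manage_images could be
# invoked on its own. The image names and the 10 GiB cache budget are
# hypothetical.
#
# manage_images(
#     pull=False,
#     size=10 * 1024 ** 3,
#     necessary_images={'python:3.10-slim': 200 * 1024 ** 2},
#     priority_images=['ubuntu:22.04'],
# )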
def foreman_single(temp_path, task):
config = foreman_config()
with tempfile.TemporaryDirectory(temp_path) as jailed_path:
if task.limits.workspace is not None:
subprocess.run(['mount', '-t', 'tmpfs', '-o', 'size='+str(task.limits.workspace), 'none', jailed_path], check=True)
try:
task_path = os.path.join(jailed_path, 'task')
result_path = os.path.join(jailed_path, 'result')
temp_path = os.path.join(jailed_path, 'temp')
os.makedirs(task_path, exist_ok=True)
os.makedirs(result_path, exist_ok=True)
os.makedirs(temp_path, exist_ok=True)
task.path = task_path
client = KolejkaClient()
client.task_get(task.id, task_path)
for k,f in task.files.items():
f.path = k
task.commit()
stage0(task.path, result_path, temp_path=temp_path, consume_task_folder=True)
result = KolejkaResult(result_path)
result.tags = config.tags
client.result_put(result)
except:
traceback.print_exc()
finally:
if task.limits.storage is not None:
subprocess.run(['umount', '-l', jailed_path])
def foreman():
config = foreman_config()
gstats = gpu_stats().gpus
limits = KolejkaLimits()
limits.cpus = config.cpus
limits.memory = config.memory
limits.swap = config.swap
limits.pids = config.pids
limits.storage = config.storage
limits.image = config.image
limits.workspace = config.workspace
limits.time = config.time
limits.network = config.network
limits.gpus = config.gpus
if limits.gpus is None:
limits.gpus = len(gstats)
limits.gpu_memory = config.gpu_memory
for k,v in gstats.items():
if limits.gpu_memory is None:
limits.gpu_memory = v.memory_total
elif v.memory_total is not None:
limits.gpu_memory = min(limits.gpu_memory, v.memory_total)
client = KolejkaClient()
logging.debug('Foreman tags: {}, limits: {}'.format(config.tags, limits.dump()))
while True:
try:
tasks = client.dequeue(config.concurency, limits, config.tags)
if len(tasks) == 0:
time.sleep(config.interval)
else:
check_python_volume()
while len(tasks) > 0:
resources = KolejkaLimits()
resources.copy(limits)
image_usage = dict()
processes = list()
cpus_offset = 0
gpus_offset = 0
for task in tasks:
if len(processes) >= config.concurency:
break
if task.exclusive and len(processes) > 0:
break
task.limits.update(limits)
task.limits.cpus_offset = cpus_offset
task.limits.gpus_offset = gpus_offset
ok = True
if resources.cpus is not None and task.limits.cpus > resources.cpus:
ok = False
if task.limits.gpus is not None and task.limits.gpus > 0:
if resources.gpus is None or task.limits.gpus > resources.gpus:
ok = False
if resources.gpu_memory is not None and task.limits.gpu_memory > resources.gpu_memory:
ok = False
if resources.memory is not None and task.limits.memory > resources.memory:
ok = False
if resources.swap is not None and task.limits.swap > resources.swap:
ok = False
if resources.pids is not None and task.limits.pids > resources.pids:
ok = False
if resources.storage is not None and task.limits.storage > resources.storage:
ok = False
if resources.image is not None:
image_usage_add = max(image_usage.get(task.image, 0), task.limits.image) - image_usage.get(task.image, 0)
if image_usage_add > resources.image:
ok = False
if resources.workspace is not None and task.limits.workspace > resources.workspace:
ok = False
if ok:
proc = Process(target=foreman_single, args=(config.temp_path, task))
processes.append(proc)
cpus_offset += task.limits.cpus
if resources.cpus is not None:
resources.cpus -= task.limits.cpus
if resources.gpus is not None and task.limits.gpus is not None:
resources.gpus -= task.limits.gpus
gpus_offset += task.limits.gpus
if resources.memory is not None:
resources.memory -= task.limits.memory
if resources.swap is not None:
resources.swap -= task.limits.swap
if resources.pids is not None:
resources.pids -= task.limits.pids
if resources.storage is not None:
resources.storage -= task.limits.storage
if resources.image is not None:
resources.image -= image_usage_add
image_usage[task.image] = max(image_usage.get(task.image, 0), task.limits.image)
if resources.workspace is not None:
resources.workspace -= task.limits.workspace
tasks = tasks[1:]
if task.exclusive:
break
else:
break
if config.image is not None:
manage_images(
config.pull,
config.image,
image_usage,
[task.image for task in tasks]
)
for proc in processes:
proc.start()
for proc in processes:
proc.join()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
time.sleep(config.interval)
def config_parser(parser):
parser.add_argument('--auto-tags', type=bool, help='add automatically generated machine tags', default=True)
parser.add_argument('--pull', action='store_true', help='always pull images, even if local version is present', default=False)
parser.add_argument('--tags', type=str, help='comma separated list of machine tags')
parser.add_argument('--temp', type=str, help='temp folder')
parser.add_argument('--interval', type=float, help='dequeue interval (in seconds)')
parser.add_argument('--concurency', type=int, help='number of simultaneous tasks')
parser.add_argument('--cpus', type=int, help='cpus limit')
parser.add_argument('--memory', action=MemoryAction, help='memory limit')
parser.add_argument('--swap', action=MemoryAction, help='swap limit')
parser.add_argument('--pids', type=int, help='pids limit')
parser.add_argument('--storage', action=MemoryAction, help='storage limit')
parser.add_argument('--image', action=MemoryAction, help='image size limit')
parser.add_argument('--workspace', action=MemoryAction, help='workspace size limit')
parser.add_argument('--time', action=TimeAction, help='time limit')
    parser.add_argument('--network', type=bool, help='allow networking')
parser.add_argument('--gpus', type=int, help='gpus limit')
parser.add_argument('--gpu-memory', type=MemoryAction, help='gpu memory limit')
def execute(args):
kolejka_config(args=args)
foreman()
parser.set_defaults(execute=execute)
|
the-stack_0_12928 | """Helper functions for beam search."""
import numpy as np
from queue import PriorityQueue
from future.utils import implements_iterator
def InitBeam(phrase, user_id, m):
# Need to find the hidden state for the last char in the prefix.
prev_hidden = np.zeros((1, 2 * m.params.num_units))
for word in phrase[:-1]:
feed_dict = {
m.model.prev_hidden_state: prev_hidden,
m.model.prev_word: [m.char_vocab[word]],
m.model.beam_size: 4
}
prev_hidden = m.session.run(m.model.next_hidden_state, feed_dict)
return prev_hidden
class BeamItem(object):
"""This is a node in the beam search tree.
  Each node holds three things: a log probability, a list of previous words, and
  the concatenated hidden state vectors from the previous step.
"""
def __init__(self, prev_word, prev_hidden, log_prob=0.0):
self.log_probs = log_prob
if type(prev_word) == list:
self.words = prev_word
else:
self.words = [prev_word]
self.prev_hidden = prev_hidden
def __le__(self, other):
return self.log_probs <= other.log_probs
def __lt__(self, other):
return self.log_probs < other.log_probs
def __ge__(self, other):
return self.log_probs >= other.log_probs
def __gt__(self, other):
return self.log_probs > other.log_probs
def __eq__(self, other):
return self.log_probs == other.log_probs
def __str__(self):
return 'beam {0:.3f}: '.format(self.log_probs) + ''.join(self.words)
class BeamQueue(object):
"""Bounded priority queue."""
def __init__(self, max_size=10):
self.max_size = max_size
self.size = 0
self.bound = None
self.q = PriorityQueue()
def Insert(self, item):
self.size += 1
self.q.put((-item.log_probs, item))
if self.size > self.max_size:
self.Eject()
def CheckBound(self, val):
# If the queue is full then we know that there is no chance of a new item
# being accepted if it's priority is worse than the last thing that got
# ejected.
return self.size < self.max_size or self.bound is None or val < self.bound
def Eject(self):
score, _ = self.q.get()
self.bound = -score
self.size -= 1
def __iter__(self):
return self
def __next__(self):
if not self.q.empty():
_, item = self.q.get()
return item
raise StopIteration
def next(self):
return self.__next__()
def GetCompletions(prefix, user_id, m, branching_factor=8, beam_size=300,
stop='</S>'):
""" Find top completions for a given prefix, user and model."""
m.Lock(user_id) # pre-compute the adaptive recurrent matrix
prev_state = InitBeam(prefix, user_id, m)
nodes = [BeamItem(prefix, prev_state)]
for i in range(36):
new_nodes = BeamQueue(max_size=beam_size)
current_nodes = []
for node in nodes:
if i > 0 and node.words[-1] == stop: # don't extend past the stop token
new_nodes.Insert(node) # copy over finished beams
else:
current_nodes.append(node) # these ones will get extended
if len(current_nodes) == 0:
return new_nodes # all beams have finished
# group together all the nodes in the queue for efficient computation
prev_hidden = np.vstack([item.prev_hidden for item in current_nodes])
prev_words = np.array([m.char_vocab[item.words[-1]] for item in current_nodes])
feed_dict = {
m.model.prev_word: prev_words,
m.model.prev_hidden_state: prev_hidden,
m.model.beam_size: branching_factor
}
current_char, current_char_p, prev_hidden = m.session.run(
[m.beam_chars, m.model.selected_p, m.model.next_hidden_state],
feed_dict)
for i, node in enumerate(current_nodes):
for new_word, top_value in zip(current_char[i, :], current_char_p[i, :]):
new_cost = top_value + node.log_probs
if new_nodes.CheckBound(new_cost): # only create a new object if it fits in beam
new_beam = BeamItem(node.words + [new_word], prev_hidden[i, :],
log_prob=new_cost)
new_nodes.Insert(new_beam)
nodes = new_nodes
return nodes
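# Illustrative usage sketch (not part of this module): ranking completions for a
# character prefix. `m` is assumed to be an already-loaded model wrapper that
# exposes the attributes used above (session, model, char_vocab, Lock); the
# prefix characters are hypothetical. Following GetSavedKeystrokes below, the
# last item yielded by the queue is treated as the top completion.
#
# completions = GetCompletions(['<S>', 'h', 'e'], user_id=0, m=m, beam_size=100)
# top = list(completions)[-1]
# print(top.log_probs, ''.join(top.words[1:]))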
def FirstNonMatch(s1, s2, start=0):
# returns the position of the first non-matching character
min_len = min(len(s1), len(s2))
  for i in range(start, min_len):
if s1[i] != s2[i]:
return i
return min_len
def GetSavedKeystrokes(m, query, branching_factor=4, beam_size=100):
"""Find the shortest prefix that gets the right completion.
Uses binary search.
"""
left = 1
right = len(query)
while left <= right:
    midpoint = (left + right) // 2
prefix = ['<S>'] + list(query[:midpoint])
completions = GetCompletions(
prefix, 0, m, branching_factor=branching_factor, beam_size=beam_size)
top_completion = list(completions)[-1]
top_completion = ''.join(top_completion.words[1:-1])
if top_completion == query:
right = midpoint - 1
else:
left = midpoint + 1
return left
|
the-stack_0_12932 | from django.urls import path
from .consumers import AnalysisConsumer, ServiceConsumer
websocket_urlpatterns = [
path(r"ws/service/", ServiceConsumer),
path(r"ws/analyses/", AnalysisConsumer),
path(r"ws/analyses/<uuid:analysis_id>/", AnalysisConsumer),
]
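# Illustrative sketch (not part of this app): wiring these patterns into a
# Channels routing configuration. The surrounding project layout is hypothetical
# and the exact router imports depend on the installed Channels version (the
# consumers above are referenced directly, in the Channels 2 style).
#
# from channels.auth import AuthMiddlewareStack
# from channels.routing import ProtocolTypeRouter, URLRouter
#
# application = ProtocolTypeRouter({
#     "websocket": AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
# })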
|
the-stack_0_12934 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 11:50:02 2018
@author: Andrija Master
"""
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from scipy.optimize import minimize
import scipy as sp
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import math
""" CLASS GCRFCNB """
class GCRFCNB:
def __init__(self):
pass
def muKov(alfa, R, Precison, Noinst, NodeNo):
mu = np.zeros([Noinst,NodeNo])
bv = 2*np.matmul(R,alfa)
bv = bv.reshape([Noinst,NodeNo])
Kov = np.linalg.inv(Precison)
for m in range(Noinst):
mu[m,:] = Kov[m,:,:].dot(bv[m,:])
return mu,Kov
def Prec(alfa,beta,NodeNo,Se,Noinst):
alfasum = np.sum(alfa)
Q1 = np.identity(NodeNo)*alfasum
Q2 = np.zeros([Noinst,NodeNo,NodeNo])
Prec = np.zeros([Noinst,NodeNo,NodeNo])
pomocna = np.zeros(Se.shape)
for j in range(Se.shape[1]):
pomocna[:,j,:,:] = Se[:,j,:,:] * beta[j]
Q2 = -np.sum(pomocna,axis = 1)
for m in range(Noinst):
Prec[m,:,:] = 2*(Q2[m,:,:]+np.diag(-Q2[m,:,:].sum(axis=0))+Q1)
return Prec
    def sigmaCal(ceta): # Verified
Sigma=1/(1 + np.exp(-ceta))
Sigma[Sigma>0.99999999] = 0.99999999
Sigma[Sigma<1e-10] = 1e-10
return Sigma
""" PREDICT """
def predict(self,R,Se):
NodeNo = Se.shape[3]
Noinst = Se.shape[0]
Precison = GCRFCNB.Prec(self.alfa, self.beta, NodeNo, Se, Noinst)
mu, Kovmat = GCRFCNB.muKov(self.alfa, R, Precison, Noinst, NodeNo)
Prob = GCRFCNB.sigmaCal(mu)
Class = np.round(Prob,0)
self.Prob = Prob
self.Class = Class
return self.Prob, self.Class
""" FIT """
def fit(self,R,Se,Y,x0 = None, learn = 'SLSQP', maxiter = 1000, learnrate = 0.1):
def L(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R):
alfa=x[:ModelUNNo]
beta=x[-ModelSTNo:]
print(alfa)
Precison = GCRFCNB.Prec(alfa, beta, NodeNo, Se, Noinst)
mu,kovMat = GCRFCNB.muKov(alfa,R,Precison,Noinst,NodeNo)
sigma = GCRFCNB.sigmaCal(mu)
L = np.sum(Y*np.log(sigma)+(1-Y)*np.log(1-sigma))
            print('score is {}'.format(L))
return -1*L
def DLdx(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R):
def sigmaFUN(Y,mu):
sigma = GCRFCNB.sigmaCal(mu)
sigmafun=Y-sigma
return sigmafun
            def dPrecdbeta(Noinst,ModelSTNo,NodeNo,Se): # VERIFIED
dPrecdbeta = np.zeros([Noinst,ModelSTNo,NodeNo,NodeNo])
dPrecdbeta = -Se
for m in range(Noinst):
for L in range(ModelSTNo):
dPrecdbeta[m,L,:,:]=2*(dPrecdbeta[m,L,:,:] + np.diag(-dPrecdbeta[m,L,:,:].sum(axis=1)))
return dPrecdbeta
def dLdalfadbeta(sigmafun,dmudalfa,dmudbeta,ModelUNNo,ModelSTNo):
dLdalfa = np.zeros(ModelUNNo)
dLdbeta = np.zeros(ModelSTNo)
for i in range(ModelUNNo):
dLdalfa[i] = np.sum(sigmafun*dmudalfa[:,i,:])
for i in range(ModelSTNo):
dLdbeta[i] = np.sum(sigmafun*dmudbeta[:,i,:])
return dLdalfa,dLdbeta
            def dPrecdalfa(NodeNo,ModelUNNo): # Verified
dPrecdalfa=np.zeros([ModelUNNo,NodeNo,NodeNo])
dQ1dalfa=np.identity(NodeNo)
for p in range(ModelUNNo):
dPrecdalfa[p,:,:]=dQ1dalfa*2
return dPrecdalfa
            def dbdalfa(ModelUNNo,Noinst,R,NodeNo): # Verified 1
dbdalfa = np.zeros([Noinst,ModelUNNo,NodeNo])
for m in range(ModelUNNo):
dbdalfa[:,m,:] = 2*R[:,m].reshape([Noinst, NodeNo])
return dbdalfa
            def dmutdalfa(dbdalfa,DPrecdalfa,Kov,ModelUNNo,Noinst,mu): # Verified
dmutdalfa=np.zeros([Noinst,ModelUNNo,NodeNo])
for m in range(Noinst):
for p in range(ModelUNNo):
dmutdalfa[m,p,:]=(dbdalfa[m,p,:]-DPrecdalfa[p,:,:].dot(mu[m,:])).T.dot(Kov[m,:,:])
return dmutdalfa
            def dmutdbeta(dPrecdbeta,mu,Kov,Noinst,ModelSTNo,NodeNo): # Verified
dmutdbeta=np.zeros([Noinst,ModelSTNo,NodeNo])
for m in range(0,Noinst):
for p in range(0,ModelSTNo):
dmutdbeta[m,p,:]=(-dPrecdbeta[m,p,:,:].dot(mu[m,:])).T.dot(Kov[m,:,:])
return dmutdbeta
alfa=x[:ModelUNNo]
beta=x[-ModelSTNo:]
            DPrecdalfa=dPrecdalfa(NodeNo,ModelUNNo) # Independent of alfa and iterations
Precison = GCRFCNB.Prec(alfa, beta, NodeNo, Se, Noinst)
DPrecdbeta = dPrecdbeta(Noinst,ModelSTNo,NodeNo,Se)
mu,kovMat = GCRFCNB.muKov(alfa,R,Precison,Noinst,NodeNo)
mu[np.isnan(mu)] = 0
Dbdalfa = dbdalfa(ModelUNNo,Noinst,R,NodeNo)
# Dbdalfa[Dbdalfa == -np.inf] = -1e12
Dmudalfa = dmutdalfa(Dbdalfa,DPrecdalfa,kovMat,ModelUNNo,Noinst,mu)
Dmudbeta = dmutdbeta(DPrecdbeta,mu,kovMat,Noinst,ModelSTNo,NodeNo)
sigmafun = sigmaFUN(Y,mu)
DLdalfa,DLdbeta = dLdalfadbeta(sigmafun,Dmudalfa,Dmudbeta,ModelUNNo,ModelSTNo)
DLdx = -np.concatenate((DLdalfa,DLdbeta))
print(DLdx)
return DLdx
ModelUNNo = R.shape[1]
NodeNo = Se.shape[2]
Noinst = Se.shape[0]
ModelSTNo = Se.shape[1]
bnd = ((1e-8,None),)*(ModelSTNo+ModelUNNo)
if x0 is None:
x0 = np.abs(np.random.randn(ModelUNNo + ModelSTNo))*100
if learn == 'SLSQP':
res = minimize(L, x0, method='SLSQP', jac=DLdx, args=(Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R),\
options={'disp': True,'maxiter': maxiter,'ftol': 1e-8},bounds=bnd)
self.alfa = res.x[:ModelUNNo]
self.beta = res.x[ModelUNNo:ModelSTNo+ModelUNNo]
elif learn == 'TNC':
bnd = ((1e-6,None),)*(ModelSTNo+ModelUNNo)
res = sp.optimize.fmin_tnc(L, x0, fprime = DLdx, \
args=(Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R),\
bounds = bnd)
self.alfa = res[0][:ModelUNNo]
self.beta = res[0][ModelUNNo:ModelSTNo+ModelUNNo]
elif learn == 'EXP':
x = x0
u1 = np.log(x0)
for i in range(maxiter):
dLdx = -DLdx(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R)
u1 = u1 + learnrate*x*dLdx
x = np.exp(u1)
L1 = -L(x,Y,ModelUNNo,ModelSTNo,NodeNo,Noinst,R)
                print('In iteration {} dLdx is {}'.format(i,dLdx))
                print('In iteration {} L is {}'.format(i,L1))
self.alfa = x[:ModelUNNo]
self.beta = x[ModelUNNo:ModelSTNo+ModelUNNo]
self.x = x
#""" Test on synthetic (SIN) data """
#import time
#start_time = time.time()
#def S(connect,Se,Xst):
# for j in range(NoGraph):
# for k,l in connect[j]:
# if j == 0:
# Se[:,j,k,l] = np.exp(np.abs(Xst.iloc[:,j].unstack().values[:,k] -
# Xst.iloc[:,j].unstack().values[:,l]))*0.1
# Se[:,j,l,k] = Se[:,j,k,l]
# elif j == 1:
# Se[:,j,k,l] = np.exp(np.abs(Xst.iloc[:,j].unstack().values[:,k] -
# Xst.iloc[:,j].unstack().values[:,l]))*0.3
# Se[:,j,l,k] = Se[:,j,k,l]
# return Se
#
#path = 'D:\Dokumenti\Programi Python\Proba.xlsx'
#df = pd.read_excel(path)
##R = df.iloc[:,:2].values
##R=np.random.rand(5200,2)*2-1
#R = np.load('R_sinteticki.npy')
#NodeNo = 4
#Nopoint = R.shape[0]
#Noinst = np.round(Nopoint/NodeNo).astype(int)
#i1 = np.arange(NodeNo)
#i2 = np.arange(Noinst)
#Xst = np.load('Xst.npy')
#Xst =pd.DataFrame(data=Xst)
#Xst['Node'] = np.tile(i1, Noinst)
#Xst['Inst'] = np.repeat(i2,NodeNo)
#Xst = Xst.set_index(['Inst','Node'])
#connect1=np.array([[0,1],[1,2]])
#connect2=np.array([[0,1],[2,3]])
#connect=[connect1,connect2]
#NoGraph = len(connect)
##Se = np.zeros([Noinst,NoGraph,NodeNo,NodeNo])
##Se = S(connect,Se,Xst)
#Se = np.load('Se.npy')
#
#Notrain = (Noinst*0.8).astype(int)
#Notest = (Noinst*0.2).astype(int)
#
#
#mod1 = GCRFCNB()
#mod1.alfa = np.array([1,18])
#mod1.beta = np.array([0.2,0.2])
#prob, Y = mod1.predict(R,Se)
#Se_train = Se[:Notrain,:,:,:]
#R_train = R[:Notrain*NodeNo,:]
#Y_test = Y[Notrain:Noinst,:]
#Y_train = Y[:Notrain,:]
#
#mod1.fit(R_train, Se_train, Y_train, learn = 'TNC')
#
#R_test = R[Notrain*NodeNo:Noinst*NodeNo,:]
#Se_test = Se[Notrain:Noinst,:,:,:]
#prob2, Y2, Var = mod1.predict(R_test,Se_test)
#Prob1 = prob2.copy()
#Prob1[Y2==0] = 1 - Prob1[Y2==0]
#Y21 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test1 = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#probr = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#probr1 = Prob1.reshape([Prob1.shape[0]*Prob1.shape[1]])
#print('AUC je {}'.format(roc_auc_score(Y_test1,probr)))
##print('Skor je {}'.format(accuracy_score(Y21,Y_test1)))
#print('LogPRob je {}'.format(np.sum(np.log(probr1))))
#print("--- %s seconds ---" % (time.time() - start_time))
#""" Real data: Skijasi (skiers) """
#Spom = np.load('Se.npy')
#R_train = np.load('Z_train_com.npy')
#R_test = np.load('Z_test_com.npy')
#Y_train = np.load('Y_train.npy')
#Y_test = np.load('Y_test.npy')
#Se_train_inst = np.load('Se_train.npy')
#Se_test_inst = np.load('Se_test.npy')
#
#NodeNo = 7
#Noinst_train = np.round(R_train.shape[0]/NodeNo).astype(int)
#Noinst_test = np.round(R_test.shape[0]/NodeNo).astype(int)
#
#ModelSTNo = 6
#Se_train = np.zeros([Noinst_train,ModelSTNo,NodeNo,NodeNo])
#Se_test = np.zeros([Noinst_test,ModelSTNo,NodeNo,NodeNo])
#
#for i in range(Noinst_train):
# Se_train[i,:5,:,:] = Spom
#
#for i in range(Noinst_test):
# Se_test[i,:5,:,:] = Spom
#
#Se_train[:,5,:,:] = np.squeeze(Se_train_inst)
#Se_test[:,5,:,:] = np.squeeze(Se_test_inst)
#
#
#mod1 = GCRFCNB()
#
#
#mod1.fit(R_train, Se_train, Y_train, learn = 'SLSQP', learnrate = 6e-4, maxiter = 300)
#
##mod1.alfa = np.array([0.1043126 , 0.06905401, 0.08689079])
##mod1.beta = np.array([1.00008728e-08, 2.88191498e+02, 1.00000563e-08, 1.00000000e-08,
## 8.74943190e+01, 3.48984028e-03])
#
#prob2, Y2 = mod1.predict(R_test,Se_test)
#Y2 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#prob2 = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#
#print('AUC GCRFCNB prediktora je {}'.format(roc_auc_score(Y_test,prob2)))
#print('Skor GCRFCNB prediktora je {}'.format(accuracy_score(Y2,Y_test)))
##Skor_com = np.load('Skor_com.npy')
#Skor_com_AUC = np.load('Skor_com_AUC.npy')
#print('AUC nestruktuiranih prediktora je {}'.format(Skor_com_AUC))
##print('Skor nestruktuiranih prediktora je {}'.format(Skor_com))
#print('Logprob je {}'.format(np.sum(np.log(prob2))))
#""" Real data: Debeli """
#
#import time
#Spom = np.load('Se.npy')
#R_train = np.load('Z_train_com.npy')
#R_train[R_train == -np.inf] = -10
#R_train[R_train == -np.inf] = np.min(R_train)-100
#R_test = np.load('Z_test_com.npy')
#R_test[R_test == -np.inf] = -10
#R_test[R_test == -np.inf] = np.min(R_test)-100
#Y_train = np.load('Y_train.npy')
#Y_test = np.load('Y_test.npy')
#for i in range(R_train.shape[1]):
# Range = np.abs(np.max(R_train[:,i]) + np.min(R_train[:,i]))
# faktor = int(math.log10(Range))
# R_train[:,i] = R_train[:,i]*10**(-faktor)
# R_test[:,i] = R_test[:,i]*10**(-faktor)
#
#NodeNo = 10
#Noinst_train = np.round(R_train.shape[0]/NodeNo).astype(int)
#Noinst_test = np.round(R_test.shape[0]/NodeNo).astype(int)
#
#ModelSTNo = 4
#Se_train = np.zeros([Noinst_train,ModelSTNo,NodeNo,NodeNo])
#Se_test = np.zeros([Noinst_test,ModelSTNo,NodeNo,NodeNo])
#
#for i in range(Noinst_train):
# Se_train[i,:,:,:] = Spom
#
#for i in range(Noinst_test):
# Se_test[i,:,:,:] = Spom
#
#mod1 = GCRFCNB()
#
#start_time = time.time()
#mod1.fit(R_train, Se_train, Y_train, learn = 'SLSQP', learnrate = 6e-4, maxiter = 5000)
#
#
##mod1.alfa = np.array([1-10, 1e-10, 1e-10, 3000])
##mod1.beta = np.array([1.0000000e-10, 1.0000000e-10, 1e-10, 1e-10])
#
#prob2, Y2 = mod1.predict(R_test,Se_test)
#Y2 = Y2.reshape([Y2.shape[0]*Y2.shape[1]])
#Y_test = Y_test.reshape([Y_test.shape[0]*Y_test.shape[1]])
#prob2 = prob2.reshape([prob2.shape[0]*prob2.shape[1]])
#
##Y_train = Y_train.reshape([Y_train.shape[0]*Y_train.shape[1]])
#print('AUC GCRFCNB prediktora je {}'.format(roc_auc_score(Y_test,prob2)))
##print('Skor GCRFCNB prediktora je {}'.format(accuracy_score(Y2,Y_test)))
##Skor_com = np.load('Skor_com.npy')
#Skor_com_AUC = np.load('Skor_com_AUC.npy')
#print('AUC nestruktuiranih prediktora je {}'.format(Skor_com_AUC))
##print('Skor nestruktuiranih prediktora je {}'.format(Skor_com))
#print('Logprob je {}'.format(np.sum(np.log(prob2))))
#print("--- %s seconds ---" % (time.time() - start_time)) |
the-stack_0_12935 | """The tests for the Switch component."""
# pylint: disable=protected-access
import unittest
from homeassistant.setup import setup_component, async_setup_component
from homeassistant import core, loader
from homeassistant.components import switch
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from tests.common import get_test_home_assistant
from tests.components.switch import common
class TestSwitch(unittest.TestCase):
"""Test the switch module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
platform = loader.get_component(self.hass, 'switch.test')
platform.init()
# Switch 1 is ON, switch 2 is OFF
self.switch_1, self.switch_2, self.switch_3 = \
platform.DEVICES
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_methods(self):
"""Test is_on, turn_on, turn_off methods."""
assert setup_component(
self.hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: 'test'}}
)
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
common.turn_off(self.hass, self.switch_1.entity_id)
common.turn_on(self.hass, self.switch_2.entity_id)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
# Turn all off
common.turn_off(self.hass)
self.hass.block_till_done()
assert not switch.is_on(self.hass)
assert STATE_OFF == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
# Turn all on
common.turn_on(self.hass)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
assert switch.is_on(self.hass, self.switch_3.entity_id)
    def test_setup_two_platforms(self):
        """Test setup with two switch platforms."""
# Test if switch component returns 0 switches
test_platform = loader.get_component(self.hass, 'switch.test')
test_platform.init(True)
loader.set_component(self.hass, 'switch.test2', test_platform)
test_platform.init(False)
assert setup_component(
self.hass, switch.DOMAIN, {
switch.DOMAIN: {CONF_PLATFORM: 'test'},
'{} 2'.format(switch.DOMAIN): {CONF_PLATFORM: 'test2'},
}
)
async def test_switch_context(hass):
"""Test that switch context works."""
assert await async_setup_component(hass, 'switch', {
'switch': {
'platform': 'test'
}
})
state = hass.states.get('switch.ac')
assert state is not None
await hass.services.async_call('switch', 'toggle', {
'entity_id': state.entity_id,
}, True, core.Context(user_id='abcd'))
state2 = hass.states.get('switch.ac')
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == 'abcd'
|
the-stack_0_12937 | from io import StringIO
from .dvexpansion import *
class YMLConfigPP:
def __init__(self, pathes):
self.out_fd = StringIO()
self.include_files = set()
self.pp_pathes = []
for p in pathes:
self.pp_pathes.append(evaluate_dollar_var_expr(p))
def find_yml_file(self, yml):
ret = None
for pp_path in self.pp_pathes:
print("pp_path: ", pp_path)
yml_fn = os.path.join(pp_path, yml)
if os.path.exists(yml_fn):
ret = yml_fn
break
return ret
def get_pp_content(self):
return self.out_fd.getvalue()
def run_pp(self, yml_fn):
fd = open(yml_fn, "r")
self.out_fd.write(f"# {yml_fn}\n")
while 1:
l = fd.readline()
if not l:
break
if l.find("!include") != -1:
self.out_fd.write(f"# {l}")
self.process_include(l)
else:
self.out_fd.write(l)
self.out_fd.write("\n#\n")
self.out_fd.write(f"# end of file {yml_fn}\n")
def process_include(self, include_line):
include_re = r"!include\s+<([\w+/\.\-]+)>"
m = re.match(include_re, include_line)
if m == None or len(m.groups()) != 1:
raise Exception(f"YMLConfigPP::process_include: malformed line {include_line}")
include_file = self.find_yml_file(m.groups()[0])
if include_file == None:
raise Exception(f"YMLConfigPP::process_include: can't resolve {include_line}")
if not include_file in self.include_files:
self.include_files.add(include_file)
self.run_pp(include_file)
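# Illustrative usage sketch (not part of this module): expanding `!include <...>`
# directives in a YAML file. The search paths and file names are hypothetical.
#
# pp = YMLConfigPP(["$HOME/configs", "/etc/myapp/includes"])
# pp.run_pp("/etc/myapp/main.yml")
# print(pp.get_pp_content())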
|
the-stack_0_12938 | """Metrics related to messages."""
from prometheus_client import Counter, Gauge
msgs_sent = Counter("msg_sent",
"Number of messages sent between nodes",
["node_id", "msg_type"])
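# Illustrative usage sketch (not part of this module): how the collectors in
# this file are typically updated from node code. The label values and the
# "PREPARE" message type are hypothetical.
#
# msgs_sent.labels(node_id="0", msg_type="PREPARE").inc()
# msg_rtt.labels(node_id="0", receiver_id="1",
#                receiver_hostname="node1.local").set(0.012)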
msg_rtt = Gauge("msg_rtt",
"Time taken to send a message to a node and get an ACK",
["node_id", "receiver_id", "receiver_hostname"])
msgs_in_queue = Gauge("msgs_in_queue",
"The amount of messages waiting to be sent over channel",
["node_id", "receiver_id", "receiver_hostname"])
allow_service_rtt = Gauge("allow_service_rtt",
"Time taken from declining service to allowing",
["node_id", "view_from"])
msg_sent_size = Gauge("msg_sent_size",
"Size of a message sent from node over com_mod of type \
msg_type",
["node_id", "msg_type", "com_mod"])
bytes_sent = Counter("bytes_sent",
"Number of bytes sent from node over com_mod",
["node_id", "com_mod"])
run_method_time = Gauge("run_method_time",
"Time taken to run the run-forever-loop",
["node_id", "module"])
msgs_during_exp = Gauge("msgs_during_exp",
"Number of messages sent during an experiment",
["node_id", "exp_param", "view_est_msgs",
"rep_msgs", "prim_mon_msgs", "fd_msgs"])
bytes_during_exp = Gauge("bytes_during_exp",
"Number of bytes sent during an experiment",
["node_id", "exp_param", "view_est_bytes",
"rep_bytes", "prim_mon_bytes", "fd_bytes"])
|
the-stack_0_12939 | import sys
import studentdirectory as sd
import gui
def main_for_command_line():
stud_dir = sd.StudentDirectory()
while(1):
print("\nSTUDENT DIRECTORY MENU")
print(" [a] Add New Student")
print(" [b] View Student Details")
print(" [c] Show Student Directory")
print(" [d] Edit Student Details")
print(" [e] Delete Student")
print(" [f] Clear Student Directory")
print(" [g] Exit")
choice = input("Enter choice: ")
if choice == "a":
print("\nADD NEW Student")
key = input(" Enter new student's student number: ")
detail0 = input(" Enter new student's name: ")
detail1 = input(" Enter new student's course and year: ")
detail2 = input(" Enter new student's age: ")
detail3 = input(" Enter new student's email address: ")
detail4 = input(" Enter new student's contact number: ")
value = [detail0, detail1, detail2, detail3, detail4]
stud_dir.add_new_student(key, value)
print("\nNew student added to directory successfully.\n")
elif choice == "b":
print("\nVIEW STUDENT DETAILS")
key = input(" Enter student number: ")
stud_dir.view_student_details(key)
print(" ")
elif choice == "c":
print("\nSHOW STUDENT DIRECTORY")
stud_dir.show_student_directory()
print(" ")
elif choice == "d":
print("\nEDIT STUDENT DETAILS")
key = input(" Enter student number: ")
if stud_dir.check_if_student_exist(key):
detail0 = input(" Enter student's new name: ")
detail1 = input(" Enter student's new course and year: ")
detail2 = input(" Enter student's new age: ")
detail3 = input(" Enter student's new email address: ")
detail4 = input(" Enter student's new contact number: ")
value = [detail0, detail1, detail2, detail3, detail4]
stud_dir.edit_student_details(key, value)
print("\nStudent's details edited successfully.\n")
else:
print("Student number does not exist in "
+ "the student directory.")
elif choice == "e":
print("\nDELETE STUDENT")
key = input(" Enter student number: ")
if stud_dir.check_if_student_exist(key):
stud_dir.delete_student(key)
print("\nStudent removed from the student "
+ "directory successfully.\n")
else:
print("Student number does not exist in "
+ "the student directory.")
elif choice == "f":
print("\nCLEAR STUDENT DIRECTORY")
print(" WARNING! This will delete all entries in the "
+ "student directory. Do you really want to proceed?")
decision = input(" [y]es or [n]o: ")
if decision == "y":
print("\nClearing student directory...")
stud_dir.clear_student_directory()
print("Clearing student directory successful.\n")
elif decision == "n":
print("\n Good call. Going back to the menu...")
else:
print("\nNonexistent decision. Going back to the menu...")
elif choice == "g":
print("\nSaving student directory changes to JSON file.")
stud_dir.save_changes()
print("Done. Bye.")
sys.exit(0)
else:
print("\nNonexistent choice.")
def main():
# main_for_command_line()
gui.GUI()
if __name__ == "__main__": main() |
the-stack_0_12940 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class InputFilterCondition(Model):
"""InputFilterCondition.
:param case_sensitive: Whether or not to do a case sensitive match
:type case_sensitive: bool
:param input_id: The Id of the input to filter on
:type input_id: str
:param input_value: The "expected" input value to compare with the actual input value
:type input_value: str
:param operator: The operator applied between the expected and actual input value
:type operator: object
"""
_attribute_map = {
'case_sensitive': {'key': 'caseSensitive', 'type': 'bool'},
'input_id': {'key': 'inputId', 'type': 'str'},
'input_value': {'key': 'inputValue', 'type': 'str'},
'operator': {'key': 'operator', 'type': 'object'}
}
def __init__(self, case_sensitive=None, input_id=None, input_value=None, operator=None):
super(InputFilterCondition, self).__init__()
self.case_sensitive = case_sensitive
self.input_id = input_id
self.input_value = input_value
self.operator = operator
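# --- Illustrative usage sketch (added by the editor; not part of the generated
# source). The constructor mirrors the attribute map above; the input id and
# the "equals" operator string are assumptions, since the service defines the
# operator values it actually accepts.
if __name__ == "__main__":
    condition = InputFilterCondition(case_sensitive=False,
                                     input_id="definitionId",  # assumed input id
                                     input_value="42",
                                     operator="equals")  # assumed operator value
    print(condition.input_id, condition.operator)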
|
the-stack_0_12944 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from hypothesis import assume, given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestReductionOps(serial.SerializedTestCase):
@serial.given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@serial.given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_int_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.int32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElementsInt",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
@serial.given(n=st.integers(1, 65536),
dtype=st.sampled_from([np.float32, np.float16]),
**hu.gcs)
def test_elementwise_sqrsum(self, n, dtype, gc, dc):
if dtype == np.float16:
# fp16 is only supported with CUDA
assume(gc.device_type == caffe2_pb2.CUDA)
dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
X = np.random.rand(n).astype(dtype)
def sumsqr_op(X):
return [np.sum(X * X)]
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
threshold = 0.01 if dtype == np.float16 else 0.005
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sumsqr_op,
threshold=threshold,
)
@given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_avg(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def avg_op(X):
return [np.mean(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"],
average=1
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=avg_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@serial.given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_rowwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def rowwise_max(X):
return [np.max(X, axis=2)]
op = core.CreateOperator(
"RowwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=rowwise_max,
)
@serial.given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_columnwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def columnwise_max(X):
return [np.max(X, axis=1)]
op = core.CreateOperator(
"ColwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=columnwise_max,
)
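# --- Illustrative addition (not in the original file): caffe2 hypothesis test
# modules are typically runnable directly through unittest; this guard is an
# assumption about how the suite is invoked.
if __name__ == "__main__":
    import unittest
    unittest.main()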
|