the-stack_0_22318
"""
The documentation for python-tdl. A Pythonic port of
U{libtcod<http://doryen.eptalys.net/libtcod/>}.
You can find the project page on Google Code
U{here<http://code.google.com/p/python-tdl/>}.
Report any bugs or issues to the Google Code issue tracker
U{here<https://code.google.com/p/python-tdl/issues/list>}.
Getting Started
===============
Once the library is imported you can load the font you want to use with
L{tdl.setFont}.
This is optional and when skipped will use a decent default font.
After that you call L{tdl.init} to set the size of the window and get the
root console in return.
This console is the canvas for what will appear on the screen.
Indexing Consoles
=================
For most methods taking a position you can use Python-style negative
indexes to refer to the opposite side of a console with (-1, -1)
starting at the bottom right.
You can also check if a point is part of a console using containment
logic i.e. ((x, y) in console).
You may also iterate over a console using a for statement. This returns
every x,y coordinate available to draw on, but it will be extremely slow
to actually operate on every coordinate individually.
Try to minimize draws by using an offscreen L{Console}, only drawing
what needs to be updated, and using L{Console.blit}.
Drawing
=======
Once you have the root console from L{tdl.init} you can start drawing on
it using a method such as L{Console.drawChar}.
When using this method you can have the char parameter be an integer or a
single character string.
The fgcolor and bgcolor parameters expect a three-item list
[red, green, blue] with integers in the 0-255 range, with [0, 0, 0] being
black and [255, 255, 255] being white.
You can also pass None in place of any of these parameters
to tell the library not to overwrite the current colors.
After the drawing functions are called a call to L{tdl.flush} will update
the screen.
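Example
=======
A minimal sketch of the calls described above (illustrative only; the
window size, title, and drawn characters are arbitrary choices)::

    import tdl
    console = tdl.init(80, 60, title='example')
    console.drawChar(1, 1, '@', fgcolor=(255, 255, 255), bgcolor=None)
    console.drawStr(3, 1, 'Hello', fgcolor=(255, 255, 0), bgcolor=(0, 0, 0))
    tdl.flush()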
"""
import sys
import os
import ctypes
import weakref
import array
import itertools
import textwrap
import struct
import tempfile
import re
import warnings
from . import event, map, noise
from .__tcod import _lib, _Color, _unpackfile
_IS_PYTHON3 = (sys.version_info[0] == 3)
if _IS_PYTHON3: # some type lists to use with isinstance
_INTTYPES = (int,)
_NUMTYPES = (int, float)
_STRTYPES = (str, bytes)
else:
_INTTYPES = (int, long)
_NUMTYPES = (int, long, float)
_STRTYPES = (str,)
def _encodeString(string): # still used for filepaths, and that's about it
"changes string into bytes if running in python 3, for sending to ctypes"
if _IS_PYTHON3 and isinstance(string, str):
return string.encode()
return string
#def _formatString(string):
# pass
def _formatChar(char):
"""Prepares a single characters for passing to ctypes calls, needs to return
an integer but can also pass None which will keep the current characters
instead of overwriting it.
This is called often and needs to be optimized whenever possible.
"""
if char is None:
return None
#if isinstance(char, _INTTYPES):
# return char
if isinstance(char, _STRTYPES) and len(char) == 1:
return ord(char)
return int(char) # conversion faster than type check
#raise TypeError('Expected char parameter to be a single characters string, number, or None, got: %s' % repr(char))
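# Examples (illustrative): _formatChar('@') -> 64, _formatChar(64) -> 64, _formatChar(None) -> None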
_fontinitialized = False
_rootinitialized = False
_rootConsoleRef = None
# remove dots from common functions
_setchar = _lib.TCOD_console_set_char
_setfore = _lib.TCOD_console_set_char_foreground
_setback = _lib.TCOD_console_set_char_background
_setcharEX = _lib.TCOD_console_put_char_ex
def _verify_colors(*colors):
"""Used internally.
Raise an assertion error if the parameters can not be converted into colors.
"""
for color in colors:
assert _iscolor(color), 'a color must be a 3-item tuple, web format, or None, received %s' % repr(color)
return True
def _iscolor(color):
"""Used internally.
A debug function to see if an object can be used as a TCOD color struct.
None counts as a parameter to keep the current colors instead.
This function is often part of an inner-loop and can slow a program down.
It has been made to work with assert and can be skipped with the -O flag.
Still it's called often and must be optimized.
"""
if color is None:
return True
if isinstance(color, (tuple, list, _Color)):
return len(color) == 3
if isinstance(color, _INTTYPES):
return True
return False
## not using this for now
#class Color(object):
#
# def __init__(self, r, g, b):
# self._color = (r, g, b)
# self._ctype = None
#
# def _getCType(self):
# if not self._ctype:
# self._ctype = _Color(*self._color)
# return self._ctype
#
# def __len__(self):
# return 3
# Format the color to ctypes, will preserve None and False
_formatColor = _Color.new
def _getImageSize(filename):
"""Try to get the width and height of a bmp of png image file"""
file = open(filename, 'rb')
if file.read(8) == b'\x89PNG\r\n\x1a\n': # PNG
while 1:
length, = struct.unpack('>i', file.read(4))
chunkID = file.read(4)
if chunkID == b'': # EOF
return None
if chunkID == b'IHDR':
# return width, height
return struct.unpack('>ii', file.read(8))
file.seek(4 + length, 1)
file.seek(0)
if file.read(2) == b'BM': # Bitmap
file.seek(18, 0) # skip to size data
# return width, height
return struct.unpack('<ii', file.read(8))
# return None on error, unknown file
class TDLError(Exception):
"""
The catch-all for most TDL-specific errors.
"""
class _MetaConsole(object):
"""
Contains methods shared by both the L{Console} and L{Window} classes.
"""
__slots__ = ('width', 'height', 'console', '_cursor', '_fgcolor',
'_bgcolor', '_bgblend', '_colorLock', '__weakref__', '__dict__')
def __init__(self):
self._cursor = (0, 0)
self._scrollMode = 'error'
self._fgcolor = _formatColor((255, 255, 255))
self._bgcolor = _formatColor((0, 0, 0))
self._bgblend = 1 # SET
self._colorLock = None # which object sets the ctype color options
def _normalizePoint(self, x, y):
"""Check if a point is in bounds and make minor adjustments.
Respects Python's negative indexes. -1 starts at the bottom right.
Replaces the _drawable function
"""
#assert isinstance(x, _INTTYPES), 'x must be an integer, got %s' % repr(x)
#assert isinstance(y, _INTTYPES), 'y must be an integer, got %s' % repr(y)
# force int, always faster than type checking
x = int(x)
y = int(y)
assert (-self.width <= x < self.width) and (-self.height <= y < self.height), \
('(%i, %i) is an invalid position on %s' % (x, y, self))
# handle negative indexes
if x < 0:
x += self.width
if y < 0:
y += self.height
return (x, y)
def _normalizeRect(self, x, y, width, height):
"""Check if the rectangle is in bounds and make minor adjustments.
Raises AssertionError for any problems.
"""
x, y = self._normalizePoint(x, y) # inherit _normalizePoint logic
assert width is None or isinstance(width, _INTTYPES), 'width must be an integer or None, got %s' % repr(width)
assert height is None or isinstance(height, _INTTYPES), 'height must be an integer or None, got %s' % repr(height)
# if width or height are None then extend them to the edge
if width is None:
width = self.width - x
elif width < 0: # handle negative numbers
width += self.width
width = max(0, width) # a 'too big' negative is clamped zero
if height is None:
height = self.height - y
elif height < 0: # handle negative numbers
height += self.height
height = max(0, height) # a 'too big' negative is clamped zero
# reduce rect size to bounds
width = min(width, self.width - x)
height = min(height, self.height - y)
return x, y, width, height
def _normalizeCursor(self, x, y):
"""return the normalized the cursor position."""
width, height = self.getSize()
assert width != 0 and height != 0, 'can not print on a console with a width or height of zero'
while x >= width:
x -= width
y += 1
while y >= height:
if self._scrollMode == 'scroll':
y -= 1
self.scroll(0, -1)
elif self._scrollMode == 'error':
# reset the cursor on error
self._cursor = (0, 0)
raise TDLError('Cursor has reached the end of the console')
return (x, y)
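# Example (illustrative): on an 80x50 console, _normalizeCursor(81, 0) returns (1, 1)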
def _lockColors(self, forceUpdate=False):
"""Make sure the color options on the root console match ths instance"""
if self.console._lockColors is not self or forceUpdate:
self.console._lockColors = self
_lib.TCOD_console_set_default_background(self.console, self.bgcolor)
_lib.TCOD_console_set_default_foreground(self.console, self.fgcolor)
#
def setMode(self, mode):
"""Configure how this console will react to the cursor writing past the
end of the console.
This is for methods that use the virtual cursor, such as L{printStr}.
@type mode: string
@param mode: Possible settings are:
- 'error' - A TDLError will be raised once the cursor
reaches the end of the console. Everything up until
the error will still be drawn.
This is the default setting.
- 'scroll' - The console will scroll up as stuff is
written to the end.
You can restrict the region with L{tdl.Window} when
doing this.
"""
MODES = ['error', 'scroll']
if mode.lower() not in MODES:
raise TDLError('mode must be one of %s, got %s' % (MODES, repr(mode)))
self._scrollMode = mode.lower()
def setColors(self, fg=None, bg=None):
"""Sets the colors to be used with the L{printStr} function.
Values of None will leave the current values unchanged.
"""
if self.console._lockColors is self:
self.console._lockColors = None
if fg is not None:
self._fgcolor = _formatColor(fg)
if bg is not None:
self._bgcolor = _formatColor(bg)
def printStr(self, string):
"""Print a string at the virtual cursor.
Handles special characters such as '\\n' and '\\r'.
Printing past the bottom of the console will scroll everything upwards.
Colors can be set with L{setColors} and the virtual cursor can be moved
with L{move}.
@type string: string
@param string: The string to print.
"""
x, y = self._cursor
for char in string:
if char == '\n': # line break
x = 0
y += 1
continue
if char == '\r': # return
x = 0
continue
x, y = self._normalizeCursor(x, y)
self.drawChar(x, y, char, self._fgcolor, self._bgcolor)
x += 1
self._cursor = (x, y)
def write(self, string):
"""This method mimics basic file-like behaviour.
Because of this method you can replace sys.stdout or sys.stderr with
a L{Typewriter} instance.
This is a convoluted process and the behaviour seen now can be expected to
change in later versions.
@type string: string
"""
# some 'basic' line buffer stuff.
# there must be an easier way to do this. The textwrap module didn't
# help much.
x, y = self._normalizeCursor(*self._cursor)
width, height = self.getSize()
wrapper = textwrap.TextWrapper(initial_indent=(' '*x), width=width)
writeLines = []
for line in string.split('\n'):
if line:
writeLines += wrapper.wrap(line)
wrapper.initial_indent = ''
else:
writeLines.append([])
for line in writeLines:
x, y = self._normalizeCursor(x, y)
self.drawStr(x, y, line[x:], self._fgcolor, self._bgcolor)
y += 1
x = 0
y -= 1
self._cursor = (x, y)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a single characters.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type char: int, string, or None
@param char: Should be an integer, single-character string, or None.
You can set the char parameter as None if you only want to change
the colors of the tile.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3-item list with
integers in the 0-255 range, or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can always use ((x, y) in console) to
check if a tile is drawable.
"""
assert _verify_colors(fgcolor, bgcolor)
x, y = self._normalizePoint(x, y)
x, y = ctypes.c_int(x), ctypes.c_int(y)
self._setChar(x, y, _formatChar(char),
_formatColor(fgcolor), _formatColor(bgcolor))
def drawStr(self, x, y, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a string starting at x and y. Optinally colored.
A string that goes past the right side will wrap around. A string
wraping to below the console will raise a L{TDLError} but will still be
written out. This means you can safely ignore the errors with a
try... except block if you're fine with partily written strings.
\\r and \\n are drawn on the console as normal characters tiles. No
special encoding is done and any string will translate to the characters
table as is.
For a string drawing operation that respects special characters see the
L{Typewriter} class.
@type x: int
@param x: X coordinate to draw at.
@type y: int
@param y: Y coordinate to draw at.
@type string: string or iterable
@param string: Can be a string or an iterable of numbers.
Special characters are not interpreted and are rendered like
any other character.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3-item list with
integers in the 0-255 range, or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y = self._normalizePoint(x, y)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
width, height = self.getSize()
batch = [] # prepare a batch operation
def _drawStrGen(x=x, y=y, string=string, width=width, height=height):
"""Generator for drawStr
Iterates over ((x, y), ch) data for _setCharBatch, raising an
error if the end of the console is reached.
"""
for char in string:
if y == height:
raise TDLError('End of console reached.')
#batch.append(((x, y), _formatChar(char))) # ((x, y), ch)
yield((x, y), _formatChar(char))
x += 1 # advance cursor
if x == width: # line break
x = 0
y += 1
self._setCharBatch(_drawStrGen(), fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Draws a rectangle starting from x and y and extending to width and height.
If width or height are None then it will extend to the edge of the console.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
console or can be a negative number to be sized relative
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
@param string: Should be an integer, single-character string, or None.
You can set the char parameter as None if you only want
to change the colors of an area.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3-item list with
integers in the 0-255 range, or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
# use itertools to make an x,y grid
# using ctypes here reduces type conversions later
grid = itertools.product((ctypes.c_int(x) for x in range(x, x + width)),
(ctypes.c_int(y) for y in range(y, y + height)))
# zip the single characters in a batch variable
batch = zip(grid, itertools.repeat(char, width * height))
self._setCharBatch(batch, fgcolor, bgcolor, nullChar=(char is None))
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
"""Similar to L{drawRect} but only draws the outline of the rectangle.
@type x: int
@param x: x coordinate to draw at.
@type y: int
@param y: y coordinate to draw at.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend to the bottom right of the
console or can be a negative number to be sized relative
to the total size of the console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type string: int, string, or None
@param string: Should be an integer, single-character string, or None.
You can set the char parameter as None if you only want
to change the colors of an area.
@type fgcolor: (r, g, b) or None
@param fgcolor: For fgcolor and bgcolor you use a 3-item list with
integers in the 0-255 range, or None.
None will keep the current color at this position unchanged.
@type bgcolor: (r, g, b) or None
@param bgcolor: Background color. See fgcolor
@raise AssertionError: Having x or y values that can't be placed inside
of the console will raise an AssertionError.
You can always use ((x, y) in console) to
check if a tile is drawable.
"""
x, y, width, height = self._normalizeRect(x, y, width, height)
assert _verify_colors(fgcolor, bgcolor)
fgcolor, bgcolor = _formatColor(fgcolor), _formatColor(bgcolor)
char = _formatChar(string)
if width == 1 or height == 1: # it's just a single width line here
return self.drawRect(x, y, width, height, char, fgcolor, bgcolor)
# draw sides of frame with drawRect
self.drawRect(x, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y, width, 1, char, fgcolor, bgcolor)
self.drawRect(x + width - 1, y, 1, height, char, fgcolor, bgcolor)
self.drawRect(x, y + height - 1, width, 1, char, fgcolor, bgcolor)
def blit(self, source, x=0, y=0, width=None, height=None, srcX=0, srcY=0):
"""Blit another console or Window onto the current console.
By default it blits the entire source to the topleft corner.
@type source: L{Console} or L{Window}
@param source: Source window can be a L{Console} or L{Window} instance.
It can even blit to itself without any problems.
@type x: int
@param x: X coordinate to blit to.
@type y: int
@param y: Y coordinate to blit to.
@type width: int or None
@param width: Width of the rectangle.
Can be None to extend as far as possible to the
bottom right corner of the blit area or can be a negative
number to be sized relative to the total size of the
B{destination} console.
@type height: int or None
@param height: Height of the rectangle. See width.
@type srcX: int
@param srcX: The source console's x coordinate to blit from.
@type srcY: int
@param srcY: The source console's y coordinate to blit from.
"""
# hardcode alpha settings for now
fgalpha=1.0
bgalpha=1.0
assert isinstance(source, (Console, Window)), "source must be a Window or Console instance"
# handle negative indexes and rects
# negative width and height will be set relative to the destination
# and will also be clamped to the smallest Console
x, y, width, height = self._normalizeRect(x, y, width, height)
srcX, srcY, width, height = source._normalizeRect(srcX, srcY, width, height)
# translate source and self if any of them are Window instances
srcX, srcY = source._translate(srcX, srcY)
source = source.console
x, y = self._translate(x, y)
self = self.console
if self == source:
# if we are the same console then we need a third console to hold
# onto the data, otherwise it tries to copy into itself and
# starts destroying everything
tmp = Console(width, height)
_lib.TCOD_console_blit(source, srcX, srcY, width, height, tmp, 0, 0, fgalpha, bgalpha)
_lib.TCOD_console_blit(tmp, 0, 0, width, height, self, x, y, fgalpha, bgalpha)
else:
_lib.TCOD_console_blit(source, srcX, srcY, width, height, self, x, y, fgalpha, bgalpha)
def getCursor(self):
"""Return the virtual cursor position.
@rtype: (x, y)
@return: Returns (x, y) a 2-integer tuple containing where the next
L{printStr} or L{write} will start at.
This can be changed with the L{move} method."""
x, y = self._cursor
width, height = self.getSize()
while x >= width:
x -= width
y += 1
if y >= height and self._scrollMode == 'scroll':
y = height - 1
return x, y
def getSize(self):
"""Return the size of the console as (width, height)
@rtype: (width, height)
"""
return self.width, self.height
def __iter__(self):
"""Return an iterator with every possible (x, y) value for this console.
It goes without saying that working on the console this way is a
slow process, especially for Python, and should be minimized.
@rtype: iter((x, y), ...)
"""
return itertools.product(range(self.width), range(self.height))
def move(self, x, y):
"""Move the virtual cursor.
@type x: int
@param x: X position to place the cursor.
@type y: int
@param y: Y position to place the cursor.
"""
self._cursor = self._normalizePoint(x, y)
def scroll(self, x, y):
"""Scroll the contents of the console in the direction of x,y.
Uncovered areas will be cleared.
Does not move the virtual cursor.
@type x: int
@param x: Distance to scroll along x-axis
@type y: int
@param y: Distance to scroll along y-axis
@rtype: iter((x, y), ...)
@return: Iterates over the (x, y) of any tile uncovered after scrolling.
"""
assert isinstance(x, _INTTYPES), "x must be an integer, got %s" % repr(x)
assert isinstance(y, _INTTYPES), "y must be an integer, got %s" % repr(y)
def getSlide(x, length):
"""get the parameters needed to scroll the console in the given
direction with x
returns (x, length, srcx)
"""
if x > 0:
srcx = 0
length -= x
elif x < 0:
srcx = abs(x)
x = 0
length -= srcx
else:
srcx = 0
return x, length, srcx
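# Examples (illustrative): getSlide(2, 10) -> (2, 8, 0): draw at x=2, copy 8 cells from srcx=0;
# getSlide(-2, 10) -> (0, 8, 2): draw at x=0, copy 8 cells from srcx=2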
def getCover(x, length):
"""return the (x, width) ranges of what is covered and uncovered"""
cover = (0, length) # everything covered
uncover = None # nothing uncovered
if x > 0: # left side uncovered
cover = (x, length - x)
uncover = (0, x)
elif x < 0: # right side uncovered
x = abs(x)
cover = (0, length - x)
uncover = (length - x, x)
return cover, uncover
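# Example (illustrative): getCover(2, 10) -> ((2, 8), (0, 2)): cells 2..9 stay covered, cells 0..1 are uncovered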
width, height = self.getSize()
if abs(x) >= width or abs(y) >= height:
return self.clear() # just clear the console normally
# get the ranges of the areas that will be uncovered
coverX, uncoverX = getCover(x, width)
coverY, uncoverY = getCover(y, height)
# so at this point we know that coverX and coverY makes a rect that
# encases the areas that we end up blitting to. uncoverX/Y makes a
# rect in the corner of the uncovered areas. So we need to combine
# the uncoverX/Y with coverY/X to make what's left of the uncovered
# areas. Explaining it makes it much easier to do now.
# But first we need to blit.
x, width, srcx = getSlide(x, width)
y, height, srcy = getSlide(y, height)
self.blit(self, x, y, width, height, srcx, srcy)
if uncoverX: # clear sides (0x20 is space)
self.drawRect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 0x20, 0x000000, 0x000000)
if uncoverY: # clear top/bottom
self.drawRect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
if uncoverX and uncoverY: # clear corner
self.drawRect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)
def getChar(self, x, y):
"""Return the characters and colors of a tile as (ch, fg, bg)
This method runs very slowly as is not recommended to be called
frequently.
@rtype: (int, (r, g, b), (r, g, b))
@returns: Returns a 3-items tuple. The first items is an integer of the
characters at the position (x, y) the second and third are the
foreground and background colors respectfully.
"""
raise NotImplementedError('Method here only exists for the docstring')
def __contains__(self, position):
"""Use ((x, y) in console) to check if a position is drawable on this console.
"""
x, y = position
return (0 <= x < self.width) and (0 <= y < self.height)
class Console(_MetaConsole):
"""Contains characters and color data and can be drawn to.
The console created by the L{tdl.init} function is the root console and is the
console that is rendered to the screen with L{flush}.
Any console created from the Console class is an off-screen console that
can be drawn on before being L{blit} to the root console.
"""
__slots__ = ('_as_parameter_', '_typewriter')
def __init__(self, width, height):
"""Create a new offscreen console.
@type width: int
@param width: Width of the console in tiles
@type height: int
@param height: Height of the console in tiles
"""
_MetaConsole.__init__(self)
if not _rootinitialized:
raise TDLError('Cannot create Console instances before tdl.init')
self._as_parameter_ = _lib.TCOD_console_new(width, height)
self.console = self
self.width = width
self.height = height
self._typewriter = None # "typewriter lock", makes sure the colors are set to the typewriter
# will be phased out with the Typewriter class
@classmethod
def _newConsole(cls, console):
"""Make a Console instance, from a console ctype"""
self = cls.__new__(cls)
_MetaConsole.__init__(self)
self._as_parameter_ = console
self.console = self
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
self._typewriter = None
return self
def __del__(self):
"""
If the main console is garbage collected then the window will be closed as well
"""
# If this is the root console the window will close when collected
try:
if isinstance(self._as_parameter_, ctypes.c_void_p):
global _rootinitialized, _rootConsoleRef
_rootinitialized = False
_rootConsoleRef = None
_lib.TCOD_console_delete(self)
except Exception: # StandardError no longer exists in Python 3
pass # I forget why I put this here but I'm too afraid to delete it
def __copy__(self):
# make a new class and blit
clone = self.__class__(self.width, self.height)
clone.blit(self)
return clone
def __getstate__(self):
# save data from getChar
data = [self.getChar(x, y) for x,y in
itertools.product(range(self.width), range(self.height))]
return self.width, self.height, data
def __setstate__(self, state):
# make console from __init__ and unpack a getChar array
width, height, data = state
self.__init__(width, height)
for (x, y), graphic in zip(itertools.product(range(width),
range(height)), data):
self.drawChar(x, y, *graphic)
def _replace(self, console):
"""Used internally
Mostly used just to replace this Console object with the root console
If another Console object is used then they are swapped
"""
if isinstance(console, Console):
self._as_parameter_, console._as_parameter_ = \
console._as_parameter_, self._as_parameter_ # swap tcod consoles
else:
self._as_parameter_ = console
self.width = _lib.TCOD_console_get_width(self)
self.height = _lib.TCOD_console_get_height(self)
return self
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console for this Window
Because this is a Console instead of a Window we return the paramaters
untouched"""
return x, y
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Console.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-item list with integers that range 0-255.
Unlike most other operations you cannot use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self._typewriter = None
_lib.TCOD_console_set_default_background(self, _formatColor(bgcolor))
_lib.TCOD_console_set_default_foreground(self, _formatColor(fgcolor))
_lib.TCOD_console_clear(self)
def _setChar(self, x, y, char, fgcolor=None, bgcolor=None, bgblend=1):
"""
Sets a character.
This is called often and is designed to be as fast as possible.
Because of the need for speed this function will do NO TYPE CHECKING
AT ALL, it's up to the drawing functions to use the functions:
_formatChar and _formatColor before passing to this."""
# buffer values as ctypes objects
console = self._as_parameter_
if char is not None and fgcolor is not None and bgcolor is not None:
_setcharEX(console, x, y, char, fgcolor, bgcolor)
return
if char is not None:
_setchar(console, x, y, char)
if fgcolor is not None:
_setfore(console, x, y, fgcolor)
if bgcolor is not None:
_setback(console, x, y, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1, nullChar=False):
"""
Try to perform a batch operation otherwise fall back to _setChar.
If fgcolor and bgcolor are defined then this is faster but not by very
much.
batch is an iterable of [(x, y), ch] items
"""
if fgcolor and not nullChar:
# buffer values as ctypes objects
self._typewriter = None # clear the typewriter as colors will be set
console = self._as_parameter_
bgblend = ctypes.c_int(bgblend)
if not bgcolor:
bgblend = 0
else:
_lib.TCOD_console_set_default_background(console, bgcolor)
_lib.TCOD_console_set_default_foreground(console, fgcolor)
_putChar = _lib.TCOD_console_put_char # remove dots and make local
for (x, y), char in batch:
_putChar(console, x, y, char, bgblend)
else:
for (x, y), char in batch:
self._setChar(x, y, char, fgcolor, bgcolor, bgblend)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
char = _lib.TCOD_console_get_char(self, x, y)
bgcolor = _lib.TCOD_console_get_char_background_wrapper(self, x, y)
fgcolor = _lib.TCOD_console_get_char_foreground_wrapper(self, x, y)
return char, tuple(fgcolor), tuple(bgcolor)
def __repr__(self):
return "<Console (Width=%i Height=%i)>" % (self.width, self.height)
class Window(_MetaConsole):
"""A Window contains a small isolated part of a Console.
Drawing on the Window draws on the Console.
Making a Window and setting its width or height to None will extend it to
the edge of the console.
"""
__slots__ = ('parent', 'x', 'y')
def __init__(self, console, x, y, width, height):
"""Isolate part of a L{Console} or L{Window} instance.
@type console: L{Console} or L{Window}
@param console: The parent object which can be a L{Console} or another
L{Window} instance.
@type x: int
@param x: X coordinate to place the Window.
This follows the normal rules for indexing so you can use a
negative integer to place the Window relative to the bottom
right of the parent Console instance.
@type y: int
@param y: Y coordinate to place the Window.
See x.
@type width: int or None
@param width: Width of the Window.
Can be None to extend as far as possible to the
bottom right corner of the parent Console or can be a
negative number to be sized reltive to the Consoles total
size.
@type height: int or None
@param height: Height of the Window.
See width.
"""
_MetaConsole.__init__(self)
assert isinstance(console, (Console, Window)), 'console parameter must be a Console or Window instance, got %s' % repr(console)
self.parent = console
self.x, self.y, self.width, self.height = console._normalizeRect(x, y, width, height)
if isinstance(console, Console):
self.console = console
else:
self.console = self.parent.console
def _translate(self, x, y):
"""Convertion x and y to their position on the root Console"""
# we add our position relative to our parent and then call then next parent up
return self.parent._translate((x + self.x), (y + self.y))
def clear(self, fgcolor=(0, 0, 0), bgcolor=(0, 0, 0)):
"""Clears the entire Window.
@type fgcolor: (r, g, b)
@param fgcolor: Foreground color.
Must be a 3-item list with integers that range 0-255.
Unlike most other operations you cannot use None here.
@type bgcolor: (r, g, b)
@param bgcolor: Background color. See fgcolor.
"""
assert _verify_colors(fgcolor, bgcolor)
assert fgcolor and bgcolor, 'Can not use None with clear'
self.drawRect(0, 0, None, None, 0x20, fgcolor, bgcolor)
def _setChar(self, x, y, char=None, fgcolor=None, bgcolor=None, bgblend=1):
self.parent._setChar((x + self.x), (y + self.y), char, fgcolor, bgcolor, bgblend)
def _setCharBatch(self, batch, fgcolor, bgcolor, bgblend=1):
myX = self.x # remove dots for speed up
myY = self.y
self.parent._setCharBatch((((x + myX, y + myY), ch) for ((x, y), ch) in batch),
fgcolor, bgcolor, bgblend)
def drawChar(self, x, y, char, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y = self._normalizePoint(x, y)
self.parent.drawChar(x + self.x, y + self.y, char, fgcolor, bgcolor)
def drawRect(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawRect(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def drawFrame(self, x, y, width, height, string, fgcolor=(255, 255, 255), bgcolor=(0, 0, 0)):
# inherit docstring
x, y, width, height = self._normalizeRect(x, y, width, height)
self.parent.drawFrame(x + self.x, y + self.y, width, height, string, fgcolor, bgcolor)
def getChar(self, x, y):
# inherit docstring
x, y = self._normalizePoint(x, y)
return self.console.getChar(self._translate(x, y))
def __repr__(self):
return "<Window(X=%i Y=%i Width=%i Height=%i)>" % (self.x, self.y,
self.width,
self.height)
def init(width, height, title=None, fullscreen=False, renderer='OPENGL'):
"""Start the main console with the given width and height and return the
root console.
Call the console's drawing functions. Then remember to use L{tdl.flush} to
make what's drawn visible on the console.
@type width: int
@param width: width of the root console (in tiles)
@type height: int
@param height: height of the root console (in tiles)
@type title: string
@param title: Text to display as the window title.
If left None it defaults to the running script's filename.
@type fullscreen: boolean
@param fullscreen: Can be set to True to start in fullscreen mode.
@type renderer: string
@param renderer: Can be one of 'GLSL', 'OPENGL', or 'SDL'.
Due to the way Python works you're unlikely to see much of an
improvement by using 'GLSL' or 'OPENGL' as most of the
time Python is slow interacting with the console and the
rendering itself is pretty fast even on 'SDL'.
@rtype: L{Console}
@return: The root console. Only what is drawn on the root console is
what's visible after a call to L{tdl.flush}.
After the root console is garbage collected, the window made by
this function will close.
"""
RENDERERS = {'GLSL': 0, 'OPENGL': 1, 'SDL': 2}
global _rootinitialized, _rootConsoleRef
if not _fontinitialized: # set the default font to the one that comes with tdl
setFont(_unpackfile('terminal8x8.png'), None, None, True, True)
if renderer.upper() not in RENDERERS:
raise TDLError('No such render type "%s", expected one of "%s"' % (renderer, '", "'.join(RENDERERS)))
renderer = RENDERERS[renderer.upper()]
# If a console already exists then make a clone to replace it
if _rootConsoleRef and _rootConsoleRef():
oldroot = _rootConsoleRef()
rootreplacement = Console(oldroot.width, oldroot.height)
rootreplacement.blit(oldroot)
oldroot._replace(rootreplacement)
del rootreplacement
if title is None: # use a default title
if sys.argv:
# Use the script filename as the title.
title = os.path.basename(sys.argv[0])
else:
title = 'python-tdl'
_lib.TCOD_console_init_root(width, height, _encodeString(title), fullscreen, renderer)
#event.get() # flush the libtcod event queue to fix some issues
# issues may be fixed already
event._eventsflushed = False
_rootinitialized = True
rootconsole = Console._newConsole(ctypes.c_void_p())
_rootConsoleRef = weakref.ref(rootconsole)
return rootconsole
def flush():
"""Make all changes visible and update the screen.
Remember to call this function after drawing operations.
Calls to flush will enforce the frame rate limit set by L{tdl.setFPS}.
This function can only be called after L{tdl.init}
"""
if not _rootinitialized:
raise TDLError('Cannot flush without first initializing with tdl.init')
_lib.TCOD_console_flush()
def setFont(path, columns=None, rows=None, columnFirst=False,
greyscale=False, altLayout=False):
"""Changes the font to be used for this session.
This should be called before L{tdl.init}
If the font specifies its size in its filename (i.e. font_NxN.png) then this
function can auto-detect the tileset formatting and the parameters columns
and rows can be left None.
While it's possible to change the font mid-program, it can sometimes
break in rare circumstances. So use caution when doing this.
@type path: string
@param path: Must be a string filepath where a bmp or png file is found.
@type columns: int
@param columns: Number of columns in the tileset.
Can be left None for auto-detection.
@type rows: int
@param rows: Number of rows in the tileset.
Can be left None for auto-detection.
@type columnFirst: boolean
@param columnFirst: Defines if the character order goes along the rows or
columns.
It should be True if the character codes 0-15 are in the
first column.
And should be False if the characters 0-15
are in the first row.
@type greyscale: boolean
@param greyscale: Creates an anti-aliased font from a greyscale bitmap.
Otherwise it uses the alpha channel for anti-aliasing.
Unless you actually need anti-aliasing from a font you
know uses a smooth greyscale channel you should leave
this on False.
@type altLayout: boolean
@param altLayout: An alternative layout with space in the upper left
corner.
The columnFirst parameter is ignored if this is True,
find examples of this layout in the font/libtcod/
directory included with the python-tdl source.
@raise TDLError: Will be raised if no file is found at path or if auto-
detection fails.
@note: A png file that's been optimized can fail to load correctly on
Mac OS X, creating a garbled mess when rendering.
Don't use a program like optipng, or just use bmp files instead, if
you want your program to work on Macs.
"""
# put up some constants that are only used here
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_LAYOUT_TCOD = 8
global _fontinitialized
_fontinitialized = True
flags = 0
if altLayout:
flags |= FONT_LAYOUT_TCOD
elif columnFirst:
flags |= FONT_LAYOUT_ASCII_INCOL
else:
flags |= FONT_LAYOUT_ASCII_INROW
if greyscale:
flags |= FONT_TYPE_GREYSCALE
if not os.path.exists(path):
raise TDLError('no file exists at: "%s"' % path)
path = os.path.abspath(path)
# and the rest is the auto-detect script
imgSize = _getImageSize(path) # try to find image size
if imgSize:
imgWidth, imgHeight = imgSize
# try to get font size from filename
match = re.match('.*?([0-9]+)[xX]([0-9]+)', os.path.basename(path))
if match:
fontWidth, fontHeight = match.groups()
fontWidth, fontHeight = int(fontWidth), int(fontHeight)
# estimate correct tileset size
estColumns, remC = divmod(imgWidth, fontWidth)
estRows, remR = divmod(imgHeight, fontHeight)
if remC or remR:
warnings.warn("Font may be incorrectly formatted.")
if not columns:
columns = estColumns
if not rows:
rows = estRows
else:
# the font filename does not include the font size
if not (columns and rows):
# no matched font size and no tileset is given
raise TDLError('%s has no font size in filename' % os.path.basename(path))
if columns and rows:
# confirm user set options
if (fontWidth * columns != imgWidth or
fontHeight * rows != imgHeight):
warnings.warn("setFont parameters are set as if the image size is (%d, %d) when the detected size is actually (%i, %i)"
% (fontWidth * columns, fontHeight * rows,
imgWidth, imgHeight))
else:
warnings.warn("%s is probably not an image." % os.path.basename(path))
if not (columns and rows):
# didn't auto-detect
raise TDLError('Can not auto-detect the tileset of %s' % os.path.basename(path))
_lib.TCOD_console_set_custom_font(_encodeString(path), flags, columns, rows)
def getFullscreen():
"""Returns True if program is fullscreen.
@rtype: boolean
@return: Returns True if the window is in fullscreen mode.
Otherwise returns False.
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
return _lib.TCOD_console_is_fullscreen()
def setFullscreen(fullscreen):
"""Changes the fullscreen state.
@type fullscreen: boolean
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
_lib.TCOD_console_set_fullscreen(fullscreen)
def setTitle(title):
"""Change the window title.
@type title: string
"""
if not _rootinitialized:
raise TDLError('Not initialized. Set title with tdl.init')
_lib.TCOD_console_set_window_title(_encodeString(title))
def screenshot(path=None):
"""Capture the screen and save it as a png file
@type path: string
@param path: The filepath to save the screenshot.
If path is None then the image will be placed in the current
folder with the names:
screenshot001.png, screenshot002.png, ...
"""
if not _rootinitialized:
raise TDLError('Initialize first with tdl.init')
if isinstance(path, str):
_lib.TCOD_sys_save_screenshot(_encodeString(path))
elif path is None: # save to screenshot001.png, screenshot002.png, ...
filelist = os.listdir('.')
n = 1
filename = 'screenshot%.3i.png' % n
while filename in filelist:
n += 1
filename = 'screenshot%.3i.png' % n
_lib.TCOD_sys_save_screenshot(_encodeString(filename))
else: # assume file-like obj
# save to a temp file and copy it into the file-like obj
# (os.tempnam is gone in Python 3 and a plain string is not a context
# manager, so use tempfile.mkstemp and open the path explicitly)
tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
os.close(tmpfd)
_lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
with open(tmpname, 'rb') as tmpfile:
path.write(tmpfile.read())
os.remove(tmpname)
#else:
# raise TypeError('path is an invalid type: %s' % type(path))
def setFPS(frameRate):
"""Set the maximum frame rate.
@type frameRate: int
@param frameRate: Further calls to L{tdl.flush} will limit the speed of
the program to run at <frameRate> frames per second. Can
also be set to 0 to run without a limit.
Defaults to None.
"""
if frameRate is None:
frameRate = 0
assert isinstance(frameRate, _INTTYPES), 'frameRate must be an integer or None, got: %s' % repr(frameRate)
_lib.TCOD_sys_set_fps(frameRate)
def getFPS():
"""Return the current frames per second of the running program set by
L{setFPS}
@rtype: int
@return: Returns the frameRate set by setFPS.
If set to no limit, this will return 0.
"""
return _lib.TCOD_sys_get_fps()
def forceResolution(width, height):
"""Change the fullscreen resoulution
@type width: int
@type height: int
"""
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
__all__ = [_var for _var in locals().keys() if _var[0] != '_' and _var not in
['sys', 'os', 'ctypes', 'array', 'weakref', 'itertools', 'textwrap',
'struct', 're', 'warnings']] # remove modules from __all__
__all__ += ['_MetaConsole'] # keep this object public to show the documentation in epydoc
__license__ = "New BSD License"
__email__ = "[email protected]"
file = open(os.path.join(os.path.dirname(__file__), 'VERSION.txt'), 'r')
__version__ = file.read()
file.close()
the-stack_0_22320
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SqueezeNet implementation with TPU support.
This version does not contain the model compression components (
sparsification and quantization).
Original paper: (https://arxiv.org/pdf/1602.07360.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import tpu as contrib_tpu
def conv2d(inputs,
filters,
kernel_size,
strides=(1, 1),
kernel_initializer=contrib_layers.xavier_initializer_conv2d(),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=contrib_layers.l2_regularizer(scale=0.0002),
name=None):
return tf.layers.conv2d(
inputs,
filters,
kernel_size,
strides,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
activation=tf.nn.relu,
name=name,
padding="same")
def fire_module(inputs, squeeze_depth, expand_depth, name):
"""Fire module: squeeze input filters, then apply spatial convolutions."""
with tf.variable_scope(name, "fire", [inputs]):
squeezed = conv2d(inputs, squeeze_depth, [1, 1], name="squeeze")
e1x1 = conv2d(squeezed, expand_depth, [1, 1], name="e1x1")
e3x3 = conv2d(squeezed, expand_depth, [3, 3], name="e3x3")
return tf.concat([e1x1, e3x3], axis=3)
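# The two expand branches are concatenated along the channel axis,
# so the module outputs 2 * expand_depth channels.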
def squeezenet(images, is_training=True, num_classes=1001):
"""Squeezenet 1.0 model."""
net = conv2d(images, 96, [7, 7], strides=(2, 2), name="conv1")
net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name="maxpool1")
net = fire_module(net, 16, 64, name="fire2")
net = fire_module(net, 16, 64, name="fire3")
net = fire_module(net, 32, 128, name="fire4")
net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name="maxpool4")
net = fire_module(net, 32, 128, name="fire5")
net = fire_module(net, 48, 192, name="fire6")
net = fire_module(net, 48, 192, name="fire7")
net = fire_module(net, 64, 256, name="fire8")
net = tf.layers.max_pooling2d(net, [3, 3], strides=(2, 2), name="maxpool8")
net = fire_module(net, 64, 256, name="fire9")
net = tf.layers.dropout(net, rate=0.5 if is_training else 0.0, name="drop9")
net = conv2d(net, num_classes, [1, 1], strides=(1, 1), name="conv10")
net = tf.layers.average_pooling2d(net, pool_size=(13, 13), strides=(1, 1))
logits = tf.layers.flatten(net)
return logits
def metric_fn(labels, logits, learning_rate):
predictions = tf.cast(tf.argmax(logits, 1), tf.int32)
labels = tf.cast(labels, tf.int64)
return {
"accuracy": tf.metrics.accuracy(labels, predictions),
"recall_at_5": tf.metrics.recall_at_k(labels, logits, 5),
"recall_at_1": tf.metrics.recall_at_k(labels, logits, 1),
"learning_rate": tf.metrics.mean(learning_rate),
}
def model_fn(features, labels, mode, params):
"""TPUEstimatorSpec for the Squeezenet model."""
is_training = mode == tf.estimator.ModeKeys.TRAIN
logits = squeezenet(
features, is_training=is_training, num_classes=params["num_classes"])
loss = tf.reduce_mean(
tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
global_batch_size = (params["train"]["num_cores_per_replica"] *
params["train"]["train_batch_size"])
decay_steps = (params["train"]["num_examples_per_epoch"] *
params["train"]["num_epochs"]) // global_batch_size
learning_rate = tf.train.polynomial_decay(
params["train"]["learning_rate"]["init_learning_rate"],
global_step=tf.train.get_or_create_global_step(),
end_learning_rate=params["train"]["learning_rate"]["end_learning_rate"],
decay_steps=decay_steps,
power=1.0,
cycle=False)
# TODO(power): Hack copied from resnet: remove when summaries are working.
lr_repeat = tf.reshape(
tf.tile(tf.expand_dims(learning_rate, 0),
[params["train"]["train_batch_size"],]),
[params["train"]["train_batch_size"], 1])
if params["train"]["optimizer"]["type"] == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif params["train"]["optimizer"]["type"] == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
momentum=params["train"]["optimizer"]["momentum"],
epsilon=1.0
)
else:
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=params["train"]["optimizer"]["momentum"],
use_nesterov=True)
if params["use_tpu"]:
optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(loss, tf.train.get_global_step())
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metrics=(metric_fn, [labels, logits, lr_repeat]),
predictions={
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
},
)
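# Illustrative smoke test (not part of the original module): running this file
# directly builds the SqueezeNet graph for a dummy batch to sanity-check shapes.
# The batch size of 8 and the 224x224 input size are arbitrary assumptions.
if __name__ == "__main__":
  dummy_images = tf.zeros([8, 224, 224, 3], dtype=tf.float32)
  dummy_logits = squeezenet(dummy_images, is_training=False, num_classes=1001)
  print(dummy_logits.shape)  # expected: (8, 1001) for 224x224 inputs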
the-stack_0_22321
"""This module defines rwrtrack.py commands."""
import code
import logging
import sys
from datetime import datetime, timedelta
from sqlalchemy.util import KeyedTuple
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import func, distinct, text
from rwrtrack.get import get_stats
from rwrtrack.csv import load_stats_from_csv, write_stats_to_csv
from rwrtrack.db import sesh, _set_db_readonly, _set_db_writable
from rwrtrack.dbinfo import DbInfo, get_dbinfo
from rwrtrack.account import Account, get_account_by_name
from rwrtrack.record import Record, get_records_on_date
from rwrtrack.difference import Diff, difference
from rwrtrack.sum import sum_, diffsum
from rwrtrack.average import avg, diffavg
from rwrtrack.rank import rank, diffrank
from rwrtrack.filter import filter_
from rwrtrack.exceptions import NoAccountError, NoRecordError, NoCsvError
from rwrtrack.tablify import render_analysis_table
from rwrtrack.migrate import migrate
logger = logging.getLogger(__name__)
def _prettify_int_or_float(x):
if isinstance(x, int):
return f"{x:,d}"
elif isinstance(x, float):
return f"{x:,.2f}"
else:
raise Exception("Can't pretty '{x}' as it is not an int or float!")
def process_numeric_dates(date_string):
"""Identify and convert date or date range to int(s)."""
if date_string.isnumeric():
return "single", int(date_string)
else:
# Handle date ranges
dates = date_string.split("-")
d_older = int(dates[0])
d_newer = int(dates[1])
if (d_older > d_newer):
logger.error("Dates must be older-newer!")
sys.exit(1)
return "range", (d_newer, d_older)
def _get(csv_hist_dir, args):
# TODO: Rewrite to also write to the db
num_pages = int(args["<pages>"]) if args["<pages>"] else 10
stats = get_stats(num_pages)
write_stats_to_csv(csv_hist_dir, stats)
def _analyse(args):
username = args["<name>"]
dates = args["<dates>"]
try:
account = get_account_by_name(username)
except NoAccountError as e:
logger.error(e)
sys.exit(1)
logger.info(f"Performing individual analysis for '{username}'...")
try:
if not dates:
print(f"'{account.username}' on {account.latest_date}:")
render_analysis_table(account.latest_record)
else:
dt, d = process_numeric_dates(dates)
if dt == "single":
record = account.on_date(d)
print(f"'{account.username}' on {record.date}:")
render_analysis_table(record)
elif dt == "range":
record_newer = account.on_date(d[0])
record_older = account.on_date(d[1])
diff = record_newer - record_older
print(f"'{account.username}' from {record_older.date} to {record_newer.date}:")
render_analysis_table(diff)
except NoRecordError as e:
logger.error(e)
sys.exit(1)
def _average(args):
dates = args["<dates>"]
rf, df = args["--record-filters"], args["--diff-filters"]
if not dates:
try:
db_info = get_dbinfo()
a = avg(db_info.latest_date, record_filters=rf)
except NoResultFound:
logger.info("Empty database! Exit.")
sys.exit(1)
else:
dt, d = process_numeric_dates(dates)
if dt == "single":
a = avg(d, record_filters=rf)
elif dt == "range":
a = diffavg(d[0], d[1], record_filters=rf, diff_filters=df)
# TODO: add nice table for averages
print(a)
def _rank(args):
metric = args["<metric>"]
try:
if args["--limit"]:
limit = abs(int(args["--limit"]))
if not limit > 0:
raise ValueError
else:
limit = 5
except ValueError as e:
logger.error(f"Limit must be an integer greater than or equal to 1")
sys.exit(1)
dates = args["<dates>"]
rf, df = args["--record-filters"], args["--diff-filters"]
if not dates:
try:
db_info = get_dbinfo()
ranking = rank(db_info.latest_date, metric, record_filters=rf)
except NoResultFound:
logger.info("Empty database! Exit.")
sys.exit(1)
else:
dt, d = process_numeric_dates(dates)
if dt == "single":
print(f"Ranking by '{metric}' on {d}:")
ranking = rank(d, metric, record_filters=rf)
elif dt == "range":
print(f"Ranking by '{metric}' between {d[1]} and {d[0]}:")
ranking = diffrank(d[0], d[1], metric, record_filters=rf, diff_filters=df)
ranking = ranking.limit(limit)
for x, r in enumerate(ranking.all()):
if isinstance(r, Record):
# render in the form "#12...MR. BANG...8,752,631"
v = _prettify_int_or_float(getattr(r, metric))
print(f"#{x+1:<8}{r.username:<24}{v}")
else:
# print(r._asdict())
v = _prettify_int_or_float(getattr(r, metric))
print(f"#{x+1:<8}{r.username:<24}{v}")
# print("\n", end="")
def _sum(args):
dates = args["<dates>"]
rf, df = args["--record-filters"], args["--diff-filters"]
if not dates:
try:
db_info = get_dbinfo()
s = sum_(db_info.latest_date, record_filters=rf)
except NoResultFound:
logger.info("Empty database! Exit.")
sys.exit(1)
else:
dt, d = process_numeric_dates(dates)
if dt == "single":
s = sum_(d, record_filters=rf)
elif dt == "range":
s = diffsum(d[0], d[1], record_filters=rf, diff_filters=df)
# TODO: add nice table for sums
print(s)
def _dbinfo():
try:
db_info = get_dbinfo()
except NoResultFound:
logger.info("Empty database! Exit.")
sys.exit(1)
print(f"First date: {db_info.first_date} Latest date: {db_info.latest_date}")
num_accounts = sesh.query(func.count(Account._id)).scalar()
print(f"Accounts recorded: {num_accounts}")
num_days = sesh.query(func.count(distinct(Record.date))).scalar()
print(f"Days recorded: {num_days}")
total_records = sesh.query(func.count(Record.date)).scalar()
print(f"Number of records: {total_records}")
def _db_migrate_csv(csv_hist_dir):
try:
migrate(csv_hist_dir)
except NoCsvError as e:
logger.error(e)
sys.exit(1)
def _interact():
print("Entering interactive mode...")
code.interact(local=globals(), banner="", exitmsg="")
the-stack_0_22322
import copy
import math
import pickle
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import warnings
from cmaes import CMA
import numpy as np
import optuna
from optuna import logging
from optuna._study_direction import StudyDirection
from optuna._transform import _SearchSpaceTransform
from optuna.distributions import BaseDistribution
from optuna.exceptions import ExperimentalWarning
from optuna.samplers import BaseSampler
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
_logger = logging.get_logger(__name__)
_EPS = 1e-10
# The value of system_attrs must be less than 2046 characters on RDBStorage.
_SYSTEM_ATTR_MAX_LENGTH = 2045
class CmaEsSampler(BaseSampler):
"""A Sampler using CMA-ES algorithm.
Example:
Optimize a simple quadratic function by using :class:`~optuna.samplers.CmaEsSampler`.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_uniform("x", -1, 1)
y = trial.suggest_int("y", -1, 1)
return x ** 2 + y
sampler = optuna.samplers.CmaEsSampler()
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=20)
Please note that this sampler does not support CategoricalDistribution.
If your search space contains categorical parameters, it is recommended
to use :class:`~optuna.samplers.TPESampler` instead.
Furthermore, there is room for performance improvements in parallel
optimization settings. This sampler cannot use some trials for updating
the parameters of multivariate normal distribution.
For further information about CMA-ES algorithm and its restarting strategy
algorithm, please refer to the following papers:
- `N. Hansen, The CMA Evolution Strategy: A Tutorial. arXiv:1604.00772, 2016.
<https://arxiv.org/abs/1604.00772>`_
- `A. Auger and N. Hansen. A restart CMA evolution strategy with increasing population
size. In Proceedings of the IEEE Congress on Evolutionary Computation (CEC 2005),
pages 1769–1776. IEEE Press, 2005.
<http://www.cmap.polytechnique.fr/~nikolaus.hansen/cec2005ipopcmaes.pdf>`_
.. seealso::
You can also use :class:`optuna.integration.PyCmaSampler` which is a sampler using cma
library as the backend.
Args:
x0:
A dictionary of an initial parameter values for CMA-ES. By default, the mean of ``low``
and ``high`` for each distribution is used. Note that ``x0`` is sampled uniformly
within the search space domain for each restart if you specify ``restart_strategy``
argument.
sigma0:
Initial standard deviation of CMA-ES. By default, ``sigma0`` is set to
``min_range / 6``, where ``min_range`` denotes the minimum range of the distributions
in the search space.
seed:
A random seed for CMA-ES.
n_startup_trials:
The independent sampling is used instead of the CMA-ES algorithm until the given number
of trials finish in the same study.
independent_sampler:
A :class:`~optuna.samplers.BaseSampler` instance that is used for independent
sampling. The parameters not contained in the relative search space are sampled
by this sampler.
The search space for :class:`~optuna.samplers.CmaEsSampler` is determined by
:func:`~optuna.samplers.intersection_search_space()`.
If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used
as the default.
.. seealso::
:class:`optuna.samplers` module provides built-in independent samplers
such as :class:`~optuna.samplers.RandomSampler` and
:class:`~optuna.samplers.TPESampler`.
warn_independent_sampling:
If this is :obj:`True`, a warning message is emitted when
the value of a parameter is sampled by using an independent sampler.
Note that the parameters of the first trial in a study are always sampled
via an independent sampler, so no warning messages are emitted in this case.
restart_strategy:
Strategy for restarting CMA-ES optimization when converges to a local minimum.
If given :obj:`None`, CMA-ES will not restart (default).
If given 'ipop', CMA-ES will restart with increasing population size.
Please see also ``inc_popsize`` parameter.
.. note::
Added in v2.1.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.1.0.
inc_popsize:
Multiplier for increasing population size before each restart.
This argument will be used when setting ``restart_strategy = 'ipop'``.
consider_pruned_trials:
If this is :obj:`True`, the PRUNED trials are considered for sampling.
.. note::
Added in v2.0.0 as an experimental feature. The interface may change in newer
versions without prior notice. See
https://github.com/optuna/optuna/releases/tag/v2.0.0.
.. note::
It is suggested to set this flag :obj:`False` when the
:class:`~optuna.pruners.MedianPruner` is used. On the other hand, it is suggested
to set this flag :obj:`True` when the :class:`~optuna.pruners.HyperbandPruner` is
used. Please see `the benchmark result
<https://github.com/optuna/optuna/pull/1229>`_ for the details.
Raises:
ValueError:
If ``restart_strategy`` is not 'ipop' or :obj:`None`.
"""
def __init__(
self,
x0: Optional[Dict[str, Any]] = None,
sigma0: Optional[float] = None,
n_startup_trials: int = 1,
independent_sampler: Optional[BaseSampler] = None,
warn_independent_sampling: bool = True,
seed: Optional[int] = None,
*,
consider_pruned_trials: bool = False,
restart_strategy: Optional[str] = None,
inc_popsize: int = 2,
) -> None:
self._x0 = x0
self._sigma0 = sigma0
self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed)
self._n_startup_trials = n_startup_trials
self._warn_independent_sampling = warn_independent_sampling
self._cma_rng = np.random.RandomState(seed)
self._search_space = optuna.samplers.IntersectionSearchSpace()
self._consider_pruned_trials = consider_pruned_trials
self._restart_strategy = restart_strategy
self._inc_popsize = inc_popsize
if self._restart_strategy:
warnings.warn(
"`restart_strategy` option is an experimental feature."
" The interface can change in the future.",
ExperimentalWarning,
)
if self._consider_pruned_trials:
warnings.warn(
"`consider_pruned_trials` option is an experimental feature."
" The interface can change in the future.",
ExperimentalWarning,
)
# TODO(c-bata): Support BIPOP-CMA-ES.
if restart_strategy not in (
"ipop",
None,
):
raise ValueError(
"restart_strategy={} is unsupported. Please specify: 'ipop' or None.".format(
restart_strategy
)
)
def reseed_rng(self) -> None:
# _cma_rng doesn't require reseeding because the relative sampling reseeds in each trial.
self._independent_sampler.reseed_rng()
def infer_relative_search_space(
self, study: "optuna.Study", trial: "optuna.trial.FrozenTrial"
) -> Dict[str, BaseDistribution]:
search_space: Dict[str, BaseDistribution] = {}
for name, distribution in self._search_space.calculate(study).items():
if distribution.single():
# `cma` cannot handle distributions that contain just a single value, so we skip
# them. Note that the parameter values for such distributions are sampled in
# `Trial`.
continue
if not isinstance(
distribution,
(
optuna.distributions.UniformDistribution,
optuna.distributions.LogUniformDistribution,
optuna.distributions.DiscreteUniformDistribution,
optuna.distributions.IntUniformDistribution,
optuna.distributions.IntLogUniformDistribution,
),
):
# Categorical distribution is unsupported.
continue
search_space[name] = distribution
return search_space
def sample_relative(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
search_space: Dict[str, BaseDistribution],
) -> Dict[str, Any]:
self._raise_error_if_multi_objective(study)
if len(search_space) == 0:
return {}
completed_trials = self._get_trials(study)
if len(completed_trials) < self._n_startup_trials:
return {}
if len(search_space) == 1:
_logger.info(
"`CmaEsSampler` only supports two or more dimensional continuous "
"search space. `{}` is used instead of `CmaEsSampler`.".format(
self._independent_sampler.__class__.__name__
)
)
self._warn_independent_sampling = False
return {}
trans = _SearchSpaceTransform(search_space)
optimizer, n_restarts = self._restore_optimizer(completed_trials)
if optimizer is None:
n_restarts = 0
optimizer = self._init_optimizer(trans)
if self._restart_strategy is None:
generation_attr_key = "cma:generation" # for backward compatibility
else:
generation_attr_key = "cma:restart_{}:generation".format(n_restarts)
if optimizer.dim != len(trans.bounds):
_logger.info(
"`CmaEsSampler` does not support dynamic search space. "
"`{}` is used instead of `CmaEsSampler`.".format(
self._independent_sampler.__class__.__name__
)
)
self._warn_independent_sampling = False
return {}
# TODO(c-bata): Reduce the number of wasted trials during parallel optimization.
# See https://github.com/optuna/optuna/pull/920#discussion_r385114002 for details.
solution_trials = [
t
for t in completed_trials
if optimizer.generation == t.system_attrs.get(generation_attr_key, -1)
]
if len(solution_trials) >= optimizer.population_size:
solutions: List[Tuple[np.ndarray, float]] = []
for t in solution_trials[: optimizer.population_size]:
assert t.value is not None, "completed trials must have a value"
x = trans.transform(t.params)
y = t.value if study.direction == StudyDirection.MINIMIZE else -t.value
solutions.append((x, y))
optimizer.tell(solutions)
if self._restart_strategy == "ipop" and optimizer.should_stop():
n_restarts += 1
generation_attr_key = "cma:restart_{}:generation".format(n_restarts)
popsize = optimizer.population_size * self._inc_popsize
optimizer = self._init_optimizer(
trans, population_size=popsize, randomize_start_point=True
)
# Store optimizer
optimizer_str = pickle.dumps(optimizer).hex()
optimizer_attrs = _split_optimizer_str(optimizer_str)
for key in optimizer_attrs:
study._storage.set_trial_system_attr(trial._trial_id, key, optimizer_attrs[key])
# Caution: optimizer should update its seed value
seed = self._cma_rng.randint(1, 2 ** 16) + trial.number
optimizer._rng = np.random.RandomState(seed)
params = optimizer.ask()
study._storage.set_trial_system_attr(
trial._trial_id, generation_attr_key, optimizer.generation
)
study._storage.set_trial_system_attr(trial._trial_id, "cma:n_restarts", n_restarts)
external_values = trans.untransform(params)
# Exclude upper bounds for parameters that should have their upper bounds excluded.
# TODO(hvy): Remove this exclusion logic when it is handled by the data transformer.
for name, param in external_values.items():
distribution = search_space[name]
if isinstance(distribution, optuna.distributions.UniformDistribution):
external_values[name] = min(external_values[name], distribution.high - _EPS)
elif isinstance(distribution, optuna.distributions.LogUniformDistribution):
external_values[name] = min(
external_values[name], math.exp(math.log(distribution.high) - _EPS)
)
return external_values
def _restore_optimizer(
self,
completed_trials: "List[optuna.trial.FrozenTrial]",
) -> Tuple[Optional[CMA], int]:
# Restore a previous CMA object.
for trial in reversed(completed_trials):
optimizer_attrs = {
key: value
for key, value in trial.system_attrs.items()
if key.startswith("cma:optimizer")
}
if len(optimizer_attrs) == 0:
continue
# Check "cma:optimizer" key for backward compatibility.
optimizer_str = optimizer_attrs.get("cma:optimizer", None)
if optimizer_str is None:
optimizer_str = _concat_optimizer_attrs(optimizer_attrs)
n_restarts: int = trial.system_attrs.get("cma:n_restarts", 0)
return pickle.loads(bytes.fromhex(optimizer_str)), n_restarts
return None, 0
def _init_optimizer(
self,
trans: _SearchSpaceTransform,
population_size: Optional[int] = None,
randomize_start_point: bool = False,
) -> CMA:
lower_bounds = trans.bounds[:, 0]
upper_bounds = trans.bounds[:, 1]
n_dimension = len(trans.bounds)
if randomize_start_point:
mean = lower_bounds + (upper_bounds - lower_bounds) * self._cma_rng.rand(n_dimension)
elif self._x0 is None:
mean = lower_bounds + (upper_bounds - lower_bounds) / 2
else:
# `self._x0` is in the external representation.
mean = trans.transform(self._x0)
if self._sigma0 is None:
sigma0 = np.min((upper_bounds - lower_bounds) / 6)
else:
sigma0 = self._sigma0
# Avoid ZeroDivisionError in cmaes.
sigma0 = max(sigma0, _EPS)
return CMA(
mean=mean,
sigma=sigma0,
bounds=trans.bounds,
seed=self._cma_rng.randint(1, 2 ** 31 - 2),
n_max_resampling=10 * n_dimension,
population_size=population_size,
)
def sample_independent(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
self._raise_error_if_multi_objective(study)
if self._warn_independent_sampling:
complete_trials = self._get_trials(study)
if len(complete_trials) >= self._n_startup_trials:
self._log_independent_sampling(trial, param_name)
return self._independent_sampler.sample_independent(
study, trial, param_name, param_distribution
)
def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None:
_logger.warning(
"The parameter '{}' in trial#{} is sampled independently "
"by using `{}` instead of `CmaEsSampler` "
"(optimization performance may be degraded). "
"`CmaEsSampler` does not support dynamic search space or `CategoricalDistribution`. "
"You can suppress this warning by setting `warn_independent_sampling` "
"to `False` in the constructor of `CmaEsSampler`, "
"if this independent sampling is intended behavior.".format(
param_name, trial.number, self._independent_sampler.__class__.__name__
)
)
def _get_trials(self, study: "optuna.Study") -> List[FrozenTrial]:
complete_trials = []
for t in study.get_trials(deepcopy=False):
if t.state == TrialState.COMPLETE:
complete_trials.append(t)
elif (
t.state == TrialState.PRUNED
and len(t.intermediate_values) > 0
and self._consider_pruned_trials
):
_, value = max(t.intermediate_values.items())
if value is None:
continue
# We rewrite the value of the trial `t` for sampling, so we need a deepcopy.
copied_t = copy.deepcopy(t)
copied_t.value = value
complete_trials.append(copied_t)
return complete_trials
def _split_optimizer_str(optimizer_str: str) -> Dict[str, str]:
optimizer_len = len(optimizer_str)
attrs = {}
for i in range(math.ceil(optimizer_len / _SYSTEM_ATTR_MAX_LENGTH)):
start = i * _SYSTEM_ATTR_MAX_LENGTH
end = min((i + 1) * _SYSTEM_ATTR_MAX_LENGTH, optimizer_len)
attrs["cma:optimizer:{}".format(i)] = optimizer_str[start:end]
return attrs
def _concat_optimizer_attrs(optimizer_attrs: Dict[str, str]) -> str:
return "".join(
optimizer_attrs["cma:optimizer:{}".format(i)] for i in range(len(optimizer_attrs))
)
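# Hedged round-trip sketch (illustration only, not part of the module): the two
# helpers above chunk a hex-encoded, pickled CMA object into pieces that fit an
# RDBStorage system_attr and join them back. The dummy string is an assumption
# used purely to show the invariant.
#
#     blob = "ab" * 3000                  # stands in for pickle.dumps(optimizer).hex()
#     attrs = _split_optimizer_str(blob)  # {"cma:optimizer:0": ..., "cma:optimizer:1": ..., ...}
#     assert _concat_optimizer_attrs(attrs) == blob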
|
the-stack_0_22323 | #!/usr/bin/env python
"""
_DashboardInterface_t_
Unit tests for the DashboardInterface module
"""
import unittest
import socket
import os
import os.path
from nose.plugins.attrib import attr
from WMQuality.TestInit import TestInit
from WMQuality.Emulators.WMSpecGenerator.WMSpecGenerator import WMSpecGenerator
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMCore.DataStructs.Job import Job
from WMCore.DataStructs.File import File
from WMCore.DataStructs.Run import Run
from WMCore.FwkJobReport.Report import Report
from WMCore.WMRuntime.DashboardInterface import DashboardInfo, getUserProxyDN
from WMCore.WMBase import getTestBase
class DashboardInterfaceTest(EmulatedUnitTestCase):
"""
Test for the dashboard interface and its monitoring interaction
Well, once I've written them it will be
"""
def setUp(self):
"""
Basically, do nothing
"""
super(DashboardInterfaceTest, self).setUp()
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testDir = self.testInit.generateWorkDir()
return
def tearDown(self):
"""
Clean up the test directory
"""
self.testInit.delWorkDir()
super(DashboardInterfaceTest, self).tearDown()
return
def createWorkload(self):
"""
Create a workload in order to test things
"""
generator = WMSpecGenerator()
workload = generator.createReRecoSpec("Tier1ReReco")
return workload
def createTestJob(self):
"""
Create a test job to pass to the DashboardInterface
"""
job = Job(name = "ThisIsASillyName")
testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
testFileA.addRun(Run(1, *[45]))
testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
testFileB.addRun(Run(1, *[46]))
job.addFile(testFileA)
job.addFile(testFileB)
job['id'] = 1
return job
def createReport(self, outcome = 0):
"""
Create a test report
"""
jobReport = Report()
jobReport.addStep('cmsRun1')
jobReport.setStepStartTime(stepName = 'cmsRun1')
jobReport.setStepStopTime(stepName = 'cmsRun1')
if outcome:
jobReport.addError('cmsRun1', 200, 'FakeError', 'FakeError')
return jobReport
def setupJobEnvironment(self, name = 'test'):
"""
_setupJobEnvironment_
Make some sort of environment in which to run tests
"""
os.environ['WMAGENT_SITE_CONFIG_OVERRIDE'] = os.path.join(getTestBase(),
"WMCore_t/Storage_t",
"T1_US_FNAL_SiteLocalConfig.xml")
return
def testASuccessfulJobMonitoring(self):
"""
_testASuccessfulJobMonitoring_
Check that the data packets make sense when a job completes successfully
"""
# Get the necessary objects
name = 'testA'
job = self.createTestJob()
workload = self.createWorkload()
task = workload.getTask(taskName = "DataProcessing")
report = self.createReport()
# Fill the job environment
self.setupJobEnvironment(name = name)
# Instantiate DBInfo
dbInfo = DashboardInfo(job=job, task=task, dashboardUrl='127.0.0.1:8884')
# Check jobStart information
data = dbInfo.jobStart()
self.assertEqual(data['MessageType'], 'JobStatus')
self.assertEqual(data['StatusValue'], 'running')
self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')
# Do the first step
step = task.getStep(stepName = "cmsRun1")
# Do the step start
data = dbInfo.stepStart(step = step.data)
self.assertNotEqual(data['jobStart'], None)
self.assertEqual(data['jobStart']['ExeStart'], step.name())
self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
self.assertEqual(data['1_ExeStart'], step.name())
#Do the step end
data = dbInfo.stepEnd(step = step.data, stepReport = report)
self.assertEqual(data['1_ExeEnd'], step.name())
self.assertEqual(data['1_ExeExitCode'], 0)
self.assertTrue(data['1_ExeWCTime'] >= 0)
self.assertEqual(data['1_NCores'], 1)
self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)
#Do a second step
step = task.getStep(stepName = "cmsRun1")
#Do the step start (It's not the first step)
data = dbInfo.stepStart(step = step.data)
self.assertEqual(data['jobStart'], None)
self.assertEqual(data['2_ExeStart'], step.name())
#Do the step end
data = dbInfo.stepEnd(step = step.data, stepReport = report)
self.assertEqual(data['2_ExeEnd'], step.name())
self.assertEqual(data['2_ExeExitCode'], 0)
self.assertTrue(data['2_ExeWCTime'] >= 0)
self.assertEqual(data['2_NCores'], 1)
self.assertEqual(report.retrieveStep("cmsRun1").counter, 2)
# End the job!
data = dbInfo.jobEnd()
self.assertEqual(data['ExeEnd'], "cmsRun1")
self.assertEqual(data['JobExitCode'], 0)
self.assertEqual(data['WrapperCPUTime'], 0)
self.assertTrue(data['WrapperWCTime'] >= 0)
self.assertNotEqual(data['JobExitReason'], "")
return
def testMultithreadedApplication(self):
"""
_testMultithreadedApplication_
Check that the data packets have NCores and it picks it up successfully from the CMSSW step
"""
# Get the necessary objects
name = 'testMT'
job = self.createTestJob()
workload = self.createWorkload()
task = workload.getTask(taskName = "DataProcessing")
report = self.createReport()
# Fill the job environment
self.setupJobEnvironment(name = name)
# Instantiate DBInfo
dbInfo = DashboardInfo(job=job, task=task, dashboardUrl='127.0.0.1:8884')
# Modify the first step
step = task.getStep(stepName = "cmsRun1")
step.getTypeHelper().setNumberOfCores(8)
# Check jobStart information
data = dbInfo.jobStart()
self.assertEqual(data['NCores'], 8)
# Do the first step
step = task.getStep(stepName = "cmsRun1")
# Do the step start
data = dbInfo.stepStart(step = step.data)
#Do the step end
data = dbInfo.stepEnd(step = step.data, stepReport = report)
self.assertEqual(data['1_NCores'], 8)
self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)
# End the job and test the final NCores report
data = dbInfo.jobEnd()
self.assertEqual(data['NCores'], 8)
return
def testAFailedJobMonitoring(self):
"""
_TestAFailedJobMonitoring_
Simulate a job that completes but fails, check that the data sent is
correct
"""
# Get the necessary objects
name = 'testB'
job = self.createTestJob()
workload = self.createWorkload()
task = workload.getTask(taskName = "DataProcessing")
report = self.createReport(outcome = 1)
# Fill the job environment
self.setupJobEnvironment(name = name)
# Instantiate DBInfo
dbInfo = DashboardInfo(job=job, task=task, dashboardUrl='127.0.0.1:8884')
# Check jobStart information
data = dbInfo.jobStart()
self.assertEqual(data['MessageType'], 'JobStatus')
self.assertEqual(data['StatusValue'], 'running')
self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')
# Do the first step
step = task.getStep(stepName = "cmsRun1")
# Do the step start
data = dbInfo.stepStart(step = step.data)
self.assertNotEqual(data['jobStart'], None)
self.assertEqual(data['jobStart']['ExeStart'], step.name())
self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
self.assertEqual(data['1_ExeStart'], step.name())
#Do the step end
data = dbInfo.stepEnd(step = step.data, stepReport = report)
self.assertEqual(data['1_ExeEnd'], step.name())
self.assertNotEqual(data['1_ExeExitCode'], 0)
self.assertTrue(data['1_ExeWCTime'] >= 0)
self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)
# End the job!
data = dbInfo.jobEnd()
self.assertEqual(data['ExeEnd'], "cmsRun1")
self.assertNotEqual(data['JobExitCode'], 0)
self.assertEqual(data['WrapperCPUTime'], 0)
self.assertTrue(data['WrapperWCTime'] >= 0)
self.assertNotEqual(data['JobExitReason'].find('cmsRun1'), -1)
return
def testAKilledJobMonitoring(self):
"""
_TestAKilledJobMonitoring_
Simulate a job that is killed check that the data sent is
correct
"""
# Get the necessary objects
name = 'testC'
job = self.createTestJob()
workload = self.createWorkload()
task = workload.getTask(taskName = "DataProcessing")
report = self.createReport(outcome = 1)
# Fill the job environment
self.setupJobEnvironment(name = name)
# Instantiate DBInfo
dbInfo = DashboardInfo(job=job, task=task, dashboardUrl='127.0.0.1:8884')
# Check jobStart information
data = dbInfo.jobStart()
self.assertEqual(data['MessageType'], 'JobStatus')
self.assertEqual(data['StatusValue'], 'running')
self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')
# Do the first step
step = task.getStep(stepName = "cmsRun1")
# Do the step start
data = dbInfo.stepStart(step = step.data)
self.assertNotEqual(data['jobStart'], None)
self.assertEqual(data['jobStart']['ExeStart'], step.name())
self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
self.assertEqual(data['1_ExeStart'], step.name())
#Do the step end
data = dbInfo.stepEnd(step = step.data, stepReport = report)
self.assertEqual(data['1_ExeEnd'], step.name())
self.assertNotEqual(data['1_ExeExitCode'], 0)
self.assertTrue(data['1_ExeWCTime'] >= 0)
# Kill the job!
data = dbInfo.jobKilled()
self.assertEqual(data['ExeEnd'], "cmsRun1")
self.assertNotEqual(data['JobExitCode'], 0)
self.assertEqual(data['WrapperCPUTime'], 0)
self.assertTrue(data['WrapperWCTime'] >= 0)
self.assertNotEqual(data['JobExitReason'].find('killed'), -1)
return
@attr('integration')
def testGetDN(self):
"""
_testGetDN_
Checks that we can get a DN
"""
dn = getUserProxyDN()
if 'X509_USER_PROXY' in os.environ:
self.assertNotEqual(dn, None, 'Error: This should get a DN, if you have set one')
else:
self.assertEqual(dn, None, 'Error: There is no proxy in the environment, it should not get one')
if __name__ == "__main__":
unittest.main()
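# Hedged usage note (command is an assumption based on the nose attrib plugin
# imported above): testGetDN is tagged @attr('integration'), so a local run can
# skip it with something like:
#
#     nosetests -a '!integration' DashboardInterface_t.py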
|
the-stack_0_22326 | """The command-line interface for Fava."""
import errno
import os
import click
from cheroot.wsgi import Server # type: ignore
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.middleware.profiler import ProfilerMiddleware
from fava import __version__
from fava.application import app
from fava.util import simple_wsgi
# pylint: disable=too-many-arguments
@click.command(context_settings=dict(auto_envvar_prefix="FAVA"))
@click.argument(
"filenames",
nargs=-1,
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)
@click.option(
"-p",
"--port",
type=int,
default=5000,
show_default=True,
metavar="<port>",
help="The port to listen on.",
)
@click.option(
"-H",
"--host",
type=str,
default="localhost",
show_default=True,
metavar="<host>",
help="The host to listen on.",
)
@click.option("--prefix", type=str, help="Set an URL prefix.")
@click.option(
"--incognito",
is_flag=True,
help="Run in incognito mode and obscure all numbers.",
)
@click.option("-d", "--debug", is_flag=True, help="Turn on debugging.")
@click.option(
"--profile", is_flag=True, help="Turn on profiling. Implies --debug."
)
@click.option(
"--profile-dir",
type=click.Path(),
help="Output directory for profiling data.",
)
@click.version_option(version=__version__, prog_name="fava")
def main(
filenames, port, host, prefix, incognito, debug, profile, profile_dir
): # pragma: no cover
"""Start Fava for FILENAMES on http://<host>:<port>.
If the `BEANCOUNT_FILE` environment variable is set, Fava will use the
files (space-delimited) specified there in addition to FILENAMES.
Note you can also specify command-line options via environment variables.
For example, `--host=0.0.0.0` is equivalent to setting the environment
variable `FAVA_HOST=0.0.0.0`.
"""
if profile:
debug = True
env_filename = os.environ.get("BEANCOUNT_FILE")
if env_filename:
filenames = filenames + tuple(env_filename.split())
if not filenames:
raise click.UsageError("No file specified")
app.config["BEANCOUNT_FILES"] = filenames
app.config["INCOGNITO"] = incognito
if prefix:
app.wsgi_app = DispatcherMiddleware(
simple_wsgi, {prefix: app.wsgi_app}
)
if not debug:
server = Server((host, port), app)
print(f"Running Fava on http://{host}:{port}")
server.safe_start()
else:
if profile:
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(
app.wsgi_app,
restrictions=(30,),
profile_dir=profile_dir if profile_dir else None,
)
app.jinja_env.auto_reload = True
try:
app.run(host, port, debug)
except OSError as error:
if error.errno == errno.EADDRINUSE:
raise click.UsageError(
"Can not start webserver because the port is already in "
"use. Please choose another port with the '-p' option."
)
raise
# needed for pyinstaller:
if __name__ == "__main__": # pragma: no cover
main() # pylint: disable=no-value-for-parameter
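# Hedged invocation sketch (paths and values are illustrative): options can be
# given on the command line or via FAVA_* environment variables, e.g.
#
#     fava ledger.beancount --port 5001
#     FAVA_HOST=0.0.0.0 fava ledger.beancount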
|
the-stack_0_22327 | #!/usr/bin/env python3
import os
import time
import multiprocessing
from tqdm import tqdm
import argparse
# run DM procs
os.environ["USE_WEBCAM"] = "1"
import cereal.messaging as messaging
from cereal.services import service_list
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.params import Params
from common.realtime import Ratekeeper, DT_MDL, DT_DMON, sec_since_boot
from common.transformations.camera import eon_f_frame_size, eon_d_frame_size, tici_f_frame_size, tici_d_frame_size
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.test.update_ci_routes import upload_route
from tools.lib.route import Route
from tools.lib.framereader import FrameReader
from tools.lib.logreader import LogReader
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(process_replay_dir, "fakedata/")
def replay_panda_states(s, msgs):
pm = messaging.PubMaster([s, 'peripheralState'])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() in ['pandaStates', 'pandaStateDEPRECATED']]
# Migrate the safety param based on carParams
cp = [m for m in msgs if m.which() == 'carParams'][0].carParams
if len(cp.safetyConfigs):
safety_param = cp.safetyConfigs[0].safetyParam
else:
safety_param = cp.safetyParamDEPRECATED
while True:
for m in smsgs:
if m.which() == 'pandaStateDEPRECATED':
new_m = messaging.new_message('pandaStates', 1)
new_m.pandaStates[0] = m.pandaStateDEPRECATED
new_m.pandaStates[0].safetyParam = safety_param
pm.send(s, new_m)
else:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
pm.send(s, new_m)
new_m = messaging.new_message('peripheralState')
pm.send('peripheralState', new_m)
rk.keep_time()
def replay_manager_state(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
while True:
new_m = messaging.new_message('managerState')
new_m.managerState.processes = [{'name': name, 'running': True} for name in managed_processes]
pm.send(s, new_m)
rk.keep_time()
def replay_device_state(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
new_m.deviceState.freeSpacePercent = 50
new_m.deviceState.memoryUsagePercent = 50
pm.send(s, new_m)
rk.keep_time()
def replay_sensor_events(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
for evt in new_m.sensorEvents:
evt.timestamp = new_m.logMonoTime
pm.send(s, new_m)
rk.keep_time()
def replay_service(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
pm.send(s, new_m)
rk.keep_time()
def replay_cameras(lr, frs):
eon_cameras = [
("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_ROAD),
("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER),
]
tici_cameras = [
("roadCameraState", DT_MDL, tici_f_frame_size, VisionStreamType.VISION_STREAM_ROAD),
("driverCameraState", DT_MDL, tici_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER),
]
def replay_camera(s, stream, dt, vipc_server, frames, size):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(1 / dt, print_delay_threshold=None)
img = b"\x00" * int(size[0]*size[1]*3/2)
while True:
if frames is not None:
img = frames[rk.frame % len(frames)]
rk.keep_time()
m = messaging.new_message(s)
msg = getattr(m, s)
msg.frameId = rk.frame
pm.send(s, m)
vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
init_data = [m for m in lr if m.which() == 'initData'][0]
cameras = tici_cameras if (init_data.initData.deviceType == 'tici') else eon_cameras
# init vipc server and cameras
p = []
vs = VisionIpcServer("camerad")
for (s, dt, size, stream) in cameras:
fr = frs.get(s, None)
frames = None
if fr is not None:
print(f"Decomressing frames {s}")
frames = []
for i in tqdm(range(fr.frame_count)):
img = fr.get(i, pix_fmt='yuv420p')[0]
frames.append(img.flatten().tobytes())
vs.create_buffers(stream, 40, False, size[0], size[1])
p.append(multiprocessing.Process(target=replay_camera,
args=(s, stream, dt, vs, frames, size)))
# hack to make UI work
vs.create_buffers(VisionStreamType.VISION_STREAM_RGB_ROAD, 4, True, eon_f_frame_size[0], eon_f_frame_size[1])
vs.start_listener()
return vs, p
def regen_segment(lr, frs=None, outdir=FAKEDATA):
lr = list(lr)
if frs is None:
frs = dict()
# setup env
params = Params()
params.clear_all()
params.put_bool("Passive", False)
params.put_bool("OpenpilotEnabledToggle", True)
os.environ["LOG_ROOT"] = outdir
os.environ["REPLAY"] = "1"
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
# TODO: remove after getting new route for mazda
migration = {
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
elif msg.which() == 'liveCalibration':
params.put("CalibrationParams", msg.as_builder().to_bytes())
vs, cam_procs = replay_cameras(lr, frs)
fake_daemons = {
'sensord': [
multiprocessing.Process(target=replay_sensor_events, args=('sensorEvents', lr)),
],
'pandad': [
multiprocessing.Process(target=replay_service, args=('can', lr)),
multiprocessing.Process(target=replay_service, args=('ubloxRaw', lr)),
multiprocessing.Process(target=replay_panda_states, args=('pandaStates', lr)),
],
'managerState': [
multiprocessing.Process(target=replay_manager_state, args=('managerState', lr)),
],
'thermald': [
multiprocessing.Process(target=replay_device_state, args=('deviceState', lr)),
],
'camerad': [
*cam_procs,
],
}
try:
# start procs up
ignore = list(fake_daemons.keys()) + ['ui', 'manage_athenad', 'uploader']
ensure_running(managed_processes.values(), started=True, not_run=ignore)
for procs in fake_daemons.values():
for p in procs:
p.start()
for _ in tqdm(range(60)):
# ensure all procs are running
for d, procs in fake_daemons.items():
for p in procs:
if not p.is_alive():
raise Exception(f"{d}'s {p.name} died")
time.sleep(1)
finally:
# kill everything
for p in managed_processes.values():
p.stop()
for procs in fake_daemons.values():
for p in procs:
p.terminate()
del vs
r = params.get("CurrentRoute", encoding='utf-8')
return os.path.join(outdir, r + "--0")
def regen_and_save(route, sidx, upload=False, use_route_meta=False):
if use_route_meta:
r = Route(args.route)
lr = LogReader(r.log_paths()[args.seg])
fr = FrameReader(r.camera_paths()[args.seg])
else:
lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2")
fr = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc")
rpath = regen_segment(lr, {'roadCameraState': fr})
lr = LogReader(os.path.join(rpath, 'rlog.bz2'))
controls_state_active = [m.controlsState.active for m in lr if m.which() == 'controlsState']
assert any(controls_state_active), "Segment did not engage"
relr = os.path.relpath(rpath)
print("\n\n", "*"*30, "\n\n")
print("New route:", relr, "\n")
if upload:
upload_route(relr)
return relr
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate new segments from old ones")
parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket")
parser.add_argument("route", type=str, help="The source route")
parser.add_argument("seg", type=int, help="Segment in source route")
args = parser.parse_args()
regen_and_save(args.route, args.seg, args.upload)
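# Hedged usage sketch (the route name below is a placeholder, not a real route):
#
#     python regen.py "somedongleid|2021-01-01--00-00-00" 3
#     python regen.py "somedongleid|2021-01-01--00-00-00" 3 --upload
#
# i.e. regenerate segment 3 of the given source route and optionally upload the
# result to the CI bucket.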
|
the-stack_0_22328 | # Copyright (c) 2017 https://github.com/ping
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# -*- coding: utf-8 -*-
import logging
import hmac
import hashlib
import uuid
import json
import re
import time
import random
from datetime import datetime
import gzip
from io import BytesIO
import warnings
from socket import timeout, error as SocketError
from ssl import SSLError
from .compat import (
compat_urllib_parse, compat_urllib_error,
compat_urllib_request, compat_urllib_parse_urlparse,
compat_http_client)
from .errors import (
ErrorHandler, ClientError,
ClientLoginRequiredError, ClientCookieExpiredError,
ClientConnectionError
)
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError # pylint: disable=redefined-builtin
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .constants import Constants
from .http import ClientCookieJar
from .endpoints import (
AccountsEndpointsMixin, DiscoverEndpointsMixin, FeedEndpointsMixin,
FriendshipsEndpointsMixin, LiveEndpointsMixin, MediaEndpointsMixin,
MiscEndpointsMixin, LocationsEndpointsMixin, TagsEndpointsMixin,
UsersEndpointsMixin, UploadEndpointsMixin, UsertagsEndpointsMixin,
CollectionsEndpointsMixin, HighlightsEndpointsMixin,
IGTVEndpointsMixin,
ClientDeprecationWarning, ClientPendingDeprecationWarning,
ClientExperimentalWarning, ChallengeEndpointsMixin
)
logger = logging.getLogger(__name__)
# Force Client deprecation warnings to always appear
warnings.simplefilter('always', ClientDeprecationWarning)
warnings.simplefilter('always', ClientPendingDeprecationWarning)
warnings.simplefilter('default', ClientExperimentalWarning)
class Client(AccountsEndpointsMixin, DiscoverEndpointsMixin, FeedEndpointsMixin,
FriendshipsEndpointsMixin, LiveEndpointsMixin, MediaEndpointsMixin,
MiscEndpointsMixin, LocationsEndpointsMixin, TagsEndpointsMixin,
UsersEndpointsMixin, UploadEndpointsMixin, UsertagsEndpointsMixin,
CollectionsEndpointsMixin, HighlightsEndpointsMixin, ChallengeEndpointsMixin,
IGTVEndpointsMixin, object):
"""Main API client class for the private app api."""
API_URL = 'https://i.instagram.com/api/{version!s}/'
USER_AGENT = Constants.USER_AGENT
IG_SIG_KEY = Constants.IG_SIG_KEY
IG_CAPABILITIES = Constants.IG_CAPABILITIES
SIG_KEY_VERSION = Constants.SIG_KEY_VERSION
APPLICATION_ID = Constants.APPLICATION_ID
def __init__(self, username, password, **kwargs):
"""
:param username: Login username
:param password: Login password
:param kwargs: See below
:Keyword Arguments:
- **auto_patch**: Patch the api objects to match the public API. Default: False
- **drop_incompat_key**: Remove api object keys that is not in the public API. Default: False
- **timeout**: Timeout interval in seconds. Default: 15
- **api_url**: Override the default api url base
- **cookie**: Saved cookie string from a previous session
- **settings**: A dict of settings from a previous session
- **on_login**: Callback after successful login
- **proxy**: Specify a proxy ex: 'http://127.0.0.1:8888' (ALPHA)
- **proxy_handler**: Specify your own proxy handler
- **authenticate**: Specify if you want to log in. Default: True
:return:
"""
self.username = username
self.password = password
self.auto_patch = kwargs.pop('auto_patch', False)
self.drop_incompat_keys = kwargs.pop('drop_incompat_keys', False)
self.api_url = kwargs.pop('api_url', None) or self.API_URL
self.timeout = kwargs.pop('timeout', 15)
self.on_login = kwargs.pop('on_login', None)
self.logger = logger
user_settings = kwargs.pop('settings', None) or {}
self.uuid = (
kwargs.pop('guid', None) or kwargs.pop('uuid', None) or
user_settings.get('uuid') or self.generate_uuid(False))
self.device_id = (
kwargs.pop('device_id', None) or user_settings.get('device_id') or
self.generate_deviceid())
# application session ID
self.session_id = (
kwargs.pop('session_id', None) or user_settings.get('session_id') or
self.generate_uuid(False))
self.signature_key = (
kwargs.pop('signature_key', None) or user_settings.get('signature_key') or
self.IG_SIG_KEY)
self.key_version = (
kwargs.pop('key_version', None) or user_settings.get('key_version') or
self.SIG_KEY_VERSION)
self.ig_capabilities = (
kwargs.pop('ig_capabilities', None) or user_settings.get('ig_capabilities') or
self.IG_CAPABILITIES)
self.application_id = (
kwargs.pop('application_id', None) or user_settings.get('application_id') or
self.APPLICATION_ID)
self.authenticate = kwargs.pop('authenticate', True)
# to maintain backward compat for user_agent kwarg
custom_ua = kwargs.pop('user_agent', '') or user_settings.get('user_agent')
if custom_ua:
self.user_agent = custom_ua
else:
self.app_version = (
kwargs.pop('app_version', None) or user_settings.get('app_version') or
Constants.APP_VERSION)
self.android_release = (
kwargs.pop('android_release', None) or user_settings.get('android_release') or
Constants.ANDROID_RELEASE)
self.android_version = int(
kwargs.pop('android_version', None) or user_settings.get('android_version') or
Constants.ANDROID_VERSION)
self.phone_manufacturer = (
kwargs.pop('phone_manufacturer', None) or user_settings.get('phone_manufacturer') or
Constants.PHONE_MANUFACTURER)
self.phone_device = (
kwargs.pop('phone_device', None) or user_settings.get('phone_device') or
Constants.PHONE_DEVICE)
self.phone_model = (
kwargs.pop('phone_model', None) or user_settings.get('phone_model') or
Constants.PHONE_MODEL)
self.phone_dpi = (
kwargs.pop('phone_dpi', None) or user_settings.get('phone_dpi') or
Constants.PHONE_DPI)
self.phone_resolution = (
kwargs.pop('phone_resolution', None) or user_settings.get('phone_resolution') or
Constants.PHONE_RESOLUTION)
self.phone_chipset = (
kwargs.pop('phone_chipset', None) or user_settings.get('phone_chipset') or
Constants.PHONE_CHIPSET)
self.version_code = (
kwargs.pop('version_code', None) or user_settings.get('version_code') or
Constants.VERSION_CODE)
cookie_string = kwargs.pop('cookie', None) or user_settings.get('cookie')
cookie_jar = ClientCookieJar(cookie_string=cookie_string)
if cookie_string and cookie_jar.auth_expires and int(time.time()) >= cookie_jar.auth_expires:
raise ClientCookieExpiredError('Cookie expired at {0!s}'.format(cookie_jar.auth_expires))
cookie_handler = compat_urllib_request.HTTPCookieProcessor(cookie_jar)
proxy_handler = kwargs.pop('proxy_handler', None)
if not proxy_handler:
proxy = kwargs.pop('proxy', None)
if proxy:
warnings.warn('Proxy support is alpha.', UserWarning)
parsed_url = compat_urllib_parse_urlparse(proxy)
if parsed_url.netloc and parsed_url.scheme:
proxy_address = '{0!s}://{1!s}'.format(parsed_url.scheme, parsed_url.netloc)
proxy_handler = compat_urllib_request.ProxyHandler({'https': proxy_address})
else:
raise ValueError('Invalid proxy argument: {0!s}'.format(proxy))
handlers = []
if proxy_handler:
handlers.append(proxy_handler)
# Allow user to override custom ssl context where possible
custom_ssl_context = kwargs.pop('custom_ssl_context', None)
try:
https_handler = compat_urllib_request.HTTPSHandler(context=custom_ssl_context)
except TypeError:
# py version < 2.7.9
https_handler = compat_urllib_request.HTTPSHandler()
handlers.extend([
compat_urllib_request.HTTPHandler(),
https_handler,
cookie_handler])
opener = compat_urllib_request.build_opener(*handlers)
opener.cookie_jar = cookie_jar
self.opener = opener
# ad_id must be initialised after cookie_jar/opener because
# it relies on self.authenticated_user_name
self.ad_id = (
kwargs.pop('ad_id', None) or user_settings.get('ad_id') or
self.generate_adid())
if not cookie_string and self.authenticate: # [TODO] There's probably a better way than to depend on cookie_string
if not self.username or not self.password:
raise ClientLoginRequiredError('login_required', code=400)
self.login()
self.logger.debug('USERAGENT: {0!s}'.format(self.user_agent))
super(Client, self).__init__()
@property
def is_authenticated(self):
if self.authenticated_user_id:
return True
return False
@property
def settings(self):
"""Helper property that extracts the settings that you should cache
in addition to username and password."""
return {
'uuid': self.uuid,
'device_id': self.device_id,
'ad_id': self.ad_id,
'session_id': self.session_id,
'cookie': self.cookie_jar.dump(),
'created_ts': int(time.time())
}
@property
def user_agent(self):
"""Returns the useragent string that the client is currently using."""
return Constants.USER_AGENT_FORMAT.format(**{
'app_version': self.app_version,
'android_version': self.android_version,
'android_release': self.android_release,
'brand': self.phone_manufacturer,
'device': self.phone_device,
'model': self.phone_model,
'dpi': self.phone_dpi,
'resolution': self.phone_resolution,
'chipset': self.phone_chipset,
'version_code': self.version_code})
@user_agent.setter
def user_agent(self, value):
"""Override the useragent string with your own"""
mobj = re.search(Constants.USER_AGENT_EXPRESSION, value)
if not mobj:
raise ValueError('User-agent specified does not fit format required: {0!s}'.format(
Constants.USER_AGENT_EXPRESSION))
self.app_version = mobj.group('app_version')
self.android_release = mobj.group('android_release')
self.android_version = int(mobj.group('android_version'))
self.phone_manufacturer = mobj.group('manufacturer')
self.phone_device = mobj.group('device')
self.phone_model = mobj.group('model')
self.phone_dpi = mobj.group('dpi')
self.phone_resolution = mobj.group('resolution')
self.phone_chipset = mobj.group('chipset')
self.version_code = mobj.group('version_code')
@staticmethod
def generate_useragent(**kwargs):
"""
Helper method to generate a useragent string based on device parameters
:param kwargs:
- **app_version**
- **android_version**
- **android_release**
- **brand**
- **device**
- **model**
- **dpi**
- **resolution**
- **chipset**
:return: A compatible user agent string
"""
return Constants.USER_AGENT_FORMAT.format(**{
'app_version': kwargs.pop('app_version', None) or Constants.APP_VERSION,
'android_version': int(kwargs.pop('android_version', None) or Constants.ANDROID_VERSION),
'android_release': kwargs.pop('android_release', None) or Constants.ANDROID_RELEASE,
'brand': kwargs.pop('phone_manufacturer', None) or Constants.PHONE_MANUFACTURER,
'device': kwargs.pop('phone_device', None) or Constants.PHONE_DEVICE,
'model': kwargs.pop('phone_model', None) or Constants.PHONE_MODEL,
'dpi': kwargs.pop('phone_dpi', None) or Constants.PHONE_DPI,
'resolution': kwargs.pop('phone_resolution', None) or Constants.PHONE_RESOLUTION,
'chipset': kwargs.pop('phone_chipset', None) or Constants.PHONE_CHIPSET,
'version_code': kwargs.pop('version_code', None) or Constants.VERSION_CODE})
@staticmethod
def validate_useragent(value):
"""
Helper method to validate a useragent string for format correctness
:param value:
:return:
"""
mobj = re.search(Constants.USER_AGENT_EXPRESSION, value)
if not mobj:
raise ValueError(
'User-agent specified does not fit format required: {0!s}'.format(
Constants.USER_AGENT_EXPRESSION))
parse_params = {
'app_version': mobj.group('app_version'),
'android_version': int(mobj.group('android_version')),
'android_release': mobj.group('android_release'),
'brand': mobj.group('manufacturer'),
'device': mobj.group('device'),
'model': mobj.group('model'),
'dpi': mobj.group('dpi'),
'resolution': mobj.group('resolution'),
'chipset': mobj.group('chipset'),
'version_code': mobj.group('version_code'),
}
return {
'user_agent': Constants.USER_AGENT_FORMAT.format(**parse_params),
'parsed_params': parse_params
}
def get_cookie_value(self, key, domain=''):
now = int(time.time())
eternity = now + 100 * 365 * 24 * 60 * 60 # future date for non-expiring cookies
if not domain:
domain = compat_urllib_parse_urlparse(self.API_URL).netloc
for cookie in sorted(
self.cookie_jar, key=lambda c: c.expires or eternity, reverse=True):
# don't return expired cookie
if cookie.expires and cookie.expires < now:
continue
# cookie domain may be i.instagram.com or .instagram.com
cookie_domain = cookie.domain
# simple domain matching
if cookie_domain.startswith('.'):
cookie_domain = cookie_domain[1:]
if not domain.endswith(cookie_domain):
continue
if cookie.name.lower() == key.lower():
return cookie.value
return None
@property
def csrftoken(self):
"""The client's current csrf token"""
return self.get_cookie_value('csrftoken')
@property
def token(self):
"""For compatibility. Equivalent to :meth:`csrftoken`"""
return self.csrftoken
@property
def authenticated_user_id(self):
"""The current authenticated user id"""
return self.get_cookie_value('ds_user_id')
@property
def authenticated_user_name(self):
"""The current authenticated user name"""
return self.get_cookie_value('ds_user')
@property
def phone_id(self):
"""Current phone ID. For use in certain functions."""
return self.generate_uuid(return_hex=False, seed=self.device_id)
@property
def timezone_offset(self):
"""Timezone offset in seconds. For use in certain functions."""
return int(round((datetime.now() - datetime.utcnow()).total_seconds()))
@property
def rank_token(self):
if not self.authenticated_user_id:
return None
return '{0!s}_{1!s}'.format(self.authenticated_user_id, self.uuid)
@property
def authenticated_params(self):
return {
'_csrftoken': self.csrftoken,
'_uuid': self.uuid,
'_uid': self.authenticated_user_id
}
@property
def cookie_jar(self):
"""The client's cookiejar instance."""
return self.opener.cookie_jar
@property
def default_headers(self):
return {
'User-Agent': self.user_agent,
'Connection': 'close',
'Accept': '*/*',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'X-IG-Capabilities': self.ig_capabilities,
'X-IG-Connection-Type': 'WIFI',
'X-IG-Connection-Speed': '{0:d}kbps'.format(random.randint(1000, 5000)),
'X-IG-App-ID': self.application_id,
'X-IG-Bandwidth-Speed-KBPS': '-1.000',
'X-IG-Bandwidth-TotalBytes-B': '0',
'X-IG-Bandwidth-TotalTime-MS': '0',
'X-FB-HTTP-Engine': Constants.FB_HTTP_ENGINE,
}
@property
def radio_type(self):
"""For use in certain endpoints"""
return 'wifi-none'
def _generate_signature(self, data):
"""
Generates the signature for a data string
:param data: content to be signed
:return:
"""
return hmac.new(
self.signature_key.encode('ascii'), data.encode('ascii'),
digestmod=hashlib.sha256).hexdigest()
@classmethod
def generate_uuid(cls, return_hex=False, seed=None):
"""
Generate uuid
:param return_hex: Return in hex format
:param seed: Seed value to generate a consistent uuid
:return:
"""
if seed:
m = hashlib.md5()
m.update(seed.encode('utf-8'))
new_uuid = uuid.UUID(m.hexdigest())
else:
new_uuid = uuid.uuid1()
if return_hex:
return new_uuid.hex
return str(new_uuid)
@classmethod
def generate_deviceid(cls, seed=None):
"""
Generate an android device ID
:param seed: Seed value to generate a consistent device ID
:return:
"""
return 'android-{0!s}'.format(cls.generate_uuid(True, seed)[:16])
def generate_adid(self, seed=None):
"""
Generate an Advertising ID based on the login username since
the Google Ad ID is a personally identifying but resettable ID.
:return:
"""
modified_seed = seed or self.authenticated_user_name or self.username
if modified_seed:
# Do some trivial mangling of original seed
sha2 = hashlib.sha256()
sha2.update(modified_seed.encode('utf-8'))
modified_seed = sha2.hexdigest()
return self.generate_uuid(False, modified_seed)
@staticmethod
def _read_response(response):
"""
Extract the response body from a http response.
:param response:
:return:
"""
if response.info().get('Content-Encoding') == 'gzip':
buf = BytesIO(response.read())
res = gzip.GzipFile(fileobj=buf).read().decode('utf8')
else:
res = response.read().decode('utf8')
return res
def _call_api(self, endpoint, params=None, query=None, return_response=False, unsigned=False, version='v1'):
"""
Calls the private api.
:param endpoint: endpoint path that should end with '/', example 'discover/explore/'
:param params: POST parameters
:param query: GET url query parameters
:param return_response: return the response instead of the parsed json object
:param unsigned: use post params as-is without signing
:param version: for the versioned api base url. Default 'v1'.
:return:
"""
url = '{0}{1}'.format(self.api_url.format(version=version), endpoint)
if query:
url += ('?' if '?' not in endpoint else '&') + compat_urllib_parse.urlencode(query)
headers = self.default_headers
data = None
if params or params == '':
headers['Content-type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
if params == '': # force post if empty string
data = ''.encode('ascii')
else:
if not unsigned:
json_params = json.dumps(params, separators=(',', ':'))
hash_sig = self._generate_signature(json_params)
post_params = {
'ig_sig_key_version': self.key_version,
'signed_body': hash_sig + '.' + json_params
}
else:
# direct form post
post_params = params
data = compat_urllib_parse.urlencode(post_params).encode('ascii')
req = compat_urllib_request.Request(url, data, headers=headers)
try:
self.logger.debug('REQUEST: {0!s} {1!s}'.format(url, req.get_method()))
self.logger.debug('DATA: {0!s}'.format(data))
response = self.opener.open(req, timeout=self.timeout)
except compat_urllib_error.HTTPError as e:
error_response = self._read_response(e)
self.logger.debug('RESPONSE: {0:d} {1!s}'.format(e.code, error_response))
ErrorHandler.process(e, error_response)
except (SSLError, timeout, SocketError,
compat_urllib_error.URLError, # URLError is base of HTTPError
compat_http_client.HTTPException,
ConnectionError) as connection_error:
raise ClientConnectionError('{} {}'.format(
connection_error.__class__.__name__, str(connection_error)))
if return_response:
return response
response_content = self._read_response(response)
self.logger.debug('RESPONSE: {0:d} {1!s}'.format(response.code, response_content))
json_response = json.loads(response_content)
if json_response.get('message', '') == 'login_required':
raise ClientLoginRequiredError(
json_response.get('message'), code=response.code,
error_response=json.dumps(json_response))
# not from oembed or an ok response
if not json_response.get('provider_url') and json_response.get('status', '') != 'ok':
raise ClientError(
json_response.get('message', 'Unknown error'), code=response.code,
error_response=json.dumps(json_response))
return json_response
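# Hedged usage sketch (file name and import path are illustrative assumptions):
# Client.settings returns the values worth persisting between runs so a cached
# cookie/device id can be reused instead of logging in again.
#
#     import json
#     from instagram_private_api import Client
#
#     api = Client('my_username', 'my_password')
#     with open('cache.json', 'w') as fh:
#         json.dump(api.settings, fh)
#     # later:
#     with open('cache.json') as fh:
#         api = Client('my_username', 'my_password', settings=json.load(fh))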
|
the-stack_0_22329 | """
flask_excel
~~~~~~~~~~~~~~~~~~~
A flask extension that provides one application programming interface
to read and write data in different excel file formats
:copyright: (c) 2015-2020 by Onni Software Ltd and its contributors
:license: New BSD License
"""
try:
# if in py2
from urllib import quote
_PY_VERSION = 2
except ImportError:
# else (aka in py3)
from urllib.parse import quote
_PY_VERSION = 3
import pyexcel_webio as webio
from flask import Request, Response
class ExcelRequest(webio.ExcelInputInMultiDict, Request):
"""
Mix in pyexcel's webio function signatures to Flask request
"""
def get_file_tuple(self, field_name):
"""
Implement Flask specific way of getting uploaded files
"""
filehandle = self.files[field_name]
filename = filehandle.filename
extension = filename.split(".")[-1]
if extension == filename:
raise IOError("Failed to find out file extension")
return extension, filehandle
def _make_response(content, content_type, status, file_name=None):
"""
Custom response function that is called by pyexcel-webio
"""
response = Response(content, content_type=content_type, status=status)
if file_name:
if _PY_VERSION == 2 and isinstance(file_name, unicode):
file_name = file_name.encode("utf-8")
url_encoded_file_name = quote(file_name)
response.headers[
"Content-Disposition"
] = "attachment; filename=%s;filename*=utf-8''%s" % (
url_encoded_file_name,
url_encoded_file_name,
)
return response
from pyexcel_webio import (
make_response,
make_response_from_a_table,
make_response_from_array,
make_response_from_book_dict,
make_response_from_dict,
make_response_from_query_sets,
make_response_from_records,
make_response_from_tables,
)
def init_excel(app):
app.request_class = ExcelRequest
webio.init_webio(_make_response)
return app
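# Minimal usage sketch (hedged; the route, field name and data are assumptions
# for illustration): init_excel() swaps in ExcelRequest and registers the
# response maker, after which the pyexcel-webio helpers imported above work on
# the Flask app.
#
#     from flask import Flask, request
#     import flask_excel as excel
#
#     app = Flask(__name__)
#     excel.init_excel(app)
#
#     @app.route("/download")
#     def download():
#         return excel.make_response_from_array(
#             [[1, 2], [3, 4]], "csv", file_name="export.csv")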
|
the-stack_0_22330 | #Advent of code 2021
#Day 7 Part 2
#The Treachery of Whales
import numpy as np
np.set_printoptions(threshold=np.inf)
import pdb #pdb.set_trace()
import time
import copy
from functools import reduce
import math
from decimal import Decimal
start_time = time.time()
###Functions
def calculate_stats_and_shit(positions):
#Calculate a lot of averages cos why the fuck not.
#I'm definetly not going to use all of these lol
#I had fun everything is fine
#(My RAM might disagree)
Sigma_x = sum(positions)
Sigma_x2 = reduce(lambda a, x: a + x**2, [0]+positions)
Sigma_x2_minus_x = reduce(lambda a, x: a + x**2 - x, [0]+positions)
no = len(positions)
no_dec = Decimal(no)
mean = Sigma_x / no
RMS = ((1/no)*Sigma_x2)**0.5
MRS = (1/no)*(Sigma_x2)**0.5
positions_plus_one = [x+1 for x in positions]
product_x_plus_one = Decimal(reduce((lambda x, y: x * y), positions_plus_one))
GM_add_one = product_x_plus_one**(1/no_dec) - 1
positions_wo_zero = [i for i in positions if i != 0]
no_wo_zero = Decimal(len(positions_wo_zero))
product_x_wo_zero = Decimal(reduce((lambda x, y: x * y), positions_wo_zero))
GM_ignore_zero = product_x_wo_zero**(1/no_wo_zero)
positions_zero_to_one = [i if i != 0 else 1 for i in positions]
product_x_zero_to_one = Decimal(reduce((lambda x, y: x * y), positions_zero_to_one))
GM_zero_to_one = product_x_zero_to_one**(1/no_dec)
return mean, RMS, MRS, GM_add_one, GM_ignore_zero, GM_zero_to_one
def calc_fuel_needed(positions, x):
total_fuel = 0
for position in positions:
distance = abs(position - x)
fuel = 0
for num in range(1, distance + 1, 1):
fuel += num
total_fuel += fuel
return total_fuel
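# The inner loop above just sums 1 + 2 + ... + distance. A sketch of an
# equivalent closed-form variant using the triangular-number identity
# d * (d + 1) / 2 (same result, no inner loop; the name is illustrative):
def calc_fuel_needed_fast(positions, x):
    return sum(abs(p - x) * (abs(p - x) + 1) // 2 for p in positions)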
#Import and sort File
positions = []
with open("input_07.txt") as positionsf:
for line in positionsf:
#Import operations
line = line.strip("/n")#Remove EOL symbol
line = line.split(",")
positions = list(map(int,line))
positions.sort()
###Main Code
mean, RMS, MRS, GM_add_one, GM_ignore_zero, GM_zero_to_one = calculate_stats_and_shit(positions)
#Mean of the Geometric Means
MoGMs = (GM_add_one + GM_ignore_zero + GM_zero_to_one) / 3
#Find to nearest integers
x_lower = int(math.floor(mean))
x_upper = x_lower + 1
#Calculate fuel for nearest integers
total_fuel_lower = calc_fuel_needed(positions, x_lower)
total_fuel_upper = calc_fuel_needed(positions, x_upper)
#Find nearest local minima
#Direction To Search
if total_fuel_lower < total_fuel_upper:
search_dir = -1
elif total_fuel_lower > total_fuel_upper:
search_dir = 1
else:
print('lol that shouldnt happen, fuck this')
i = 0
while (time.time() - start_time) <= 5*60:
x_lower += search_dir
x_upper += search_dir
total_fuel_lower = calc_fuel_needed(positions, x_lower)
total_fuel_upper = calc_fuel_needed(positions, x_upper)
i += 1
if (total_fuel_lower > total_fuel_upper and search_dir == -1):
x = x_upper
total_fuel = total_fuel_upper
break
if (total_fuel_lower < total_fuel_upper and search_dir == 1):
x = x_lower
total_fuel = total_fuel_lower
break
#Result
print('Input positions are ',positions)
print('Mean is ',mean)
print('Root Mean Square is ',RMS)
print('Mean Root Square is ',MRS)
print('Geometric Mean using the add one to all method is ',GM_add_one)
print('Geometric Mean using the ignore zero method is ',GM_ignore_zero)
print('Geometric Mean using the convert zero to one method is ',GM_zero_to_one)
print('The Mean of the Geometric Means is ',MoGMs)
print('.......')
print('.......')
print('.......')
print('Well that was a lot of silly data')
print('maybe you might like some results now')
print('.......')
print('.......')
print('.......')
print('maybe')
print('.......')
print('.......')
print('.......')
print('The best place for the crab subs to meet is ',x)
print('The fuel usage required for that is ',total_fuel)
print('I did ',i,' loops to find the local optima')
timetaken = time.time() - start_time
print("Completed in ", timetaken, " seconds") |
the-stack_0_22332 | from django.contrib.auth.models import User
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework import serializers
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
@classmethod
def get_token(cls, user):
return super().get_token(user)
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
data['username'] = self.user.username
return data
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__' |
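# Hedged wiring sketch (the view below is an assumption, not defined in this
# module): the token serializer is typically plugged into simplejwt's view.
# Note that ``fields = '__all__'`` exposes every User column, so a narrower
# field list is usually preferable in practice.
#
#     from rest_framework_simplejwt.views import TokenObtainPairView
#
#     class MyTokenObtainPairView(TokenObtainPairView):
#         serializer_class = MyTokenObtainPairSerializer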
the-stack_0_22333 | if __name__ == '__main__':
import sys
sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
from progress import Progress
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
for line in answers:
answers_list.append(int(line))
progress_ = Progress("Problem 009: Special Pythagorean triplet", 1, 1000)
for a in range(1, 1000):
progress_.count = a
progress_.progress()
for b in range(a, 1000):
if a**2 + b**2 == (1000 - a - b)**2:
break
else:
continue
break
progress_.count = a*b*(1000 - a - b)
progress_.total = answers_list[9]
progress_.progress()
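# Worked algebra (comment only): with c = 1000 - a - b, the condition
# a**2 + b**2 == c**2 rearranges to b = (1000**2 - 2000*a) / (2000 - 2*a),
# so for each a the matching b is determined directly; the brute-force
# double loop above is kept as written.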
if __name__ == '__main__':
input()
|
the-stack_0_22334 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter
Thus providing a better UI from the user's perspective
"""
import frappe
import frappe.translate
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.model import no_value_fields, core_doctypes_list
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.model.docfield import supports_translation
doctype_properties = {
'search_fields': 'Data',
'title_field': 'Data',
'image_field': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'allow_copy': 'Check',
'istable': 'Check',
'quick_entry': 'Check',
'editable_grid': 'Check',
'max_attachments': 'Int',
'track_changes': 'Check',
'track_views': 'Check',
'allow_auto_repeat': 'Check',
'allow_import': 'Check',
'show_title_field_in_link': 'Check',
'send_me_a_copy': 'Check',
'send_read_receipt': 'Check',
'attach_document_print': 'Check',
'file_autoname': 'Data',
'autoname': 'Data'
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'fetch_from': 'Small Text',
'fetch_if_empty': 'Check',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'unique': 'Check',
'ignore_user_permissions': 'Check',
'in_list_view': 'Check',
'in_standard_filter': 'Check',
'in_global_search': 'Check',
'bold': 'Check',
'hidden': 'Check',
'collapsible': 'Check',
'collapsible_depends_on': 'Data',
'print_hide': 'Check',
'print_hide_if_no_value': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'translatable': 'Check',
'mandatory_depends_on': 'Data',
'read_only_depends_on': 'Data',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text',
'precision': 'Select',
'read_only': 'Check',
'length': 'Int',
'columns': 'Int',
'remember_last_selected_value': 'Check',
'allow_bulk_edit': 'Check',
'auto_repeat': 'Link',
'allow_in_quick_entry': 'Check'
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Data'), ('Text', 'Text Editor', 'Text Editor Alt', 'Code', 'Signature', 'HTML Editor'), ('Data', 'Select'),
('Text', 'Small Text'), ('Text', 'Data', 'Barcode'), ('Code', 'Geolocation'), ('Table', 'Table MultiSelect'))
allowed_fieldtype_for_options_change = ('Read Only', 'HTML', 'Select', 'Data')
class CustomizeForm(Document):
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
if self.doc_type in core_doctypes_list:
return frappe.msgprint(_("Core DocTypes cannot be customized."))
if meta.issingle:
return frappe.msgprint(_("Single DocTypes cannot be customized."))
if meta.custom:
return frappe.msgprint(_("Only standard DocTypes are allowed to be customized from Customize Form."))
# doctype properties
for property in doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in docfield_properties:
new_d[property] = d.get(property)
self.append("fields", new_d)
# load custom translation
translation = self.get_name_translation()
self.label = translation.target_name if translation else ''
#If allow_auto_repeat is set, add auto_repeat custom field.
if self.allow_auto_repeat:
if not frappe.db.exists('Custom Field', {'fieldname': 'auto_repeat', 'dt': self.doc_type}):
insert_after = self.fields[len(self.fields) - 1].fieldname
df = dict(fieldname='auto_repeat', label='Auto Repeat', fieldtype='Link', options='Auto Repeat', insert_after=insert_after, read_only=1, no_copy=1, print_hide=1)
create_custom_field(self.doc_type, df)
# NOTE doc is sent to clientside by run_method
def get_name_translation(self):
'''Get translation object if exists of current doctype name in the default language'''
return frappe.get_value('Translation',
{'source_name': self.doc_type, 'language': frappe.local.lang or 'en'},
['name', 'target_name'], as_dict=True)
def set_name_translation(self):
'''Create, update custom translation for this doctype'''
current = self.get_name_translation()
if current:
if self.label and current.target_name != self.label:
frappe.db.set_value('Translation', current.name, 'target_name', self.label)
frappe.translate.clear_cache()
else:
# clear translation
frappe.delete_doc('Translation', current.name)
else:
if self.label:
frappe.get_doc(dict(doctype='Translation',
source_name=self.doc_type,
target_name=self.label,
language_code=frappe.local.lang or 'en')).insert()
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.flags.update_db = False
self.flags.rebuild_doctype_for_global_search = False
self.set_property_setters()
self.update_custom_fields()
self.set_name_translation()
validate_fields_for_doctype(self.doc_type)
if self.flags.update_db:
frappe.db.updatedb(self.doc_type)
if not hasattr(self, 'hide_success') or not self.hide_success:
frappe.msgprint(_("{0} updated").format(_(self.doc_type)), alert=True)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
if self.flags.rebuild_doctype_for_global_search:
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',
now=True, doctype=self.doc_type)
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=doctype_properties[property])
for df in self.get("fields"):
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in docfield_properties:
if property != "idx" and (df.get(property) or '') != (meta_df[0].get(property) or ''):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
elif property == "allow_on_submit" and df.get(property):
if not frappe.db.get_value("DocField",
{"parent": self.doc_type, "fieldname": df.fieldname}, "allow_on_submit"):
frappe.msgprint(_("Row {0}: Not allowed to enable Allow on Submit for standard fields")\
.format(df.idx))
continue
elif property == "reqd" and \
((frappe.db.get_value("DocField",
{"parent":self.doc_type,"fieldname":df.fieldname}, "reqd") == 1) \
and (df.get(property) == 0)):
frappe.msgprint(_("Row {0}: Not allowed to disable Mandatory for standard fields")\
.format(df.idx))
continue
elif property == "in_list_view" and df.get(property) \
and df.fieldtype!="Attach Image" and df.fieldtype in no_value_fields:
frappe.msgprint(_("'In List View' not allowed for type {0} in row {1}")
.format(df.fieldtype, df.idx))
continue
elif property == "precision" and cint(df.get("precision")) > 6 \
and cint(df.get("precision")) > cint(meta_df[0].get("precision")):
self.flags.update_db = True
elif property == "unique":
self.flags.update_db = True
elif (property == "read_only" and cint(df.get("read_only"))==0
and frappe.db.get_value("DocField", {"parent": self.doc_type, "fieldname": df.fieldname}, "read_only")==1):
# if docfield has read_only checked and user is trying to make it editable, don't allow it
frappe.msgprint(_("You cannot unset 'Read Only' for field {0}").format(df.label))
continue
elif property == "options" and df.get("fieldtype") not in allowed_fieldtype_for_options_change:
frappe.msgprint(_("You can't set 'Options' for field {0}").format(df.label))
continue
elif property == 'translatable' and not supports_translation(df.get('fieldtype')):
frappe.msgprint(_("You can't set 'Translatable' for field {0}").format(df.label))
continue
elif (property == 'in_global_search' and
df.in_global_search != meta_df[0].get("in_global_search")):
self.flags.rebuild_doctype_for_global_search = True
self.make_property_setter(property=property, value=df.get(property),
property_type=docfield_properties[property], fieldname=df.fieldname)
def update_custom_fields(self):
for i, df in enumerate(self.get("fields")):
if df.get("is_custom_field"):
if not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):
self.add_custom_field(df, i)
self.flags.update_db = True
else:
self.update_in_custom_field(df, i)
self.delete_custom_fields()
def add_custom_field(self, df, i):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in docfield_properties:
d.set(property, df.get(property))
if i!=0:
d.insert_after = self.fields[i-1].fieldname
d.idx = i
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df, i):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
# not a custom field
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
# check and update `insert_after` property
if i!=0:
insert_after = self.fields[i-1].fieldname
if custom_field.insert_after != insert_after:
custom_field.insert_after = insert_after
custom_field.idx = i
changed = True
if changed:
custom_field.db_update()
self.flags.update_db = True
#custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
# ignore validation because it will be done at the end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.db.sql("delete from `tabProperty Setter` where name=%s", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
except Exception as e:
if frappe.db.is_column_missing(e):
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
self.check_length_for_fieldtypes = []
for allowed_changes in allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
old_value_length = cint(frappe.db.type_map.get(old_value)[1])
new_value_length = cint(frappe.db.type_map.get(new_value)[1])
# Ignore fieldtype check validation if new field type has unspecified maxlength
# Changes like DATA to TEXT, where new_value_length equals 0, will not be validated
if new_value_length and (old_value_length > new_value_length):
self.check_length_for_fieldtypes.append({'df': df, 'old_value': old_value})
self.validate_fieldtype_length()
else:
self.flags.update_db = True
break
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def validate_fieldtype_length(self):
for field in self.check_length_for_fieldtypes:
df = field.get('df')
max_length = cint(frappe.db.type_map.get(df.fieldtype)[1])
fieldname = df.fieldname
docs = frappe.db.sql('''
SELECT name, {fieldname}, LENGTH({fieldname}) AS len
FROM `tab{doctype}`
WHERE LENGTH({fieldname}) > {max_length}
'''.format(
fieldname=fieldname,
doctype=self.doc_type,
max_length=max_length
), as_dict=True)
links = []
label = df.label
for doc in docs:
links.append(frappe.utils.get_link_to_form(self.doc_type, doc.name))
links_str = ', '.join(links)
if docs:
frappe.throw(_('Value for field {0} is too long in {1}. Length should be lesser than {2} characters')
.format(
frappe.bold(label),
links_str,
frappe.bold(max_length)
), title=_('Data Too Long'), is_minimizable=len(docs) > 1)
self.flags.update_db = True
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""DELETE FROM `tabProperty Setter` WHERE doc_type=%s
and `field_name`!='naming_series'
and `property`!='options'""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
|
the-stack_0_22335 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text problems mnist."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensor2tensor.data_generators import problem as problem_lib
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
import tensorflow as tf
class Test1(text_problems.Text2textTmpdir):
@property
def name(self):
# name is normally provided by register_problem, but this problem is not
# registered, so we provide one here to avoid inheriting the parent class's
# name.
return "test1"
@property
def approx_vocab_size(self):
return 3
@property
def dataset_splits(self):
return [{
"split": problem_lib.DatasetSplit.TRAIN,
"shards": 1,
}, {
"split": problem_lib.DatasetSplit.EVAL,
"shards": 1,
}]
class TextProblems(tf.test.TestCase):
@classmethod
def setUpClass(cls):
cls.tmp_dir = tf.test.get_temp_dir()
shutil.rmtree(cls.tmp_dir)
os.mkdir(cls.tmp_dir)
cls.inputs = [
"Hello world",
"Goodbye world",
]
cls.targets = [
"Hola mundo",
"Adios mundo",
]
cls.labels = [2, 3]
cls.labels_strs = ["c", "d"]
cls.inputs_file = os.path.join(cls.tmp_dir, "inputs.train.txt")
cls.targets_file = os.path.join(cls.tmp_dir, "targets.train.txt")
cls.labels_file = os.path.join(cls.tmp_dir, "labels.train.txt")
cls.labels_str_file = os.path.join(cls.tmp_dir, "labels_str.train.txt")
data = [(cls.inputs, cls.inputs_file), (cls.targets, cls.targets_file),
(cls.labels, cls.labels_file), (cls.labels_strs,
cls.labels_str_file)]
for lines, filename in data:
with tf.gfile.Open(filename, "w") as f:
for line in lines:
f.write(str(line))
f.write("\n")
cls.tabbed_file = os.path.join(cls.tmp_dir, "tabbed.train.txt")
with tf.gfile.Open(cls.tabbed_file, "w") as f:
for inputs, targets in zip(cls.inputs, cls.targets):
f.write("%s\t%s\n" % (inputs, targets))
tf.gfile.Copy(cls.inputs_file, os.path.join(cls.tmp_dir, "inputs.eval.txt"))
tf.gfile.Copy(cls.targets_file, os.path.join(cls.tmp_dir,
"targets.eval.txt"))
def testTxtLineIterator(self):
lines = [line for line in text_problems.txt_line_iterator(self.inputs_file)]
self.assertEqual(lines, self.inputs)
def testText2TextTxtIterator(self):
inputs = []
targets = []
for entry in text_problems.text2text_txt_iterator(self.inputs_file,
self.targets_file):
inputs.append(entry["inputs"])
targets.append(entry["targets"])
self.assertEqual(inputs, self.inputs)
self.assertEqual(targets, self.targets)
def testText2SelfTxtIterator(self):
targets = [
entry["targets"]
for entry in text_problems.text2self_txt_iterator(self.targets_file)
]
self.assertEqual(targets, self.targets)
def testText2ClassTxtIterator(self):
inputs = []
labels = []
for entry in text_problems.text2class_txt_iterator(self.inputs_file,
self.labels_file):
inputs.append(entry["inputs"])
labels.append(entry["label"])
self.assertEqual(inputs, self.inputs)
self.assertEqual(labels, self.labels)
def testText2ClassTxtIteratorWithStrs(self):
inputs = []
labels = []
for entry in text_problems.text2class_txt_iterator(
self.inputs_file, self.labels_str_file, class_strs=["a", "b", "c",
"d"]):
inputs.append(entry["inputs"])
labels.append(entry["label"])
self.assertEqual(inputs, self.inputs)
self.assertEqual(labels, self.labels)
def testText2TextTxtTabIterator(self):
inputs = []
targets = []
for entry in text_problems.text2text_txt_tab_iterator(self.tabbed_file):
inputs.append(entry["inputs"])
targets.append(entry["targets"])
self.assertEqual(inputs, self.inputs)
self.assertEqual(targets, self.targets)
def testText2TextTmpDir(self):
problem = Test1()
problem.generate_data(self.tmp_dir, self.tmp_dir)
vocab_file = os.path.join(self.tmp_dir, "vocab.test1.3.subwords")
train_file = os.path.join(self.tmp_dir, "test1-train-00000-of-00001")
eval_file = os.path.join(self.tmp_dir, "test1-dev-00000-of-00001")
self.assertTrue(tf.gfile.Exists(vocab_file))
self.assertTrue(tf.gfile.Exists(train_file))
self.assertTrue(tf.gfile.Exists(eval_file))
dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, self.tmp_dir)
features = dataset.make_one_shot_iterator().get_next()
examples = []
exhausted = False
with self.test_session() as sess:
examples.append(sess.run(features))
examples.append(sess.run(features))
try:
sess.run(features)
except tf.errors.OutOfRangeError:
exhausted = True
self.assertTrue(exhausted)
self.assertEqual(2, len(examples))
self.assertNotEqual(
list(examples[0]["inputs"]), list(examples[1]["inputs"]))
example = examples[0]
encoder = text_encoder.SubwordTextEncoder(vocab_file)
inputs_encoded = list(example["inputs"])
inputs_encoded.pop() # rm EOS
self.assertTrue(encoder.decode(inputs_encoded) in self.inputs)
targets_encoded = list(example["targets"])
targets_encoded.pop() # rm EOS
self.assertTrue(encoder.decode(targets_encoded) in self.targets)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_22336 | import sys
# generator expression
inname, outname = sys.argv[1:3]
# (venv) nguyendangtuan@Nguyens-MacBook-Pro Chapter09 % python warning_generators.py sample_log.txt warning_log.txt
def generator():
with open(inname) as infile:
with open(outname, 'w') as outfile:
warnings = (l.replace("WARNING", "") for l in infile if 'WARNING' in l)
for line in warnings:
outfile.write(line)
generator()
outname = "normal_" + outname
def normal_loop():
with open(inname) as infile:
with open(outname, 'w') as outfile:
for line in infile:
if "WARNING" in line:
outfile.write(line.replace("\tWARNING", ""))
normal_loop()
class WarningFilter:
def __init__(self, sequence):
self.sequence = sequence
def __iter__(self):
return self
def __next__(self):
line = self.sequence.readline()
while line and "WARNING" not in line:
line = self.sequence.readline()
if not line:
raise StopIteration
return line.replace("\tWARNING", "")
outname = "oop_" + outname
def oopReader():
with open(inname) as infile:
with open(outname, 'w') as outfile:
filter = WarningFilter(infile)
for line in filter:
outfile.write(line)
oopReader()
outname = "yield_" + outname
def warning_filter(sequence):
for line in sequence:
if "WARNING" in line:
yield line.replace("\tWARNING", "")
print(dir(warning_filter))
def generator_yield():
with open(inname) as infile:
with open(outname, "w") as outfile:
filter = warning_filter(infile)
for line in filter:
outfile.write(line)
generator_yield()
outname = "yield_form_" + outname
def warning_filter_yield_from(inname):
with open(inname) as infile:
yield from (
line.replace("\tWARNING", "") for line in infile if "WARNING" in line
)
print(dir(warning_filter_yield_from))
def write_out_filter():
filter = warning_filter_yield_from(inname)
with open(outname, "w") as outfile:
for line in filter:
outfile.write(line)
write_out_filter()
#
# with open(inname) as infile:
# with open(outname, "w") as outfile:
# warnings = (
# l.replace("\tWARNING", "") for l in infile if "WARNING" in l
# )
# for l in warnings:
# outfile.write(l)
#
# # normal loop
# with open(inname) as infile:
# with open(outname, "w") as outfile:
# for l in infile:
# if "WARNING" in l:
# outfile.write(l.replace("\tWARNING", ""))
#
#
# # object-oriented
# class WarningFilter:
# def __init__(self, insequence):
# self.insequence = insequence
#
# def __iter__(self):
# return self
#
# def __next__(self):
# l = self.insequence.readline()
# while l and "WARNING" not in l:
# l = self.insequence.readline()
# if not l:
# raise StopIteration
# return l.replace("\tWARNING", "")
#
#
# with open(inname) as infile:
# with open(outname, "w") as outfile:
# filter = WarningFilter(infile)
# for l in filter:
# outfile.write(l)
#
#
# # Generator with yield
# def warnings_filter(insequence):
# for l in insequence:
# if "WARNING" in l:
# yield l.replace("\tWARNING", "")
#
#
# with open(inname) as infile:
# with open(outname, "w") as outfile:
# filter = warnings_filter(infile)
# for l in filter:
# outfile.write(l)
#
#
# # Generator with yield from
# def warnings_filter(infilename):
# with open(infilename) as infile:
# yield from (
# l.replace("\tWARNING", "") for l in infile if "WARNING" in l
# )
#
#
# filter = warnings_filter(inname)
# with open(outname, "w") as outfile:
# for l in filter:
# outfile.write(l)
|
the-stack_0_22337 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AutoScaleRunError(Model):
"""An error that occurred when executing or evaluating a pool autoscale
formula.
:param code: An identifier for the autoscale error. Codes are invariant
and are intended to be consumed programmatically.
:type code: str
:param message: A message describing the autoscale error, intended to be
suitable for display in a user interface.
:type message: str
:param values: A list of additional error details related to the autoscale
error.
:type values: list of :class:`NameValuePair
<azure.batch.models.NameValuePair>`
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'values': {'key': 'values', 'type': '[NameValuePair]'},
}
def __init__(self, code=None, message=None, values=None):
self.code = code
self.message = message
self.values = values
|
the-stack_0_22340 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete node pool command."""
import argparse
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
DETAILED_HELP = {
'DESCRIPTION': """\
*{command}* deletes a node pool from a Google Container Engine cluster.
""",
'EXAMPLES': """\
To delete the "node-pool-1" node pool from the cluster
"sample-cluster", run:
$ {command} node-pool-1 --cluster=sample-cluster
""",
}
class Delete(base.DeleteCommand):
"""Delete an existing node pool in a running cluster."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
# TODO(b/28639250): Support remote completion when the SDK supports it.
parser.add_argument(
'name',
metavar='NAME',
help='The name of the node pool to delete.')
parser.add_argument(
'--timeout',
type=int,
default=1800,
help=argparse.SUPPRESS)
parser.add_argument(
'--wait',
action='store_true',
default=True,
help='Poll the operation for completion after issuing a delete '
'request.')
parser.add_argument(
'--cluster',
help='The cluster from which to delete the node pool.',
action=actions.StoreProperty(properties.VALUES.container.cluster))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.
"""
adapter = self.context['api_adapter']
pool_ref = adapter.ParseNodePool(args.name)
console_io.PromptContinue(
message=('The following node pool will be deleted.\n'
'[{name}] in cluster [{clusterId}] in zone [{zone}]')
.format(name=pool_ref.nodePoolId,
clusterId=pool_ref.clusterId,
zone=adapter.Zone(pool_ref)),
throw_if_unattended=True,
cancel_on_no=True)
try:
# Make sure it exists (will raise appropriate error if not)
adapter.GetNodePool(pool_ref)
op_ref = adapter.DeleteNodePool(pool_ref)
if args.wait:
adapter.WaitForOperation(
op_ref,
'Deleting node pool {0}'.format(pool_ref.nodePoolId),
timeout_s=args.timeout)
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(util.GetError(error))
log.DeletedResource(pool_ref)
return op_ref
Delete.detailed_help = DETAILED_HELP
|
the-stack_0_22342 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Full binary tree construction
# count recurrence: f(N) = sum over odd i of f(i) * f(N - 1 - i),
# pairing every possible left subtree with every possible right subtree
class Solution:
def allPossibleFBT(self, N: int) -> List[TreeNode]:
if N == 1:
return [TreeNode(0)]
res = []
for i in range(1,N,2):
nLeft = i
nRight = N -1 - i
for left in self.allPossibleFBT(nLeft):
for right in self.allPossibleFBT(nRight):
root = TreeNode(0)
root.left = left
root.right = right
res.append(root)
return res |
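# Hedged usage sketch (TreeNode is provided by the judge environment). The number of full
# binary trees with 2k+1 nodes is the Catalan number C(k), e.g. 5 trees for N = 7:
#
#   sol = Solution()
#   print(len(sol.allPossibleFBT(7)))  # 5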
the-stack_0_22343 | from xsd2tkform.core.annotation import Annotation
from xsd2tkform.core.restriction import Restriction
from xsd2tkform.core.list import List
from xsd2tkform.core.sequence import Sequence
from xsd2tkform.core.choice import Choice
from xsd2tkform.core.element import Element
def get_annotation(element):
"""Get annotations from a type definition element
"""
docs={}
for child in element:
if child.tag.endswith("annotation"):
for gchild in child:
if gchild.tag.endswith("documentation"):
# find language
for k in gchild.attrib:
if k.endswith("lang"):
docs[gchild.attrib[k]]=gchild.text.strip()
return Annotation(docs=docs)
def get_restriction(element):
for child in element:
if child.tag.endswith("restriction"):
if len(child):
enum=[e.attrib["value"] for e in child if e.tag.endswith("enumeration")]
return Restriction(base=child.attrib["base"], enum=enum)
else:
return Restriction(base=child.attrib["base"])
return
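# Hedged usage sketch (builds a tiny element by hand; real input comes from an XSD parsed
# with xml.etree.ElementTree, whose tags end with the local names checked above):
#
#   import xml.etree.ElementTree as ET
#   elem = ET.fromstring('<simpleType><restriction base="xs:string">'
#                        '<enumeration value="A"/><enumeration value="B"/>'
#                        '</restriction></simpleType>')
#   r = get_restriction(elem)  # Restriction(base='xs:string', enum=['A', 'B'])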
def get_list(element):
item_list=None
for child in element:
if child.tag.endswith("list"):
attr = child.attrib
attr["item_type"]=attr.pop("itemType")
item_list=List(**attr)
return item_list
def get_sequence(element):
sequence = []
for child in element:
if child.tag.endswith("sequence"):
# children should be elements of choices
for gchild in child:
if gchild.tag.endswith("element"):
sequence.append(Element.from_element(gchild))
elif gchild.tag.endswith("choice"):
sequence.append(Choice.from_element(gchild))
else:
pass
return Sequence(sequence)
|
the-stack_0_22344 | import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE,
PREDICTOR_RECORD_DTYPE,
ALMOST_INF,
X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE,
X_DTYPE,
)
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_bitset_memoryview,
set_raw_bitset_from_binned_bitset,
)
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads()
@pytest.mark.parametrize("n_bins", [200, 256])
def test_regression_dataset(n_bins):
X, y = make_regression(
n_samples=500, n_features=10, n_informative=5, random_state=42
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
mapper = _BinMapper(n_bins=n_bins, random_state=42)
X_train_binned = mapper.fit_transform(X_train)
# Init gradients and hessians to that of least squares loss
gradients = -y_train.astype(G_H_DTYPE)
hessians = np.ones(1, dtype=G_H_DTYPE)
min_samples_leaf = 10
max_leaf_nodes = 30
grower = TreeGrower(
X_train_binned,
gradients,
hessians,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=max_leaf_nodes,
n_bins=n_bins,
n_bins_non_missing=mapper.n_bins_non_missing_,
)
grower.grow()
predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map, n_threads)
assert r2_score(y_train, y_pred_train) > 0.82
y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map, n_threads)
assert r2_score(y_test, y_pred_test) > 0.67
@pytest.mark.parametrize(
"num_threshold, expected_predictions",
[
(-np.inf, [0, 1, 1, 1]),
(10, [0, 0, 1, 1]),
(20, [0, 0, 0, 1]),
(ALMOST_INF, [0, 0, 0, 1]),
(np.inf, [0, 0, 0, 0]),
],
)
def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
# Make sure infinite values and infinite thresholds are handled properly.
# In particular, if a value is +inf and the threshold is ALMOST_INF the
# sample should go to the right child. If the threshold is inf (split on
# nan), the +inf sample will go to the left child.
X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes[0]["left"] = 1
nodes[0]["right"] = 2
nodes[0]["feature_idx"] = 0
nodes[0]["num_threshold"] = num_threshold
# left child
nodes[1]["is_leaf"] = True
nodes[1]["value"] = 0
# right child
nodes[2]["is_leaf"] = True
nodes[2]["value"] = 1
binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
predictions = predictor.predict(X, known_cat_bitset, f_idx_map, n_threads)
assert np.all(predictions == expected_predictions)
@pytest.mark.parametrize(
"bins_go_left, expected_predictions",
[
([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
([3, 5, 6], [0, 0, 0, 1, 0, 1]),
],
)
def test_categorical_predictor(bins_go_left, expected_predictions):
# Test predictor outputs are correct with categorical features
X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
# We just construct a simple tree with 1 root and 2 children
# parent node
nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
nodes[0]["left"] = 1
nodes[0]["right"] = 2
nodes[0]["feature_idx"] = 0
nodes[0]["is_categorical"] = True
nodes[0]["missing_go_to_left"] = True
# left child
nodes[1]["is_leaf"] = True
nodes[1]["value"] = 1
# right child
nodes[2]["is_leaf"] = True
nodes[2]["value"] = 0
binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
for go_left in bins_go_left:
set_bitset_memoryview(binned_cat_bitsets[0], go_left)
set_raw_bitset_from_binned_bitset(
raw_categorical_bitsets[0], binned_cat_bitsets[0], categories
)
predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
# Check binned data gives correct predictions
prediction_binned = predictor.predict_binned(
X_binned, missing_values_bin_idx=6, n_threads=n_threads
)
assert_allclose(prediction_binned, expected_predictions)
# manually construct bitset
known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
known_cat_bitsets[0, 0] = np.sum(2 ** categories, dtype=np.uint32)
f_idx_map = np.array([0], dtype=np.uint32)
# Check with un-binned data
predictions = predictor.predict(
categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads
)
assert_allclose(predictions, expected_predictions)
# Check missing goes left because missing_values_bin_idx=6
X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
predictions = predictor.predict_binned(
X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads
)
assert_allclose(predictions, [1])
# missing and unknown go left
predictions = predictor.predict(
np.array([[np.nan, 17]], dtype=X_DTYPE).T,
known_cat_bitsets,
f_idx_map,
n_threads,
)
assert_allclose(predictions, [1, 1])
|
the-stack_0_22346 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import torch
import pdb
import cv2
import torchvision.transforms as T
import os.path as osp
from PIL import Image
from torch.utils.data import Dataset
import random
import numpy as np
from ..transforms import RandomErasing #RandomErasing2
from .import_Market1501Attribute import import_Market1501Attribute_binary
from .import_DukeMTMCAttribute import import_DukeMTMCAttribute_binary
def read_image(img_path):
"""Keep reading image until succeed.
This can avoid IOError incurred by heavy IO process."""
got_img = False
if not osp.exists(img_path):
raise IOError("{} does not exist".format(img_path))
while not got_img:
try:
img = Image.open(img_path).convert('RGB')
got_img = True
except IOError:
print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
pass
return img
class MTImageDataset(Dataset):
"""Image Person ReID Dataset"""
def __init__(self, dataset, transform=None, cfg=None):
try:
self.dataset, self.label2pid = dataset
except:
self.dataset = dataset
self.dataset_name = cfg.DATASETS.NAMES
if cfg.DATASETS.NAMES == 'market1501':
train_attr, test_attr, self.attr_label = import_Market1501Attribute_binary(cfg.DATASETS.ROOT_DIR)
elif cfg.DATASETS.NAMES == 'dukemtmc':
train_attr, test_attr, self.attr_label = import_DukeMTMCAttribute_binary(cfg.DATASETS.ROOT_DIR)
elif 'cuhk03' in cfg.DATASETS.NAMES:
train_attr = []
self.attr_label = ['not used']
else:
raise ValueError(f'dataset not support: {cfg.DATASETS.NAMES}')
self.train_attr = train_attr
self.transform = transform
self.img_size = cfg.INPUT.SIZE_TRAIN
self.resize = T.Resize(self.img_size)
self.flip = T.RandomHorizontalFlip(p=1.0)
self.crop_loc_generator = T.RandomCrop(self.img_size)
self.pad = T.Pad(cfg.INPUT.PADDING)
self.erase = RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
self.flip_prob = cfg.INPUT.PROB
if self.img_size[0] == 224:
self.seg_height = 28
self.seg_weight = 28
elif self.img_size[0] == 256:
self.seg_height = 16
self.seg_weight = 32
elif self.img_size[0] == 384:
if self.img_size[1] == 128:
self.seg_height = 16
elif self.img_size[1] == 192:
self.seg_height = 24
self.seg_weight = 48
def __len__(self):
return len(self.dataset)
def attr_labels(self):
return self.attr_label
def num_attr_label(self):
return len(self.attr_label)
def __getitem__(self, index):
img_path, pid, camid = self.dataset[index]
#if self.dataset_name == 'market1501':
temp_root, temp_name = img_path.split('bounding')
seg_path = temp_root + 'segmentation/' + 'bounding'+ temp_name + '.npy'
#else:
# temp_root, temp_name = img_path.split('cuhk03_')
# seg_path = temp_root + 'segmentation/' + 'cuhk03_'+ temp_name + '.npy'
img = read_image(img_path)
seg = np.load(seg_path)
seg = Image.fromarray(seg.astype('uint8')).convert('L')
try:
attr = np.asarray(self.train_attr[f'{self.label2pid[pid]:04}'], dtype=np.float32)
except:
attr = [0] #np.array(self.train_attr)
if self.transform is not None:
# resize
img = self.resize(img)
seg = self.resize(seg)
# random horizontal flip
if(random.random()>self.flip_prob):
img = self.flip(img)
seg = self.flip(seg)
# pad
img = self.pad(img)
seg = self.pad(seg)
# random crop
crop_loc = self.crop_loc_generator.get_params(img, self.img_size)
img = T.functional.crop(img, crop_loc[0], crop_loc[1], crop_loc[2], crop_loc[3])
seg = T.functional.crop(seg, crop_loc[0], crop_loc[1], crop_loc[2], crop_loc[3])
# visualization
#img.save('/mnt/lustre/liuyuan1/cvpr20/network/MT-Net/data_quality/img/img{}.jpg'.format(index))
#seg.save('/mnt/lustre/liuyuan1/cvpr20/network/MT-Net/data_quality/seg/seg{}.jpg'.format(index))
# normalize and erase, only for img, not for seg
img = self.transform(img)
img = self.erase(img)
#img, seg = self.erase(img, np.array(seg))
#temp = Image.fromarray(seg)
#temp.save('/mnt/lustre/liuyuan1/cvpr20/network/MT-Net/data_quality/seg/seg_erase{}.jpg'.format(index))
seg = np.array(seg)
seg = torch.from_numpy(cv2.resize(
seg, (self.seg_height, self.seg_weight), cv2.INTER_NEAREST)).long()
return img, pid, seg, attr, camid, img_path
|
the-stack_0_22347 | """Optimization utilities."""
import inspect
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow_addons.optimizers.weight_decay_optimizers import DecoupledWeightDecayExtension
def get_optimizer_class(name):
"""Returns the optimizer class.
Args:
name: The optimizer name.
Returns:
A class extending ``tf.keras.optimizers.Optimizer``.
Raises:
ValueError: if :obj:`name` can not be resolved to an optimizer class.
"""
optimizer_class = None
if optimizer_class is None:
optimizer_class = getattr(tf.keras.optimizers, name, None)
if optimizer_class is None:
optimizer_class = getattr(tfa.optimizers, name, None)
if optimizer_class is None:
raise ValueError("Unknown optimizer class: %s" % name)
return optimizer_class
def make_optimizer(name, learning_rate, **kwargs):
"""Creates the optimizer.
Args:
name: The name of the optimizer class in ``tf.keras.optimizers`` or
``tfa.optimizers`` as a string.
learning_rate: The learning rate or learning rate schedule to use.
**kwargs: Additional optimizer arguments. If ``weight_decay`` is set, the
optimizer will be extended with decoupled weight decay.
Returns:
A ``tf.keras.optimizers.Optimizer`` instance.
Raises:
ValueError: if :obj:`name` can not be resolved to an optimizer class.
"""
optimizer_class = get_optimizer_class(name)
if "weight_decay" in kwargs:
if DecoupledWeightDecayExtension not in inspect.getmro(optimizer_class):
optimizer_class = tfa.optimizers.extend_with_decoupled_weight_decay(optimizer_class)
optimizer = optimizer_class(learning_rate=learning_rate, **kwargs)
return optimizer
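# Minimal usage sketch (Adam comes from tf.keras.optimizers, LazyAdam from tfa.optimizers;
# the values are purely illustrative):
#
#   optimizer = make_optimizer("Adam", 2e-4, beta_1=0.9, beta_2=0.998)
#   optimizer_wd = make_optimizer("LazyAdam", 1e-3, weight_decay=1e-4)  # decoupled weight decay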
class GradientAccumulator(object):
"""Distribution strategies-aware gradient accumulation utility."""
def __init__(self):
"""Initializes the accumulator."""
self._gradients = []
self._accum_steps = tf.Variable(
initial_value=0,
dtype=tf.int64,
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
@property
def step(self):
"""Number of accumulated steps."""
return self._accum_steps.value()
@property
def gradients(self):
"""The accumulated gradients."""
return list(gradient.value() for gradient in self._get_replica_gradients())
def __call__(self, gradients):
"""Accumulates :obj:`gradients`."""
if not self._gradients:
self._gradients.extend([
tf.Variable(tf.zeros_like(gradient), trainable=False)
for gradient in gradients])
if len(gradients) != len(self._gradients):
raise ValueError("Expected %s gradients, but got %d" % (
len(self._gradients), len(gradients)))
for accum_gradient, gradient in zip(self._get_replica_gradients(), gradients):
accum_gradient.assign_add(gradient)
self._accum_steps.assign_add(1)
def reset(self):
"""Resets the accumulated gradients."""
if self._gradients:
self._accum_steps.assign(0)
for gradient in self._get_replica_gradients():
gradient.assign(tf.zeros_like(gradient))
def _get_replica_gradients(self):
if tf.distribute.has_strategy():
# In a replica context, we want to accumulate gradients on each replica
# without synchronization, so we directly assign the value of the
# current replica.
replica_context = tf.distribute.get_replica_context()
if replica_context is None:
return self._gradients
return (
gradient.device_map.select_for_current_replica(gradient.values, replica_context)
for gradient in self._gradients)
else:
return self._gradients
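# Rough usage sketch (accum_count, model and optimizer are assumed to exist in the caller):
#
#   accumulator = GradientAccumulator()
#   accumulator(gradients)  # call once per step with the computed gradients
#   if accumulator.step % accum_count == 0:
#       optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#       accumulator.reset()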
|
the-stack_0_22349 | import hashlib
from math import ceil
__author__ = 'Iurii Sergiichuk'
'''
The ANSI X9.63 key derivation function
We assume that we use SHA-512 hash function
'''
HASH_LEN = 512
MAX_INPUT = HASH_LEN * (2 ** 32 - 1)
class SharedInfo(object):
def __init__(self, algorithm_id, counter=0, entityAinfo=None, entityBinfo=None, suppPrivInfo=None,
suppPubInfo=None):
"""
:arg algorithm_id: unique identifier of used hash algorithm
:arg counter: counter of iteration
:type algorithm_id: int
:type counter: int
:type entityAinfo: long
:type entityBinfo: long
:type suppPrivInfo: long
:type suppPubInfo: long
:rtype : SharedInfo
"""
self.algorithm_id = algorithm_id
self.counter = counter
self.entityAinfo = entityAinfo
self.entityBinfo = entityBinfo
self.suppPrivInfo = suppPrivInfo
self.suppPubInfo = suppPubInfo
def __str__(self):
result = str(self.algorithm_id) + str(self.counter)
if self.entityAinfo is not None:
result += bin(self.entityAinfo)[2:]
if self.entityBinfo is not None:
result += bin(self.entityBinfo)[2:]
if self.suppPrivInfo is not None:
result += bin(self.suppPrivInfo)[2:]
if self.suppPubInfo is not None:
result += bin(self.suppPubInfo)[2:]
return result
def is_whole(number):
'''
Check whether given number is whole or not
:param number: number to check
:type number: number
:return: true, if given number is whole
:rtype: bool
'''
if number % 1 == 0:
return True
return False
def derivate_key(Z, keydatalen, shared_info):
"""
Process key derivation
:arg Z: shared secret as long number
:arg keydatalen: integer that point ZZ bit length
:arg shared_info: possible additional information
:type Z: long
:type keydatalen: int
:type SharedInfo: SharedInfo
:return: derived key in bit-string format
:rtype : str
"""
if keydatalen > MAX_INPUT:
raise ValueError("Keydatalen should be less than HASH_LEN*(2^32-1), but was:" + str(keydatalen))
shared_info.counter = 0x00000001
hash_parts = []
for i in xrange(int(ceil(keydatalen * 1.0 / HASH_LEN))):
value_to_hash = bin(Z)[2:]
value_to_hash += str(shared_info)
h = hashlib.sha512()
h.update(value_to_hash)
hex_digest = h.hexdigest()
long_digest = long(hex_digest, base=16)
h_i = bin(long_digest)[2:]
hash_parts.append(h_i)
shared_info.counter += 1
r = ''
for i in xrange(len(hash_parts) - 1):
r += hash_parts[i]
h_hash = hash_parts[len(hash_parts) - 1]
if not is_whole(keydatalen * 1.0 / HASH_LEN):
h_hash = h_hash[:keydatalen - HASH_LEN * (len(hash_parts) - 1)]
r += h_hash
return r
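# Hedged usage sketch (Python 2 style, matching the module; the secret and algorithm id are
# made-up values):
#
#   Z = 0x0123456789abcdef  # hypothetical shared secret
#   info = SharedInfo(algorithm_id=1)
#   key_bits = derivate_key(Z, 256, info)
#   print(len(key_bits))  # 256-bit key as a '0'/'1' string (typically 256 chars)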
|
the-stack_0_22350 | #%reload_ext autoreload
#%autoreload 2
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.cluster import KMeans
from sklearn import metrics
import nmslib
dataset = fetch_20newsgroups(subset='all', shuffle=True, download_if_missing=True)
np.random.seed(123)
texts = dataset.data # Extract text
target = dataset.target # Extract target
#texts[0:10]
#target[0:10]
vectorizer = TfidfVectorizer(stop_words='english', max_df = 0.3)
X = vectorizer.fit_transform(texts)
index = nmslib.init(method='hnsw', space='cosinesimil_sparse', data_type=nmslib.DataType.SPARSE_VECTOR)
index.addDataPointBatch(X)
index_time_params = {'post':2}
index.createIndex(index_time_params, print_progress=True)
nn = 1000
neighbors = index.knnQueryBatch(X, k=nn, num_threads=4)
col = np.array([i for n in neighbors for i in n[0].tolist()])
#row = np.repeat(np.arange(0, len(neighbors)), nn)
row = np.repeat(np.arange(0, len(neighbors)), np.array([len(n[0]) for n in neighbors]))
#data = np.array([1]*len(row))
data = np.array([i for n in neighbors for i in n[1].tolist()])
from scipy.sparse import csc_matrix
connectivity = csc_matrix((data, (row, col)), shape = (X.shape[0], X.shape[0]))
#affinity_matrix = 0.5 * (connectivity + connectivity.T)
from scipy.sparse.csgraph import laplacian as csgraph_laplacian
##### Attempt to obtain all eigenvalues #########################
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import lobpcg
from sklearn.manifold._spectral_embedding import _set_diag
from scipy import sparse
from pyamg import smoothed_aggregation_solver
import matplotlib.pyplot as plt
plt.grid()
norm_laplacian = True
n_components = laplacian.shape[0]
random_state = check_random_state(1234)
laplacian, dd = csgraph_laplacian(connectivity, normed=norm_laplacian, return_diag=True)
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
laplacian -= diag_shift
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
eigs, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-5, largest=False)
plt.scatter(np.arange(len(eigs)), eigs)
plt.grid()
plt.show()
# funktioniert
#####################################################################
##
#solution = SpectralClustering(n_clusters=20, assign_labels='kmeans', \
# affinity='precomputed', n_neighbors=20).fit(affinity_matrix)
solution = SpectralClustering(n_clusters = 20, n_components = 21, affinity = 'precomputed', gamma=0.7, eigen_solver='amg').fit(connectivity)
metrics.adjusted_rand_score(solution.labels_, target)
#laplacian, dd = csgraph_laplacian(affinity_matrix, normed = True, return_diag=True)
from sklearn.manifold import spectral_embedding
# this step doesn't help anything:
maps = spectral_embedding(connectivity, n_components=21, eigen_solver='amg', drop_first=False)
solutionKMeans = KMeans(n_clusters=20, init='k-means++',\
max_iter= 100).fit(maps)
metrics.adjusted_rand_score(solutionKMeans.labels_, target)
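# Hedged note: SpectralClustering expects a symmetric affinity matrix; the kNN graph built
# above is generally not symmetric, so symmetrizing it first (as in the commented-out line
# earlier) is the usual fix, e.g.:
#
#   affinity = 0.5 * (connectivity + connectivity.T)
#   solution = SpectralClustering(n_clusters=20, affinity='precomputed').fit(affinity)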
|
the-stack_0_22351 | import os
import cv2
import torch
import numpy as np
import scipy.io as sio
import torch.utils.data as tud
from opts import Opts
from datasets.penn import PENN_CROP
from utils.visualizer import Visualizer
from utils.eval import AverageMeter, get_preds, get_ref, original_coordinate, error, accuracy
def main():
# Parse the options from parameters
opts = Opts().parse()
## For PyTorch 0.4.1, cuda(device)
opts.device = torch.device(f'cuda:{opts.gpu[0]}')
print(opts.expID, opts.task,os.path.dirname(os.path.realpath(__file__)))
# Load the trained model test
if opts.loadModel != 'none':
model_path = os.path.join(opts.root_dir, opts.loadModel)
model = torch.load(model_path).cuda(device=opts.device)
model.eval()
else:
print('ERROR: No model is loaded!')
return
# Read the input image, pass input to gpu
if opts.img == 'None':
val_dataset = PENN_CROP(opts, 'val')
val_loader = tud.DataLoader(
val_dataset,
batch_size=1,
shuffle=False,
num_workers=int(opts.num_workers)
)
opts.nJoints = val_dataset.nJoints
opts.skeleton = val_dataset.skeleton
for i, gt in enumerate(val_loader):
# Test Visualizer, Input and get_preds
if i == 0:
input, label = gt['input'], gt['label']
gtpts, center, scale, proj = gt['gtpts'], gt['center'], gt['scale'], gt['proj']
input_var = input[:, 0, ].float().cuda(device=opts.device, non_blocking=True)
# output = label
output = model(input_var)
# Test Loss, Err and Acc(PCK)
Loss, Err, Acc = AverageMeter(), AverageMeter(), AverageMeter()
ref = get_ref(opts.dataset, scale)
for j in range(opts.preSeqLen):
pred = get_preds(output[:, j, ].cpu().float())
pred = original_coordinate(pred, center[:, ], scale, opts.outputRes)
err, ne = error(pred, gtpts[:, j, ], ref)
acc, na = accuracy(pred, gtpts[:, j, ], ref)
# assert ne == na, "ne must be the same as na"
Err.update(err)
Acc.update(acc)
print(j, f"{Err.val:.6f}", Acc.val)
print('all', f"{Err.avg:.6f}", Acc.avg)
# Visualizer Object
## Initialize
v = Visualizer(opts.nJoints, opts.skeleton, opts.outputRes)
# ## Add input image
# v.add_img(input[0,0,].transpose(2, 0).numpy().astype(np.uint8))
# ## Get the predicted joints
# predJoints = get_preds(output[:, 0, ])
# # ## Add joints and skeleton to the figure
# v.add_2d_joints_skeleton(predJoints, (0, 0, 255))
# Transform heatmap to show
hm_img = output[0,0,].cpu().detach().numpy()
v.add_hm(hm_img)
## Show image
v.show_img(pause=True)
break
else:
print('NOT ready for the raw input outside the dataset')
img = cv2.imread(opts.img)
input = torch.from_numpy(img.transpose(2, 0, 1)).float()/256.
input = input.view(1, input.size(0), input.size(1), input.size(2))
input_var = torch.autograd.Variable(input).float().cuda(device=opts.device)
output = model(input_var)
predJoints = get_preds(output[-2].data.cpu().numpy())[0] * 4
if __name__ == '__main__':
main()
|
the-stack_0_22352 | import asyncio
import os
from typing import cast
import ccxt.async_support as ccxt
from ccxt.async_support.bybit import bybit
# from magnet.config import APICredentialBinance
from pydantic import BaseSettings
class APICredentialBybit(BaseSettings):
API_BYBIT_API_KEY: str
API_BYBIT_API_SECRET: str
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
credential = APICredentialBybit()
# https://github.com/ccxt/ccxt
exchange_id = "bybit"
exchange_class = getattr(ccxt, exchange_id)
def create_api() -> bybit:
return exchange_class(
{
"apiKey": credential.API_BYBIT_API_KEY,
"secret": credential.API_BYBIT_API_SECRET,
"timeout": 30000,
"enableRateLimit": True,
}
)
def ccxt_context(func):
import functools
@functools.wraps(func)
async def wapped():
api: bybit = create_api()
try:
api.urls["api"] = api.urls["test"]
await func(api)
except:
raise
finally:
await api.close()
return wapped
@ccxt_context
async def main(api: bybit):
import pprint
# pprint.pprint(api.api)
# https://note.com/kanawoinvestment/n/ne287ff725fd9
markets = await api.load_markets()
# print(markets)  # printing this causes a bug...
pairs = [x for x in api.symbols if "BTC" in x]
symbol = "BTC/USDT"
symbol = "BTC/USDT"
order_type = "limit"
side = "buy"
amount = 1
price = 50000
# order = await api.create_order(
# symbol, order_type, side, amount, price, {"qty": amount}
# )
# Cross margin trading:
# a risky mode that locks the entire account balance as collateral and applies maximum leverage; leverage is effectively adjusted via the order quantity
# Isolated margin trading:
# locks only the margin required for the order; leverage can be specified explicitly
symbol_new = symbol.replace("/", "")
# USDT (linear) symbols use this endpoint
# https://help.bybit.com/hc/en-us/articles/360039260574-What-is-a-reduce-only-order-
# https://help.bybit.com/hc/en-us/articles/360039260534-What-is-a-close-on-trigger-Order-
if True:
order = await api.private_linear_post_order_create(
{
"side": "Buy",
"symbol": symbol_new,
"order_type": "Market", # Limit
"qty": 1, # 1BitCoin
# "price": 1 # 1USD
"take_profit": 1, # 利確価格 1USD
"stop_loss": 1, # 損切価格 1USD
"time_in_force": "GoodTillCancel",
"reduce_only": False, # オープンポジションがない場合、にリデュースのみの注文は拒否される。要は、クローズ注文として扱う場合に利用する。
"close_on_trigger": False, # システムが他のアクティブな注文を自動的にキャンセルして証拠金を解放
# "order_link_id ": "" # どの注文に関連するのか、カスタマイズでリンクしていい
}
)
print(order)
|
the-stack_0_22354 | # Copyright 2011 Google Inc. All Rights Reserved.
#
__author__ = '[email protected] (Krystian Baclawski)'
from django.conf import settings
from django.conf.urls.defaults import patterns
urlpatterns = patterns(
'dashboard', (r'^job-group$', 'JobGroupListPageHandler'),
(r'^machine$', 'MachineListPageHandler'),
(r'^job/(?P<job_id>\d+)/log$', 'LogPageHandler'),
(r'^job/(?P<job_id>\d+)$', 'JobPageHandler'), (
r'^job-group/(?P<job_group_id>\d+)/files/(?P<path>.*)$',
'JobGroupFilesPageHandler'),
(r'^job-group/(?P<job_group_id>\d+)$', 'JobGroupPageHandler'),
(r'^$', 'DefaultPageHandler'))
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}))
|
the-stack_0_22355 | import os
import sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import sumolib
net = sumolib.net.readNet('nets/charlottenburg/actuated.net.xml')
import numpy as np
from gym import spaces
class TrafficSignal:
"""
This class represents a Traffic Signal of an intersection.
It is responsible for retrieving information and changing the traffic phase using the TraCI API.
"""
def __init__(self, env, ts_id, delta_time, yellow_time, min_green, max_green, use_pressure=False):
self.id = ts_id
self.env = env
self.delta_time = delta_time
# self.yellow_time = yellow_time # patch: we use the yellow time from the net definition further below
self.yellow_time = 0
self.red_time = 0
self.min_green = min_green
self.max_green = max_green
self.green_phase = 0
self.is_yellow = False
self.has_red = False
self.is_red = False
self.is_green = False
self.time_since_last_phase_change = 0
self.use_pressure = use_pressure
self.next_action_time = 0
self.last_measure = 0.0
self.last_reward = None
self.phases = traci.trafficlight.getCompleteRedYellowGreenDefinition(self.id)[0].phases
self.different_phases = {
'g': 0,
'y': 0,
'r': 0
}
self.num_green_phases = 0
self.green_phase_mapper = {}
for index, phase in enumerate(self.phases):
s = phase.state
if 'G' in s or 'g' in s:
self.num_green_phases += 1
self.different_phases['g'] += 1
self.green_phase_mapper[self.num_green_phases-1] = index
elif 'r' in s and 'G' not in s and 'g' not in s and 'y' not in s:
self.different_phases['r'] += 1
if phase.duration > 0:
if self.red_time != 0:
if self.red_time != phase.duration:
raise Exception(f'{self.id} has different red times!')
else:
self.has_red = True
self.red_time = phase.duration
elif 'y' in s:
self.different_phases['y'] += 1
if phase.duration > 0:
if self.yellow_time != 0:
if self.yellow_time != phase.duration:
raise Exception(f'{self.id} has different yellow times!')
else:
self.yellow_time = phase.duration
else:
raise Exception(f'{self.id} has another state {s} within phases!')
self.num_different_phases = 0
for diff_phase in self.different_phases:
if self.different_phases[diff_phase] > 0:
self.num_different_phases += 1
#self.num_green_phases = len(self.phases) // 2 # Number of green phases == number of phases (green+yellow) divided by 2
self.lanes = list(dict.fromkeys(traci.trafficlight.getControlledLanes(self.id))) # Remove duplicates and keep order
if len(self.phases) > 3: # ignore traffic lights without intersection (e.g. just a light for pedestrians to cross)
copy = []
alreadyVisited = []
offset = 0
for i, lane in enumerate(self.lanes):
self.verify_and_append_incoming_lanes(copy, lane, i+offset, alreadyVisited)
if set(self.lanes) != set(copy):
print(f'intersection {self.id} had at least one incoming lane smaller than 23 meters, extended it with:')
print(set(copy) - set(self.lanes))
self.lanes = copy
self.out_lanes = [link[0][1] for link in traci.trafficlight.getControlledLinks(self.id) if link]
self.out_lanes = list(set(self.out_lanes))
self.neighbors = {}
"""
Default observation space is a vector R^(#greenPhases + 2 * #lanes)
s = [current phase one-hot encoded, observation metric]
You can change this by modifying self.observation_space and the method compute_observation()
Action space is which green phase is going to be open for the next delta_time seconds
"""
self.observation_space = spaces.Box(
low=np.zeros(self.num_green_phases + 2*len(self.lanes)),
high=np.ones(self.num_green_phases + 2*len(self.lanes)))
self.discrete_observation_space = spaces.Tuple((
spaces.Discrete(self.num_green_phases), # Green Phase
spaces.Discrete(10) # Metric
))
self.action_space = spaces.Discrete(self.num_green_phases)
logic = traci.trafficlight.Logic("new-program"+self.id, 0, 0, phases=self.phases)
traci.trafficlight.setCompleteRedYellowGreenDefinition(self.id, logic)
def verify_and_append_incoming_lanes(self, lanes, lane, offset, alreadyVisited):
if lane not in lanes and lane not in alreadyVisited:
alreadyVisited.append(lane)
lanes.insert(offset, lane)
length = traci.lane.getLength(lane)
if length < 23:
incomingLanes = net.getLane(lane).getIncoming()
for incomingLane in incomingLanes:
offset += 1
self.verify_and_append_incoming_lanes(lanes, incomingLane.getID(), offset, alreadyVisited)
def set_neighbors(self, traffic_signals):
for ts in traffic_signals:
lanes = list(dict.fromkeys(traci.trafficlight.getControlledLanes(ts)))
sharedLanes = 0
for lane in lanes:
if lane in self.out_lanes:
sharedLanes += 1
if sharedLanes > 0:
self.neighbors[ts] = traffic_signals[ts]
self.discrete_observation_space = spaces.Tuple((
spaces.Discrete(self.num_green_phases),
*(spaces.Discrete(10) for _ in range(1 + len(self.neighbors))) # own metric + metric of neighbor TS
))
@property
def phase(self):
return traci.trafficlight.getPhase(self.id)
@property
def time_to_act(self):
return self.next_action_time == self.env.sim_step
def update(self):
self.time_since_last_phase_change += 1
if self.is_yellow and self.time_since_last_phase_change == self.yellow_time:
traci.trafficlight.setPhase(self.id, (self.phase + 1) % len(self.phases))
self.is_yellow = False
self.time_since_last_phase_change = 0
s = traci.trafficlight.getRedYellowGreenState(self.id)
if 'G' in s or 'g' in s:
self.is_green = True
self.is_red = False
self.is_yellow = False
elif 'r' in s and 'G' not in s and 'g' not in s and 'y' not in s:
self.is_green = False
self.is_red = True
self.is_yellow = False
elif 'y' in s:
self.is_green = False
self.is_red = False
self.is_yellow = True
if self.is_red and self.time_since_last_phase_change == self.red_time:
traci.trafficlight.setPhase(self.id, (self.phase + 1) % len(self.phases))
self.is_red = False
self.time_since_last_phase_change = 0
s = traci.trafficlight.getRedYellowGreenState(self.id)
if 'G' in s or 'g' in s:
self.is_green = True
self.is_red = False
self.is_yellow = False
elif 'r' in s and 'G' not in s and 'g' not in s and 'y' not in s:
self.is_green = False
self.is_red = True
self.is_yellow = False
elif 'y' in s:
self.is_green = False
self.is_red = False
self.is_yellow = True
def set_next_phase(self, new_phase):
"""
        Sets the next green phase if the given new_phase differs from the current one
        and the minimum green time has elapsed; otherwise the current phase is kept.
        :param new_phase: (int) Index of the desired green phase, in [0, num_green_phases-1]
"""
if self.phase == self.green_phase_mapper[new_phase] or self.time_since_last_phase_change < self.min_green + self.yellow_time + self.red_time:
traci.trafficlight.setPhase(self.id, self.phase)
self.next_action_time = self.env.sim_step + self.delta_time
else:
traci.trafficlight.setPhase(self.id, (self.phase + 1) % len(self.phases))
self.next_action_time = self.env.sim_step + self.delta_time
self.time_since_last_phase_change = 0
s = traci.trafficlight.getRedYellowGreenState(self.id)
if 'G' in s or 'g' in s:
self.is_green = True
self.is_red = False
self.is_yellow = False
elif 'r' in s and 'G' not in s and 'g' not in s and 'y' not in s:
self.is_green = False
self.is_red = True
self.is_yellow = False
self.next_action_time += self.red_time
elif 'y' in s:
self.is_green = False
self.is_red = False
self.is_yellow = True
self.next_action_time += self.yellow_time
def compute_observation(self):
phase_id = [1 if self.phase//2 == i else 0 for i in range(self.num_green_phases)] # one-hot encoding
if self.use_pressure:
print('pressure')
pressure = self.get_pressure()
for neighbor in self.neighbors:
print('neighbors pressure')
pressure += self.neighbors[neighbor].get_pressure()
return np.array(phase_id + pressure)
else:
print('Q+D')
density = self.get_lanes_density()
queue = self.get_lanes_queue()
for neighbor in self.neighbors:
print('neighbors Q+D')
density += self.neighbors[neighbor].get_lanes_density()
queue += self.neighbors[neighbor].get_lanes_queue()
return np.array(phase_id + density + queue)
def compute_reward(self):
self.last_reward = self._waiting_time_reward()
return self.last_reward
def _pressure_reward(self):
return -self.get_pressure()[0]
def _queue_average_reward(self):
new_average = np.mean(self.get_stopped_vehicles_num())
reward = self.last_measure - new_average
self.last_measure = new_average
return reward
def _queue_reward(self):
return - (sum(self.get_stopped_vehicles_num()))**2
def _waiting_time_reward(self):
ts_wait = sum(self.get_waiting_time_per_lane()) / 100.0
reward = self.last_measure - ts_wait
self.last_measure = ts_wait
return reward
def _waiting_time_reward2(self):
ts_wait = sum(self.get_waiting_time())
self.last_measure = ts_wait
if ts_wait == 0:
reward = 1.0
else:
reward = 1.0/ts_wait
return reward
def _waiting_time_reward3(self):
ts_wait = sum(self.get_waiting_time())
reward = -ts_wait
self.last_measure = ts_wait
return reward
def get_waiting_time_per_lane(self):
wait_time_per_lane = []
for lane in self.lanes:
veh_list = traci.lane.getLastStepVehicleIDs(lane)
wait_time = 0.0
for veh in veh_list:
veh_lane = traci.vehicle.getLaneID(veh)
acc = traci.vehicle.getAccumulatedWaitingTime(veh)
if veh not in self.env.vehicles:
self.env.vehicles[veh] = {veh_lane: acc}
else:
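                    # getAccumulatedWaitingTime is cumulative over the whole trip,
                    # so subtract the waiting time already attributed to other
                    # lanes to isolate the time spent on the current lane.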
self.env.vehicles[veh][veh_lane] = acc - sum([self.env.vehicles[veh][lane] for lane in self.env.vehicles[veh].keys() if lane != veh_lane])
wait_time += self.env.vehicles[veh][veh_lane]
wait_time_per_lane.append(wait_time)
return wait_time_per_lane
def get_pressure(self):
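        # Max-pressure style metric: absolute difference between the number of
        # vehicles on incoming and outgoing lanes, returned as a one-element
        # list so it can be concatenated with the one-hot phase encoding.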
sum_in = 0
for lane in self.lanes:
sum_in += traci.lane.getLastStepVehicleNumber(lane)
sum_out = 0
for lane in self.out_lanes:
sum_out += traci.lane.getLastStepVehicleNumber(lane)
return [abs(sum_in - sum_out)]
def get_out_lanes_density(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.out_lanes]
def get_lanes_density(self):
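        # Occupancy per incoming lane: vehicle count divided by an approximate
        # lane capacity (lane length / 7.5 m per vehicle), clipped to [0, 1].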
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_lanes_queue(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepHaltingNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_total_queued(self):
return sum([traci.lane.getLastStepHaltingNumber(lane) for lane in self.lanes])
def _get_veh_list(self):
veh_list = []
for lane in self.lanes:
veh_list += traci.lane.getLastStepVehicleIDs(lane)
return veh_list
|
the-stack_0_22357 | """empty message
Revision ID: 0202_new_letter_pricing
Revises: 0201_another_letter_org
Create Date: 2017-07-09 12:44:16.815039
"""
revision = '0202_new_letter_pricing'
down_revision = '0201_another_letter_org'
import uuid
from datetime import datetime
from alembic import op
start = datetime(2018, 6, 30, 23, 0)
NEW_RATES = [
(uuid.uuid4(), start, 4, 0.39, True, 'second'),
(uuid.uuid4(), start, 4, 0.51, False, 'second'),
(uuid.uuid4(), start, 5, 0.42, True, 'second'),
(uuid.uuid4(), start, 5, 0.57, False, 'second'),
]
def upgrade():
conn = op.get_bind()
for id, start_date, sheet_count, rate, crown, post_class in NEW_RATES:
conn.execute("""
INSERT INTO letter_rates (id, start_date, sheet_count, rate, crown, post_class)
VALUES ('{}', '{}', '{}', '{}', '{}', '{}')
""".format(id, start_date, sheet_count, rate, crown, post_class))
def downgrade():
pass
|
the-stack_0_22359 | from django import template
from django.template.base import Node, TemplateSyntaxError, kwarg_re
from django.utils.html import conditional_escape
from feincms3 import apps
register = template.Library()
@register.simple_tag(takes_context=True)
def render_region(context, regions, region, **kwargs):
"""
Render a single region. See :class:`~feincms3.regions.Regions` for
additional details. Any and all keyword arguments are forwarded to the
``render`` method of the ``Regions`` instance.
Usage::
{% render_region regions "main" %}
"""
return regions.render(region, context, **kwargs)
class ReverseAppNode(Node):
def __init__(self, namespaces, view_name, args, kwargs, asvar):
self.namespaces = namespaces
self.view_name = view_name
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def _current_app(self, context):
try:
return context.request.current_app
except AttributeError:
try:
return context.request.resolver_match.namespace
except AttributeError:
return None
def render(self, context):
args = [arg.resolve(context) for arg in self.args]
kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()}
namespaces = self.namespaces.resolve(context)
view_name = self.view_name.resolve(context)
fallback = kwargs.pop("fallback", None)
if not isinstance(namespaces, (list, tuple)):
namespaces = namespaces.split(",")
# Try to look up the URL. If it fails, raise NoReverseMatch unless the
# {% reverse ... as var %} construct is used, in which case return
# nothing.
url = ""
try:
url = apps.reverse_app(
namespaces,
view_name,
args=args,
kwargs=kwargs,
current_app=self._current_app(context),
)
except apps.NoReverseMatch:
if fallback is not None:
url = fallback
elif self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ""
else:
if context.autoescape:
url = conditional_escape(url)
return url
@register.tag
def reverse_app(parser, token):
"""
Reverse app URLs, preferring the active language.
Usage::
{% load feincms3 %}
{% reverse_app 'blog' 'detail' [args] [kw=args] [fallback='/'] %}
``namespaces`` can either be a list or a comma-separated list of
namespaces. ``NoReverseMatch`` exceptions can be avoided by providing a
``fallback`` as a keyword argument or by saving the result in a variable,
similar to ``{% url 'view' as url %}`` does::
{% reverse_app 'newsletter' 'subscribe-form' fallback='/newsletter/' %}
Or::
{% reverse_app 'extranet' 'login' as login_url %}
"""
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError(
"'reverse_app' takes at least two arguments, a namespace and"
" a URL pattern name."
)
namespaces = parser.compile_filter(bits[1])
viewname = parser.compile_filter(bits[2])
args = []
kwargs = {}
asvar = None
bits = bits[3:]
if len(bits) >= 2 and bits[-2] == "as":
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to reverse_app tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return ReverseAppNode(namespaces, viewname, args, kwargs, asvar)
|
the-stack_0_22360 | def leiaInt(mgn):
while True:
try:
n = int(input(mgn))
except (ValueError, TypeError):
print('Erro: Digite um número inteiro válido')
except Exception as erro:
print(erro.__cause__, erro.__class__)
else:
return n
def leiaReal(mgn):
while True:
try:
f = float(input(mgn))
except (ValueError, TypeError):
print('Erro: Digite um número real válido')
except Exception as erro:
print(erro.__cause__, erro.__class__)
except KeyboardInterrupt:
f = 0
return f
else:
return f
i = leiaInt('Digite um número Inteiro: ')
f = leiaReal('Digite um número Real: ')
print(f'O valor inteiro digitado foi {i} e o valor real foi {f}')
the-stack_0_22365 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
import redis
import logging
from django.conf import settings
import json
redis_server = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
logger = logging.getLogger(__name__)
def send_message(user_id, message):
"""
Sends a message to a user
"""
logger.info("send_message() - user_id: '%s' - message: '%s'", user_id, message)
url = "/app/user/{0}/notifications".format(user_id)
redis_server.publish(url, message)
def store_uuid_cookie(user_id):
"""
    Generates a uuidCookie and stores it in Redis.
Returns: uuidCookie if stored correctly
Returns: None if cookie couldn't be stored
"""
uuid_cookie = str(uuid.uuid4())
data = json.dumps({
'userId': user_id
})
logger.info("store_uuid_cookie() - uuid_cookie: '%s' - data: '%s'", uuid_cookie, data)
set_result = redis_server.set(settings.UUIDCOOKIE_PREFIX + uuid_cookie,
data,
settings.UUIDCOOKIE_EXPIRE_TIME,
nx=True)
if set_result is True:
return uuid_cookie
else:
logger.error("store_uuid_cookie() - redis_server.set() FAILED")
return None
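# Illustrative usage (hypothetical user id and payload, shown only as a sketch
# of how the two helpers above are meant to be combined):
#
#   cookie = store_uuid_cookie(user_id=42)
#   if cookie is not None:
#       send_message(42, json.dumps({'uuidCookie': cookie}))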
|
the-stack_0_22367 | """empty message
Revision ID: 0eeaa5aed53b
Revises: e36f1d0c4947
Create Date: 2019-06-26 17:31:17.238355
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "0eeaa5aed53b"
down_revision = "e36f1d0c4947"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"projects",
sa.Column(
"enforce_random_task_selection",
sa.Boolean(),
nullable=True,
server_default=sa.false(),
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("projects", "enforce_random_task_selection")
# ### end Alembic commands ###
|
the-stack_0_22371 | import Domoticz
from devices.configuration import config
from devices.device import Device
from devices.device import DeviceCreateException
from devices.device import DeviceUpdateException
class DeviceModulationLevel(Device):
domoticz_device_type = 243
domoticz_subtype = 6
domoticz_switch_type = 0
def __init__(self, plugin_devices, toon):
super().__init__(config.STR_UNIT_MODULATION_LEVEL,
config.STD_UNIT_MODULATION_LEVEL,
plugin_devices,
toon)
def create(self):
if not self.exists:
try:
Domoticz.Log("Creating modulation level device " + self.name)
Domoticz.Device(Name=self.name, Unit=self.unit, Type=self.domoticz_device_type,
Subtype=self.domoticz_subtype, Switchtype=self.domoticz_switch_type).Create()
except DeviceCreateException as ex:
Domoticz.Log("An error occurred creating " + self.name)
Domoticz.Log("Exception: " + str(ex))
elif config.debug:
Domoticz.Log("Unit " + str(self.unit) + " exists - nothing to do")
return self
def update(self):
super().update()
str_value = ""
try:
modulation_level = self.toon.thermostat_info.current_modulation_level
str_value = str(modulation_level)
if str_value != self.previous_value:
if config.debug:
Domoticz.Log("Update modulation level: " + str_value)
self.plugin_devices[self.unit].Update(modulation_level, str_value)
except DeviceUpdateException as ex:
Domoticz.Log("An error occurred updating " + self.name)
Domoticz.Log("Exception: " + str(ex))
self.set_previous_value(str_value)
|
the-stack_0_22372 | import os
from unittest.mock import patch
import pytest
import requests
from autovirt.session import VirtSession, VirtSessionOptions
@pytest.fixture
def options() -> VirtSessionOptions:
return VirtSessionOptions(
session_file="tests/session.dat",
session_timeout=1800,
base_url="https://virtonomica.ru/api/vera",
login="login",
password="password",
)
@pytest.fixture
def response_ok() -> requests.Response:
response = requests.Response()
response.status_code = 200
return response
@pytest.fixture
def response_not_ok() -> requests.Response:
response = requests.Response()
response.status_code = 500
return response
@pytest.fixture
def session(options):
return VirtSession(options=options)
@patch("autovirt.session.VirtSession.login")
def test_get_logged_session_new_session(login_mock, session):
if os.path.isfile(session.options.session_file):
os.remove(session.options.session_file)
assert type(session.get_logged_session()) == requests.Session
login_mock.assert_called()
@patch("autovirt.session.requests.Session.post")
def test_login_response_ok(
post_mock, session, options: VirtSessionOptions, response_ok
):
post_mock.return_value = response_ok
session.login(requests.Session())
post_mock.assert_called_with(
f"{options.base_url}/user/login",
{"email": options.login, "password": options.password},
)
@patch("autovirt.session.requests.Session.post")
def test_login_response_not_ok(
post_mock, session, options: VirtSessionOptions, response_not_ok
):
post_mock.return_value = response_not_ok
with pytest.raises(RuntimeError):
session.login(requests.Session())
|
the-stack_0_22373 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from collections import defaultdict
from datetime import datetime, timedelta
import logging
import os
import time
import webapp2
from google.appengine.api import memcache
from google.appengine.ext.webapp import template
from google.appengine.ext import db
import encoding
import json
import settings
import tweeter
import models
import mailer
import subscription
import random
LOGGER = logging.getLogger(__name__)
def cache(method, seconds=60 * 60 * 24):
""" A basic caching wrapper that will generate a key based off of the URL
of the request """
#@functools.wraps
def wrapped(handler, *a, **kw):
key = (handler.request.path.replace('/', '') +
handler.request.query_string)
# This is absolutely required, to normalize memcached keys
# from /twitter/post and /uptime/post
if "post" in key:
key = key.replace("post", '')
data = memcache.get(key)
if not data:
LOGGER.info('CACHE miss')
data = method(handler, *a, **kw)
if not memcache.add(key=key, value=data, time=seconds):
LOGGER.error('Failed to set cache ' + key)
return data
return wrapped
class TwitterBaseController(webapp2.RequestHandler):
def __init__(self, *a, **kw):
super(TwitterBaseController, self).__init__(*a, **kw)
self.tweet_manager = tweeter.TwitterStatusProcessor(
**settings.TWITTER['AUTH']
)
class TwitterHandler(TwitterBaseController):
def get(self, service=None, **_):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(self._get(service))
@cache
def _get(self, service):
tweets = []
services = [service] if service else tweeter.SERVICES
for service in services:
tweets += self.tweet_manager.get(service)
return encoding.to_json({
'messages': [encoding.to_dict(m) for m in tweets]
})
def post(self):
self.tweet_manager.run()
keys = [
'twitter',
'twittermessages',
'twittermessageslatest',
]
for key in keys:
memcache.delete(key)
# Send notifications on tweet
for service in tweeter.SERVICES:
latest_tweet = self.tweet_manager.get_last_message(service)
# Notified must be set, False, and created within the last 10
# minutes
if (latest_tweet and hasattr(latest_tweet, 'notified') and
not latest_tweet.notified
and latest_tweet.created_at > datetime.utcnow() - timedelta(minutes=10)):
self.tweet_manager._set_notified(latest_tweet.tweet_id)
subscription.send_emails(service=service,
request_url=self.request.url,
current_state=latest_tweet.status,
twitter_tweet=latest_tweet.message)
self.get()
class TwitterPostHandler(webapp2.RequestHandler):
def get(self):
th = TwitterHandler(self.request, self.response)
th.post()
class TwitterMessageHandler(TwitterBaseController):
def get(self, *a, **kw):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(self._get())
@cache
def _get(self):
offset = int(self.request.get('offset', 0))
max_date = datetime.utcnow() - timedelta(days=offset)
min_date = max_date - timedelta(days=30)
messages = defaultdict(list) # messages by service
messages['min'] = time.mktime(min_date.timetuple())
messages['max'] = time.mktime(max_date.timetuple())
for service in tweeter.SERVICES:
tweets = self.tweet_manager.get_by_dates(
service,
max_date=max_date,
min_date=min_date,
)
tweets = [t for t in tweets]
messages[service] = [encoding.to_dict(m)
for m in reversed(tweets)]
return encoding.to_json({
'messages': messages,
})
class TwitterLatestMessageHandler(TwitterBaseController):
"""
Mounted at /twitter/messages/latest
GET returns a dictionary of messages by service
{
'DASH': message1,
'API': message1,
'JS': message1,
}
"""
def get(self, *a, **kw):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(self._get())
@cache
def _get(self):
messages = {} # messages by date + service
for service in tweeter.SERVICES:
tweet = self.tweet_manager.get_last_message(service)
if tweet:
if tweet.created_at < datetime.utcnow() - timedelta(days=1):
tweet = None
messages[service] = encoding.to_dict(
tweet,
) if tweet else None
return encoding.to_json({
'messages': messages,
})
class UptimeHandler(TwitterBaseController):
"""
Mounted at /uptime
GET returns a dictionary of uptime for the various services
POST deletes cached results, the subsequent GET will re-populate the cache
"""
def __init__(self, request, response):
super(UptimeHandler, self).__init__(request, response)
def get(self, *a, **kw):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(self._get())
@cache
def _get(self):
raw = {
'uptime': {}
}
for service in tweeter.SERVICES:
# we need to have a state, so if we can't find one (e.g. because
# the last tweet was a long time ago), default to UP
tweet_state = self.tweet_manager.get_latest_state(service) or 'UP'
raw['uptime'][service] = {
'status': tweet_state,
'uptime': self.tweet_manager.get_uptime_month(service)
}
subscription.should_notify(service, tweet_state, self.request.url)
return encoding.to_json(raw)
def post(self):
memcache.delete('uptime')
self.get()
class UptimePostHandler(webapp2.RequestHandler):
def get(self):
uh = UptimeHandler(self.request, self.response)
uh.post()
class MainHandler(webapp2.RequestHandler):
"""
Serves the index.html, that's it.
"""
def get(self, *a, **kw):
path = os.path.join(
os.path.dirname(__file__),
'templates',
'index.html')
self.response.out.write(template.render(path, {}))
class SubscribeEmailHandler(webapp2.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'application/json'
email = self.request.get('email')
services = self.request.get('services').rstrip(',')
query = db.GqlQuery(
"SELECT * FROM EmailSubscriber WHERE email = :1",
email)
number_rows = query.count()
if number_rows > 0:
self.response.status = 409
self.response.out.write(json.dumps({
"error": email + " is already subscribed."
}))
return
mail = mailer.Mail()
mail.send(email,
"Successfully subscribed to Kloudless " +
services + " incidents",
"You successfully subscribed to Kloudless " +
services + " incidents.",
self.request.url)
s = models.EmailSubscriber(email=email,
services=services.split(','))
s.put()
self.response.out.write(json.dumps({
"subscribed": "email",
"services": services.split(',')
}))
class UnsubscribeEmailHandler(webapp2.RequestHandler):
def get(self, base64email):
if not base64email:
self.redirect("/")
email = base64.urlsafe_b64decode(base64email)
if email:
email_subscriber = models.EmailSubscriber.all()
email_subscriber.filter('email =', email)
for es in email_subscriber:
es.delete()
# ToDo: show a nice pretty notification that e-mail is subscribed
self.redirect("/")
app = webapp2.WSGIApplication([
('/', MainHandler),
('/uptime', UptimeHandler),
('/uptime/post', UptimePostHandler),
('/twitter', TwitterHandler),
('/twitter/post', TwitterPostHandler),
('/twitter/messages', TwitterMessageHandler),
('/twitter/messages/latest', TwitterLatestMessageHandler),
('/twitter/(.*)', TwitterHandler),
('/subscriptions/email', SubscribeEmailHandler),
('/subscriptions/email/(.*)', UnsubscribeEmailHandler)
], debug=settings.DEBUG)
|
the-stack_0_22374 | """Support for Tahoma devices."""
from collections import defaultdict
import logging
from requests.exceptions import RequestException
from tahoma_api import Action, TahomaApi
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tahoma"
TAHOMA_ID_FORMAT = "{}_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
TAHOMA_COMPONENTS = ["binary_sensor", "cover", "lock", "scene", "sensor", "switch"]
TAHOMA_TYPES = {
"io:AwningValanceIOComponent": "cover",
"io:ExteriorVenetianBlindIOComponent": "cover",
"io:DiscreteGarageOpenerIOComponent": "cover",
"io:DiscreteGarageOpenerWithPartialPositionIOComponent": "cover",
"io:HorizontalAwningIOComponent": "cover",
"io:GarageOpenerIOComponent": "cover",
"io:LightIOSystemSensor": "sensor",
"io:OnOffIOComponent": "switch",
"io:OnOffLightIOComponent": "switch",
"io:RollerShutterGenericIOComponent": "cover",
"io:RollerShutterUnoIOComponent": "cover",
"io:RollerShutterVeluxIOComponent": "cover",
"io:RollerShutterWithLowSpeedManagementIOComponent": "cover",
"io:SomfyBasicContactIOSystemSensor": "sensor",
"io:SomfyContactIOSystemSensor": "sensor",
"io:TemperatureIOSystemSensor": "sensor",
"io:VerticalExteriorAwningIOComponent": "cover",
"io:VerticalInteriorBlindVeluxIOComponent": "cover",
"io:WindowOpenerVeluxIOComponent": "cover",
"opendoors:OpenDoorsSmartLockComponent": "lock",
"rtds:RTDSContactSensor": "sensor",
"rtds:RTDSMotionSensor": "sensor",
"rtds:RTDSSmokeSensor": "smoke",
"rts:BlindRTSComponent": "cover",
"rts:CurtainRTSComponent": "cover",
"rts:DualCurtainRTSComponent": "cover",
"rts:ExteriorVenetianBlindRTSComponent": "cover",
"rts:GarageDoor4TRTSComponent": "switch",
"rts:LightRTSComponent": "switch",
"rts:RollerShutterRTSComponent": "cover",
"rts:OnOffRTSComponent": "switch",
"rts:VenetianBlindRTSComponent": "cover",
"somfythermostat:SomfyThermostatTemperatureSensor": "sensor",
"somfythermostat:SomfyThermostatHumiditySensor": "sensor",
}
def setup(hass, config):
"""Activate Tahoma component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
exclude = conf.get(CONF_EXCLUDE)
try:
api = TahomaApi(username, password)
except RequestException:
_LOGGER.exception("Error when trying to log in to the Tahoma API")
return False
try:
api.get_setup()
devices = api.get_devices()
scenes = api.get_action_groups()
except RequestException:
_LOGGER.exception("Error when getting devices from the Tahoma API")
return False
hass.data[DOMAIN] = {"controller": api, "devices": defaultdict(list), "scenes": []}
for device in devices:
_device = api.get_device(device)
if all(ext not in _device.type for ext in exclude):
device_type = map_tahoma_device(_device)
if device_type is None:
_LOGGER.warning(
"Unsupported type %s for Tahoma device %s",
_device.type,
_device.label,
)
continue
hass.data[DOMAIN]["devices"][device_type].append(_device)
for scene in scenes:
hass.data[DOMAIN]["scenes"].append(scene)
for component in TAHOMA_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
def map_tahoma_device(tahoma_device):
"""Map Tahoma device types to Home Assistant components."""
return TAHOMA_TYPES.get(tahoma_device.type)
class TahomaDevice(Entity):
"""Representation of a Tahoma device entity."""
def __init__(self, tahoma_device, controller):
"""Initialize the device."""
self.tahoma_device = tahoma_device
self.controller = controller
self._name = self.tahoma_device.label
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {"tahoma_device_id": self.tahoma_device.url}
def apply_action(self, cmd_name, *args):
"""Apply Action to Device."""
action = Action(self.tahoma_device.url)
action.add_command(cmd_name, *args)
self.controller.apply_actions("HomeAssistant", [action])
|
the-stack_0_22376 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import platform
import re
import itertools
import shutil
import tempfile
import llnl.util.lang
from llnl.util.filesystem import (
path_contains_subdirectory, paths_containing_libs)
import llnl.util.tty as tty
import spack.error
import spack.spec
import spack.architecture
import spack.util.executable
import spack.compilers
from spack.util.environment import filter_system_paths
__all__ = ['Compiler']
def _verify_executables(*paths):
for path in paths:
        if not (os.path.isfile(path) and os.access(path, os.X_OK)):
raise CompilerAccessError(path)
@llnl.util.lang.memoized
def get_compiler_version_output(compiler_path, version_arg, ignore_errors=()):
"""Invokes the compiler at a given path passing a single
version argument and returns the output.
Args:
compiler_path (path): path of the compiler to be invoked
version_arg (str): the argument used to extract version information
"""
compiler = spack.util.executable.Executable(compiler_path)
output = compiler(
version_arg, output=str, error=str, ignore_errors=ignore_errors)
return output
def tokenize_flags(flags_str):
"""Given a compiler flag specification as a string, this returns a list
where the entries are the flags. For compiler options which set values
using the syntax "-flag value", this function groups flags and their
values together. Any token not preceded by a "-" is considered the
value of a prior flag."""
tokens = flags_str.split()
if not tokens:
return []
flag = tokens[0]
flags = []
for token in tokens[1:]:
if not token.startswith('-'):
flag += ' ' + token
else:
flags.append(flag)
flag = token
flags.append(flag)
return flags
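# Example (added for illustration, not part of the original module):
#   tokenize_flags('-O2 -I /usr/include -g') == ['-O2', '-I /usr/include', '-g']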
#: regex for parsing linker lines
_LINKER_LINE = re.compile(
r'^( *|.*[/\\])'
r'(link|ld|([^/\\]+-)?ld|collect2)'
r'[^/\\]*( |$)')
#: components of linker lines to ignore
_LINKER_LINE_IGNORE = re.compile(r'(collect2 version|^[A-Za-z0-9_]+=|/ldfe )')
#: regex to match linker search paths
_LINK_DIR_ARG = re.compile(r'^-L(.:)?(?P<dir>[/\\].*)')
#: regex to match linker library path arguments
_LIBPATH_ARG = re.compile(r'^[-/](LIBPATH|libpath):(?P<dir>.*)')
def _parse_link_paths(string):
"""Parse implicit link paths from compiler debug output.
This gives the compiler runtime library paths that we need to add to
the RPATH of generated binaries and libraries. It allows us to
ensure, e.g., that codes load the right libstdc++ for their compiler.
"""
lib_search_paths = False
raw_link_dirs = []
tty.debug('parsing implicit link info')
for line in string.splitlines():
if lib_search_paths:
if line.startswith('\t'):
raw_link_dirs.append(line[1:])
continue
else:
lib_search_paths = False
elif line.startswith('Library search paths:'):
lib_search_paths = True
if not _LINKER_LINE.match(line):
continue
if _LINKER_LINE_IGNORE.match(line):
continue
tty.debug('linker line: %s' % line)
next_arg = False
for arg in line.split():
if arg in ('-L', '-Y'):
next_arg = True
continue
if next_arg:
raw_link_dirs.append(arg)
next_arg = False
continue
link_dir_arg = _LINK_DIR_ARG.match(arg)
if link_dir_arg:
link_dir = link_dir_arg.group('dir')
tty.debug('linkdir: %s' % link_dir)
raw_link_dirs.append(link_dir)
link_dir_arg = _LIBPATH_ARG.match(arg)
if link_dir_arg:
link_dir = link_dir_arg.group('dir')
tty.debug('libpath: %s', link_dir)
raw_link_dirs.append(link_dir)
tty.debug('found raw link dirs: %s' % ', '.join(raw_link_dirs))
implicit_link_dirs = list()
visited = set()
for link_dir in raw_link_dirs:
normalized_path = os.path.abspath(link_dir)
if normalized_path not in visited:
implicit_link_dirs.append(normalized_path)
visited.add(normalized_path)
tty.debug('found link dirs: %s' % ', '.join(implicit_link_dirs))
return implicit_link_dirs
def _parse_non_system_link_dirs(string):
"""Parses link paths out of compiler debug output.
Args:
string (str): compiler debug output as a string
Returns:
(list of str): implicit link paths parsed from the compiler output
"""
link_dirs = _parse_link_paths(string)
# Return set of directories containing needed compiler libs, minus
# system paths. Note that 'filter_system_paths' only checks for an
# exact match, while 'in_system_subdirectory' checks if a path contains
# a system directory as a subdirectory
link_dirs = filter_system_paths(link_dirs)
return list(p for p in link_dirs if not in_system_subdirectory(p))
def in_system_subdirectory(path):
system_dirs = ['/lib/', '/lib64/', '/usr/lib/', '/usr/lib64/',
'/usr/local/lib/', '/usr/local/lib64/']
return any(path_contains_subdirectory(path, x) for x in system_dirs)
class Compiler(object):
"""This class encapsulates a Spack "compiler", which includes C,
C++, and Fortran compilers. Subclasses should implement
support for specific compilers, their possible names, arguments,
and how to identify the particular type of compiler."""
# Subclasses use possible names of C compiler
cc_names = []
# Subclasses use possible names of C++ compiler
cxx_names = []
# Subclasses use possible names of Fortran 77 compiler
f77_names = []
# Subclasses use possible names of Fortran 90 compiler
fc_names = []
# Optional prefix regexes for searching for this type of compiler.
# Prefixes are sometimes used for toolchains, e.g. 'powerpc-bgq-linux-'
prefixes = []
# Optional suffix regexes for searching for this type of compiler.
# Suffixes are used by some frameworks, e.g. macports uses an '-mp-X.Y'
# version suffix for gcc.
suffixes = [r'-.*']
#: Compiler argument that produces version information
version_argument = '-dumpversion'
#: Return values to ignore when invoking the compiler to get its version
ignore_version_errors = ()
#: Regex used to extract version from compiler's output
version_regex = '(.*)'
# These libraries are anticipated to be required by all executables built
# by any compiler
_all_compiler_rpath_libraries = ['libc', 'libc++', 'libstdc++']
# Default flags used by a compiler to set an rpath
@property
def cc_rpath_arg(self):
return '-Wl,-rpath,'
@property
def cxx_rpath_arg(self):
return '-Wl,-rpath,'
@property
def f77_rpath_arg(self):
return '-Wl,-rpath,'
@property
def fc_rpath_arg(self):
return '-Wl,-rpath,'
@property
def linker_arg(self):
"""Flag that need to be used to pass an argument to the linker."""
return '-Wl,'
@property
def disable_new_dtags(self):
if platform.system() == 'Darwin':
return ''
return '--disable-new-dtags'
@property
def enable_new_dtags(self):
if platform.system() == 'Darwin':
return ''
return '--enable-new-dtags'
# Cray PrgEnv name that can be used to load this compiler
PrgEnv = None
# Name of module used to switch versions of this compiler
PrgEnv_compiler = None
def __init__(self, cspec, operating_system, target,
paths, modules=[], alias=None, environment=None,
extra_rpaths=None, enable_implicit_rpaths=None,
**kwargs):
self.spec = cspec
self.operating_system = str(operating_system)
self.target = target
self.modules = modules
self.alias = alias
self.extra_rpaths = extra_rpaths
self.enable_implicit_rpaths = enable_implicit_rpaths
def check(exe):
if exe is None:
return None
_verify_executables(exe)
return exe
self.cc = check(paths[0])
self.cxx = check(paths[1])
if len(paths) > 2:
self.f77 = check(paths[2])
if len(paths) == 3:
self.fc = self.f77
else:
self.fc = check(paths[3])
self.environment = environment
self.extra_rpaths = extra_rpaths or []
# Unfortunately have to make sure these params are accepted
# in the same order they are returned by sorted(flags)
# in compilers/__init__.py
self.flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
value = kwargs.get(flag, None)
if value is not None:
self.flags[flag] = tokenize_flags(value)
@property
def version(self):
return self.spec.version
def implicit_rpaths(self):
if self.enable_implicit_rpaths is False:
return []
exe_paths = [
x for x in [self.cc, self.cxx, self.fc, self.f77] if x]
link_dirs = self._get_compiler_link_paths(exe_paths)
all_required_libs = (
list(self.required_libs) + Compiler._all_compiler_rpath_libraries)
return list(paths_containing_libs(link_dirs, all_required_libs))
@property
def required_libs(self):
"""For executables created with this compiler, the compiler libraries
that would be generally required to run it.
"""
# By default every compiler returns the empty list
return []
@classmethod
def _get_compiler_link_paths(cls, paths):
first_compiler = next((c for c in paths if c), None)
if not first_compiler:
return []
try:
tmpdir = tempfile.mkdtemp(prefix='spack-implicit-link-info')
fout = os.path.join(tmpdir, 'output')
fin = os.path.join(tmpdir, 'main.c')
with open(fin, 'w+') as csource:
csource.write(
'int main(int argc, char* argv[]) { '
'(void)argc; (void)argv; return 0; }\n')
compiler_exe = spack.util.executable.Executable(first_compiler)
output = str(compiler_exe(cls.verbose_flag(), fin, '-o', fout,
output=str, error=str)) # str for py2
return _parse_non_system_link_dirs(output)
except spack.util.executable.ProcessError as pe:
tty.debug('ProcessError: Command exited with non-zero status: ' +
pe.long_message)
return []
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
@classmethod
def verbose_flag(cls):
"""
This property should be overridden in the compiler subclass if a
verbose flag is available.
If it is not overridden, it is assumed to not be supported.
"""
# This property should be overridden in the compiler subclass if
# OpenMP is supported by that compiler
@property
def openmp_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self, "OpenMP", "openmp_flag")
# This property should be overridden in the compiler subclass if
# C++98 is not the default standard for that compiler
@property
def cxx98_flag(self):
return ""
# This property should be overridden in the compiler subclass if
# C++11 is supported by that compiler
@property
def cxx11_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self,
"the C++11 standard",
"cxx11_flag")
# This property should be overridden in the compiler subclass if
# C++14 is supported by that compiler
@property
def cxx14_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self,
"the C++14 standard",
"cxx14_flag")
# This property should be overridden in the compiler subclass if
# C++17 is supported by that compiler
@property
def cxx17_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self,
"the C++17 standard",
"cxx17_flag")
# This property should be overridden in the compiler subclass if
# C99 is supported by that compiler
@property
def c99_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self,
"the C99 standard",
"c99_flag")
# This property should be overridden in the compiler subclass if
# C11 is supported by that compiler
@property
def c11_flag(self):
# If it is not overridden, assume it is not supported and warn the user
raise UnsupportedCompilerFlag(self,
"the C11 standard",
"c11_flag")
#
# Compiler classes have methods for querying the version of
# specific compiler executables. This is used when discovering compilers.
#
# Compiler *instances* are just data objects, and can only be
# constructed from an actual set of executables.
#
@classmethod
def default_version(cls, cc):
"""Override just this to override all compiler version functions."""
output = get_compiler_version_output(
cc, cls.version_argument, tuple(cls.ignore_version_errors))
return cls.extract_version_from_output(output)
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
"""Extracts the version from compiler's output."""
match = re.search(cls.version_regex, output)
return match.group(1) if match else 'unknown'
@classmethod
def cc_version(cls, cc):
return cls.default_version(cc)
@classmethod
def cxx_version(cls, cxx):
return cls.default_version(cxx)
@classmethod
def f77_version(cls, f77):
return cls.default_version(f77)
@classmethod
def fc_version(cls, fc):
return cls.default_version(fc)
@classmethod
def search_regexps(cls, language):
# Compile all the regular expressions used for files beforehand.
# This searches for any combination of <prefix><name><suffix>
# defined for the compiler
compiler_names = getattr(cls, '{0}_names'.format(language))
prefixes = [''] + cls.prefixes
suffixes = [''] + cls.suffixes
regexp_fmt = r'^({0}){1}({2})$'
return [
re.compile(regexp_fmt.format(prefix, re.escape(name), suffix))
for prefix, name, suffix in
itertools.product(prefixes, compiler_names, suffixes)
]
def setup_custom_environment(self, pkg, env):
"""Set any environment variables necessary to use the compiler."""
pass
def __repr__(self):
"""Return a string representation of the compiler toolchain."""
return self.__str__()
def __str__(self):
"""Return a string representation of the compiler toolchain."""
return "%s(%s)" % (
self.name, '\n '.join((str(s) for s in (
self.cc, self.cxx, self.f77, self.fc, self.modules,
str(self.operating_system)))))
class CompilerAccessError(spack.error.SpackError):
def __init__(self, path):
super(CompilerAccessError, self).__init__(
"'%s' is not a valid compiler." % path)
class InvalidCompilerError(spack.error.SpackError):
def __init__(self):
super(InvalidCompilerError, self).__init__(
"Compiler has no executables.")
class UnsupportedCompilerFlag(spack.error.SpackError):
def __init__(self, compiler, feature, flag_name, ver_string=None):
super(UnsupportedCompilerFlag, self).__init__(
"{0} ({1}) does not support {2} (as compiler.{3})."
.format(compiler.name,
ver_string if ver_string else compiler.version,
feature,
flag_name),
"If you think it should, please edit the compiler.{0} subclass to"
.format(compiler.name) +
" implement the {0} property and submit a pull request or issue."
.format(flag_name)
)
|
the-stack_0_22380 | """This module contains the general information for SysdebugCoreFileRepository ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class SysdebugCoreFileRepositoryConsts():
pass
class SysdebugCoreFileRepository(ManagedObject):
"""This is SysdebugCoreFileRepository class."""
consts = SysdebugCoreFileRepositoryConsts()
naming_props = set([])
mo_meta = MoMeta("SysdebugCoreFileRepository", "sysdebugCoreFileRepository", "corefiles", VersionMeta.Version101a, "InputOutput", 0xf, [], ["admin", "operations"], [u'topSystem'], [u'sysdebugCore', u'sysfileMutation'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.status = None
ManagedObject.__init__(self, "SysdebugCoreFileRepository", parent_mo_or_dn, **kwargs)
|
the-stack_0_22381 | # ---
# title: Developing custom scikit-learn transformers and estimators
# date: 2020-03-05T17:15:00-05:00
# ---
# scikit-learn offers a wide range of Machine Learning models, but it goes way beyond that by providing other tools such as hyperparameter optimization using `GridSearchCV` or composed estimators via `Pipeline`. One of the characteristics I like the most about scikit-learn is its consistent API: all estimators implement the same basic methods (fit and predict). This consistency has been immensely useful to the ML open source community since a lot of third party packages are developed with this in mind (e.g. Keras), hence they are able to interface with each other.
#
# Often we need to implement some functionality that does not exist in scikit-learn or any other package. If we conform to scikit-learn's API, we can limit ourselves to developing a custom transformer/estimator and our code will nicely interface with scikit-learn modules.
#
# In this blog post, I will show how to build custom transformers and estimators, as well as discuss implementation details to do this correctly. [The official docs](https://scikit-learn.org/stable/developers/develop.html) contain all you need to know but here are the most important facts:
#
# 1. All constructor (the `__init__` function) parameters should have default values
# 2. Constructor parameters should be added as attributes *without any modifications*
# 3. Attributes estimated from data must have a name with a trailing underscore
#
# There are other rules but you can use utility functions provided by scikit-learn to take care of them. A `check_estimator` function is also provided to exhaustively verify that your implementation is correct. An [official code template](https://github.com/scikit-learn-contrib/project-template/blob/master/skltemplate/_template.py) is also provided.
# ## Transformer use case: verifying model's input
#
# scikit-learn estimators were originally designed to operate on numpy arrays (although there is current ongoing work to better interface with pandas Data Frames). For practical purposes, this means our estimators do not have a notion of column names (only input shape is verified to raise errors): if columns are shuffled, the transformer/estimator will not complain, but the prediction will be meaningless.
#
# Our custom transformer (an object that implements fit and transform) adds this capability: when used in a `Pipeline` object, it will verify that we are getting the right input columns. The (commented) implementation looks as follows:
# + hide=true
import warnings
import logging
import pickle
import sklearn
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.linear_model import ElasticNet, LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split, GridSearchCV
import pandas as pd
import numpy as np
warnings.filterwarnings('ignore')
dataset = load_boston()
X = pd.DataFrame(dataset.data, columns=dataset.feature_names)
y = dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# -
# We inherit from TransformerMixin to get the fit_transform implementation.
# Transformers in scikit-learn also have to inherit from BaseEstimator
# see: https://github.com/scikit-learn/scikit-learn/blob/b194674c4/sklearn/preprocessing/_data.py#L520
class InputGuard(TransformerMixin, BaseEstimator):
"""
Verify column names at predict time match the ones used when fitting
Parameters
----------
strict : bool, optional
If True, it will raise an error if the input does not match
exactly (same columns, same order), if False, it will ignore
order and extra columns (will only show a warning), defaults
to True
Notes
-----
Must be used in a Pipeline object and must be the first step. fit
and predict should be called with a pandas.DataFrame object
"""
def __init__(self, strict=True):
# no logic allowed here, just assign attributes
# from __init__ args
self.strict = strict
def fit(self, X, y=None):
# we need this to pass check_estimator
X_out, y = check_X_y(X, y)
X = X if hasattr(X, 'columns') else X_out
# our estimator is designed to work on structures
# that have a columns attribute (such as pandas Data Frame)
if hasattr(X, 'columns'):
self.expected_ = list(X.columns)
self.expected_n_ = X.shape[1]
# ...but we still need to support numpy.arrays to
# pass check_estimator
else:
self.expected_ = None
self.expected_n_ = X.shape[1]
warnings.warn('Input does not have a columns attribute, '
'only number of columns will be validated')
return self
def transform(self, X):
# these two are to pass check_estimator
check_is_fitted(self)
X_out = check_array(X)
X = X if hasattr(X, 'columns') else X_out
# if column names are available...
if self.expected_:
return self._transform(X)
else:
# this is raised to pass check_estimator
if self.expected_n_ != X.shape[1] and self.strict:
raise ValueError('Number of columns from fit {} is different from transform {}'
.format(self.expected_n_, X.shape[1]))
return X
def _transform(self, X):
# this function implements our core logic and it
# will only be called when fit received an X with a columns attribute
if not hasattr(X, 'columns'):
raise ValueError('{}.fit ran with a X object that had '
'a columns attribute, but the current '
'X does not have it'.format(type(self).__name__))
columns_got = list(X.columns)
if self.strict:
if self.expected_ != columns_got:
missing = set(self.expected_) - set(columns_got)
raise ValueError('Columns during fit were: {}, but got {} '
'for predict.'
' Missing: {}'.format(self.expected_,
columns_got,
missing))
else:
missing = set(self.expected_) - set(columns_got)
extra = set(columns_got) - set(self.expected_)
if missing:
raise ValueError('Missing columns: {}'.format(missing))
elif extra:
extra = set(columns_got) - set(self.expected_)
warnings.warn('Got extra columns: {}, ignoring'
.format(extra))
return X[self.expected_]
return X
# The `sklearn.utils.validation` module provides utility functions to pass some of `check_estimator` tests without having to implement the logic ourselves (I actually had to perform a few modifications to my original implementation, to fix errors thrown by `check_estimator`). These utility functions transform inputs (`check_X_y`, `check_array`) to return the expected format (numpy arrays) and throw the appropriate exceptions when this is not possible. `check_is_fitted` only raises an error if a call to `predict` is attempted without fitting the model first.
#
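# As a quick illustration (added here, not in the original post) of what these helpers do, `check_array` converts well-formed input into a validated 2D numpy array and rejects malformed input with an informative error:
from sklearn.utils.validation import check_array
X_checked = check_array([[1, 2], [3, 4]])  # nested list -> validated 2D ndarray
print(type(X_checked).__name__, X_checked.shape)  # ndarray (2, 2)
try:
    check_array([1, 2, 3])  # 1D input is rejected
except ValueError as err:
    print('rejected 1D input:', err)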
# We now verify that our transformer passes all the tests:
# if no exceptions are raised, we're good
check_estimator(InputGuard)
# Passing all tests is not absolutely necessary for your transformer (or estimator) to integrate correctly with other scikit-learn modules, but doing so assures that your implementation is robust by handling common scenarios on behalf of the user (e.g. passing a 2D array with one column as y instead of a 1D array) and throwing informative errors. Given the large user base scikit-learn has, this is a must; however, for some very customized implementations, passing all the tests is simply not possible, as we will see in the custom estimator use case.
#
# For now, let's verify that our transformer plays nicely with Pipeline and GridSearchCV:
# +
# our transformer *has* to be the first step in the pipeline,
# to make sure it gets a pandas Data Frame as input and not
# a numpy array (which strips column names)
pipe = Pipeline([('guard', InputGuard()),
('scaler', StandardScaler()),
('reg', ElasticNet())])
# perform hyperparameter tuning
grid = GridSearchCV(pipe, param_grid={'reg__alpha': [0.5, 1.0, 2.0]})
best_pipe = grid.fit(X_train, y_train).best_estimator_
# make predictions using the best model
y_pred = best_pipe.predict(X_test)
print(f'MAE: {np.abs(y_test - y_pred).mean():.2f}')
# -
# We now verify that our transformer throws an error if a column is missing:
# +
# drop a feature used during training
X_corrupted = X_test.drop('CRIM', axis='columns')
try:
best_pipe.predict(X_corrupted)
except ValueError as e:
print('Error message: ', e)
# -
# If we add a column but switch to non-strict mode, we get a warning instead of an error:
warnings.filterwarnings('default')
X_corrupted = X_test.copy()
X_corrupted['extra'] = 1
best_pipe.named_steps['guard'].strict = False
_ = best_pipe.predict(X_corrupted)
warnings.filterwarnings('ignore')
# ## Estimator use case: logging model's predictions
#
# Say we want to log all our predictions to monitor a production model. For the sake of example, we will just use the `logging` module, but this same logic applies to other methods such as saving predictions to a database. There are a few nuances here. `Pipeline` requires all intermediate steps to be transformers (fit/transform), which means we can only add our model (the one that implements predict) at the end.
#
# Since we cannot split our logging in two steps, we have to wrap an existing estimator and add the logging functionality to it; from the outside, our custom estimator will just look like another standard estimator.
#
# The 3 considerations that apply for transformers apply for estimators, plus a fourth one (copied directly from scikit-learn's documentation):
#
# 4. Estimators have `get_params` and `set_params` functions. The get_params function takes no arguments and returns a dict of the `__init__` parameters of the estimator, together with their values. It must take one keyword argument, `deep`, which receives a boolean value that determines whether the method should return the parameters of sub-estimators (for most estimators, this can be ignored). The default value for deep should be true.
class LoggingEstimator(BaseEstimator):
"""
A wrapper for scikit-learn estimators that logs every prediction
Parameters
----------
est_class
The estimator class to use
**kwargs
Keyword arguments to initialize the estimator
"""
# NOTE: we arbitrarily selected a default estimator class
# so check_estimator does not fail when doing LoggingEstimator()
def __init__(self, est_class=LinearRegression, **kwargs):
self.est_class = est_class
# kwargs depend on the model used, so assign them whatever they are
for key, value in kwargs.items():
setattr(self, key, value)
# these attributes support the logging functionality
self._logger = logging.getLogger(__name__)
self._logging_enabled = False
self._param_names = ['est_class'] + list(kwargs.keys())
# in the transformer case, we did not implement get_params
# nor set_params since we inherited them from BaseEstimator
# but such implementation will not work here due to the **kwargs
# in the constructor, so we implemented it
def get_params(self, deep=True):
# Note: we are ignoring the deep parameter
# this will not work with estimators that have sub-estimators
# see https://scikit-learn.org/stable/developers/develop.html#get-params-and-set-params
return {param: getattr(self, param)
for param in self._param_names}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
# our fit method instantiates the actual model, and
# it forwards any extra keyword arguments
def fit(self, X, y, **kwargs):
est_kwargs = self.get_params()
del est_kwargs['est_class']
# remember the trailing underscore
self.model_ = self.est_class(**est_kwargs)
self.model_.fit(X, y, **kwargs)
# fit must return self
return self
def predict(self, X):
check_is_fitted(self)
# we use the fitted model and log if logging is enabled
y_pred = self.model_.predict(X)
if self._logging_enabled:
self._logger.info('Logging predicted values: %s', y_pred)
return y_pred
# requiring a score method is not documented but throws an
# error if not implemented
def score(self, X, y, **kwargs):
return self.model_.score(X, y, **kwargs)
# some models implement custom methods. Anything that is not implemented here
# will be delegated to the underlying model. There is one condition we have
# to cover: if the underlying estimator has class attributes they won't
# be accessible until we fit the model (since we instantiate the model there)
# to fix it, we try to look it up attributes in the instance, if there
# is no instance, we look up the class. More info here:
# https://scikit-learn.org/stable/developers/develop.html#estimator-types
def __getattr__(self, key):
if key != 'model_':
if hasattr(self, 'model_'):
return getattr(self.model_, key)
else:
return getattr(self.est_class, key)
else:
raise AttributeError(
"'{}' object has no attribute 'model_'".format(type(self).__name__))
# these two control logging
def enable_logging(self):
self._logging_enabled = True
def disable_logging(self):
self._logging_enabled = False
# ignore the following two for now, more info in the Appendix
def __getstate__(self):
state = self.__dict__.copy()
del state['_logger']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._logger = logging.getLogger(__name__)
# `check_estimator` has a `generate_only` parameter that lets us run checks one by one instead of failing at the first error. Let's use that option to check `LoggingEstimator`.
for est, check in check_estimator(LoggingEstimator, generate_only=True):
try:
check(est)
except AssertionError as e:
print('Failed: ', check, e)
# The names of the failing checks aren't very informative, so I took a look at the [source code](https://github.com/scikit-learn/scikit-learn/blob/b194674c4/sklearn/utils/estimator_checks.py).
#
# `check_parameters_default_constructible` checks that the estimator's `__init__` parameters are of certain types. Our estimator passes a class as an argument, which is why it breaks, but this shouldn't be an issue when interfacing with other components. I don't know why they restrict the argument types; my guess is that they want to avoid problems with objects that don't play nicely with the `multiprocessing` module.
#
# `check_no_attributes_set_in_init` is also about `__init__` arguments: according to the spec, we should not set any attributes other than the arguments, but we need the extra ones for logging to work. This should not affect integration either.
#
# Finally, `check_supervised_y_2d` checks that a warning is issued if a 2D numpy array is passed to `fit`, since it has to be converted to a 1D array. Our custom estimator wraps arbitrary estimators, which could be multi-output, so we cannot use the utility functions to fix this.
#
# The bottom line is that `check_estimator` runs a very strict test suite; if your estimator does not pass all the tests, it does not mean it won't work, but you'll have to be more careful about your implementation.
#
# Let's now see our pipeline in action. Note that we are also including our `InputGuard`, and we change the underlying model in `LoggingEstimator` to demonstrate that it works with any estimator.
# +
pipe = Pipeline([('guard', InputGuard()),
('scaler', StandardScaler()),
('reg', LoggingEstimator(est_class=ElasticNet))])
# perform hyperparameter tuning
grid = GridSearchCV(
pipe, param_grid={'reg__alpha': [0.5, 1.0, 2.0]}, n_jobs=-1)
best_pipe = grid.fit(X_train, y_train).best_estimator_
# make predictions using the best model
y_pred = best_pipe.predict(X_test)
print(f'MAE: {np.abs(y_test - y_pred).mean():.2f}')
# -
# Let's now configure the `logging` module and enable it in our custom estimator:
logging.basicConfig(level=logging.INFO)
best_pipe.named_steps['reg'].enable_logging()
# The following line shows our logging in effect:
best_pipe.predict(X_test.iloc[0:2])
# Since we implemented `__getattr__`, any model-specific attribute also works, let's get the linear model coefficients:
best_pipe.named_steps['reg'].coef_
# ### Appendix: making our estimator work with `pickle` (or any other pickling mechanism)
#
# Pickling an object means serializing it so it can be saved to disk. This is useful if we want to fit and then deploy a model ([be careful when doing this!](https://scikit-learn.org/stable/modules/model_persistence.html#security-maintainability-limitations)), but it is also needed if we want our model to work with the `multiprocessing` module. Some objects are *picklable* and some others are not (this also depends on which library you are using). `logger` objects can cause trouble with pickling, but we can easily fix this by deleting the logger before serializing and re-initializing it after loading. This boils down to adding two more methods, `__getstate__` and `__setstate__`; if you are interested in the details, [read this](https://docs.python.org/3/library/pickle.html#handling-stateful-objects).
# showing pickling and unpickling works
pickle.loads(pickle.dumps(best_pipe))
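# As a quick check that the round trip preserves the logging behavior (reusing the objects defined above), the restored copy gets a fresh logger from `__setstate__`:
# +
restored = pickle.loads(pickle.dumps(best_pipe))
restored.named_steps['reg'].enable_logging()
restored.predict(X_test.iloc[0:2])
# -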
# ## Closing remarks
# In this post we showed how to develop transformers and estimators compatible with scikit-learn. Given how many details the API has, using the `check_estimator` function will guide you through the process. However, if your implementation contains non-standard behavior (like ours), your custom objects may fail some tests even if they integrate correctly with other modules. In such cases you'll have to be careful about your implementation; using `check_estimator` with `generate_only=True` is useful for getting a list of failing tests and deciding whether each failure is acceptable or not.
#
# Following the scikit-learn API spec gives you access to a wide set of ML tools so you can focus on implementing your custom model and still use other modules for grid search, cross validation, etc. This is a huge time saver for ML projects.
#
# Source code for this post is available [here]({{url_source}}).
#
# Found an error in this post? [Click here to let us know]({{url_issue}}).
#
# Looking for commercial support? [Drop us a line](mailto:[email protected]).
#
# This post was generated using scikit-learn version:
# + hide=true
print(sklearn.__version__)
|
the-stack_0_22382 | """ Image class and related functions
Defines the Image class and functions that operate on it.
CLASSES
---------
Image
Subclass of numpy `ndarray`. Has the additional attribute properties
which contains the properties used to generate it as a `list` of
`dicts`.
Functions
---------
pad_image_to_fft(image: Image, axes = (0, 1))
Pads the image with zeros to optimize the speed of Fast Fourier
Transforms.
"""
import warnings
import cupy
import numpy as np
import numpy.lib.mixins
import operator as ops
from tensorflow import Tensor
import tensorflow
from .backend.tensorflow_bindings import TENSORFLOW_BINDINGS
CUPY_INSTALLED = False
try:
import cupy as cp
CUPY_INSTALLED = True
except Exception:
CUPY_INSTALLED = False
warnings.warn(
"cupy not installed. GPU-accelerated simulations will not be possible"
)
def _binary_method(op):
"""Implement a forward binary method with a noperator, e.g., __add__."""
def func(self, other):
self, other = coerce([self, other])
if isinstance(other, Image):
return Image(
op(self._value, other._value), copy=False
).merge_properties_from([self, other])
else:
return Image(op(self._value, other), copy=False).merge_properties_from(self)
func.__name__ = "__{}__".format(op.__name__)
return func
def _reflected_binary_method(op):
"""Implement a reflected binary method with a noperator, e.g., __radd__."""
def func(self, other):
self, other = coerce([self, other])
if isinstance(other, Image):
return Image(
op(other._value, self._value), copy=False
).merge_properties_from([other, self])
else:
return Image(op(other, self._value), copy=False).merge_properties_from(self)
func.__name__ = "__r{}__".format(op.__name__)
return func
def _inplace_binary_method(op):
"""Implement a reflected binary method with a noperator, e.g., __radd__."""
def func(self, other):
self, other = coerce([self, other])
if isinstance(other, Image):
self._value = op(self._value, other._value)
self.merge_properties_from(other)
else:
self._value = op(self._value, other)
return self
func.__name__ = "__i{}__".format(op.__name__)
return func
def _numeric_methods(op):
"""Implement forward, reflected and inplace binary methods with an ufunc."""
return (
_binary_method(op),
_reflected_binary_method(op),
_inplace_binary_method(op),
)
def _unary_method(
op,
):
"""Implement a unary special method with an ufunc."""
def func(self):
return Image(op(self._value)).merge_properties_from(self)
func.__name__ = "__{}__".format(op)
return func
class Image:
def __init__(self, value, copy=True):
super().__init__()
if copy:
self._value = self._view(value)
else:
if isinstance(value, Image):
self._value = value._value
else:
self._value = value
if isinstance(value, Image):
self.properties = list(value.properties)
else:
self.properties = []
def append(self, property_dict: dict):
"""Appends a dictionary to the properties list.
Parameters
----------
property_dict : dict
A dictionary to append to the property list. Most commonly
the current values of the properties of a feature.
Returns
-------
Image
Returns itself.
"""
self.properties = [*self.properties, property_dict]
return self
def get_property(
self, key: str, get_one: bool = True, default: any = None
) -> list or any:
"""Retrieve a property.
If the feature has the property defined by `key`, return
its current_value. Otherwise, return `default`.
If `get_one` is True, the first instance is returned;
otherwise, all instances are returned as a list.
Parameters
----------
key
The name of the property.
get_one: optional
Whether to return all instances of that property or just the first.
default : optional
What is returned if the property is not found.
Returns
-------
any
The value of the property if found, else `default`.
"""
if get_one:
for prop in self.properties:
if key in prop:
return prop[key]
return default
else:
return [prop[key] for prop in self.properties if key in prop] or default
def merge_properties_from(self, other: "Image") -> "Image":
"""Merge properties with those from another Image.
        Appends properties from another image such that no property is duplicated.
Uniqueness of a dictionary of properties is determined from the
property `hash_key`.
Most functions involving two images should automatically output an image with
merged properties. However, since each property is guaranteed to be unique,
it is safe to manually call this function if there is any uncertainty.
Parameters
----------
other
The Image to retrieve properties from.
"""
if isinstance(other, Image):
for new_prop in other.properties:
should_append = True
for my_prop in self.properties:
if my_prop is new_prop:
# Prop already added
should_append = False
break
if should_append:
self.append(new_prop)
elif isinstance(other, np.ndarray):
return self
else:
try:
for i in other:
self.merge_properties_from(i)
except TypeError:
pass
return self
def _view(self, value):
if isinstance(value, Image):
return self._view(value._value)
if isinstance(value, (np.ndarray, list, tuple, int, float, bool)):
return np.array(value)
if isinstance(value, Tensor):
return value
return value
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
args = coerce(inputs)
args = tuple(strip(arg) for arg in args)
if isinstance(self._value, Tensor):
if ufunc in TENSORFLOW_BINDINGS:
ufunc = TENSORFLOW_BINDINGS[ufunc]
else:
return NotImplemented
out = kwargs.get("out", ())
if out:
kwargs["out"] = tuple(x._value if isinstance(x, Image) else x for x in out)
results = getattr(ufunc, method)(*args, **kwargs)
if type(results) is tuple:
outputs = []
for result in results:
out = Image(result, copy=False)
out.merge_properties_from(inputs)
outputs.append(out)
return tuple(outputs)
elif method == "at":
return None
else:
result = Image(results, copy=False)
result.merge_properties_from(inputs)
return result
def __array_function__(self, func, types, args, kwargs):
# # Note: this allows subclasses that don't override
# # __array_function__ to handle DiagonalArray objects.
# if not all(issubclass(t, Image) for t in types):
# return NotImplemented
values = coerce(args)
values = [strip(arg) for arg in values]
if isinstance(self._value, Tensor):
if func in TENSORFLOW_BINDINGS:
func = TENSORFLOW_BINDINGS[func]
else:
return NotImplemented
elif not (
isinstance(self._value, (np.ndarray, tuple, list))
or np.isscalar(self._value)
) and not hasattr(self._value, "__array_function__"):
return NotImplemented
out = func(*values, **kwargs)
if isinstance(out, (bool, int, float)):
return out
out = Image(out, copy=False)
for inp in args:
if isinstance(inp, Image):
out.merge_properties_from(inp)
return out
def __array__(self, *args, **kwargs):
return np.array(self.to_numpy()._value)
def to_tf(self):
if isinstance(self._value, np.ndarray):
return Image(
tensorflow.constant(self._value), copy=False
).merge_properties_from(self)
if isinstance(self._value, cupy.ndarray):
return Image(
tensorflow.constant(self._value.get()), copy=False
).merge_properties_from(self)
return self
def to_cupy(self):
if isinstance(self._value, np.ndarray):
return Image(cupy.array(self._value), copy=False).merge_properties_from(
self
)
return self
def to_numpy(self):
if isinstance(self._value, cupy.ndarray):
return Image(self._value.get(), copy=False).merge_properties_from(self)
if isinstance(self._value, tensorflow.Tensor):
return Image(self._value.numpy(), copy=False).merge_properties_from(self)
return self
def __getattr__(self, key):
return getattr(self._value, key)
def __getitem__(self, idx):
idx = strip(idx)
out = Image(self._value.__getitem__(idx), copy=False)
out.merge_properties_from([self, idx])
return out
def __setitem__(self, key, value):
key = strip(key)
value = strip(value)
o = self._value.__setitem__(key, value)
self.merge_properties_from([key, value])
return o
def __int__(self):
return int(self._value)
def __float__(self):
return float(self._value)
def __nonzero__(self):
return bool(self._value)
def __bool__(self):
return bool(self._value)
def __round__(self, *args, **kwargs):
return round(self._value, *args, **kwargs)
def __len__(self):
return len(self._value)
def __repr__(self):
return repr(self._value) + "\nWith properties:" + repr(self.properties)
__lt__ = _binary_method(ops.lt)
__le__ = _binary_method(ops.le)
__eq__ = _binary_method(ops.eq)
__ne__ = _binary_method(ops.ne)
__gt__ = _binary_method(ops.gt)
__ge__ = _binary_method(ops.ge)
# numeric methods
__add__, __radd__, __iadd__ = _numeric_methods(ops.add)
__sub__, __rsub__, __isub__ = _numeric_methods(ops.sub)
__mul__, __rmul__, __imul__ = _numeric_methods(ops.mul)
__matmul__, __rmatmul__, __imatmul__ = _numeric_methods(ops.matmul)
# Python 3 does not use __div__, __rdiv__, or __idiv__
__truediv__, __rtruediv__, __itruediv__ = _numeric_methods(ops.truediv)
__floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(ops.floordiv)
__mod__, __rmod__, __imod__ = _numeric_methods(ops.mod)
__divmod__ = _binary_method(divmod)
__rdivmod__ = _reflected_binary_method(divmod)
# __idivmod__ does not exist
# TODO: handle the optional third argument for __pow__?
__pow__, __rpow__, __ipow__ = _numeric_methods(ops.pow)
__lshift__, __rlshift__, __ilshift__ = _numeric_methods(ops.lshift)
__rshift__, __rrshift__, __irshift__ = _numeric_methods(ops.rshift)
__and__, __rand__, __iand__ = _numeric_methods(ops.and_)
__xor__, __rxor__, __ixor__ = _numeric_methods(ops.xor)
__or__, __ror__, __ior__ = _numeric_methods(ops.or_)
# unary methods
__neg__ = _unary_method(ops.neg)
__pos__ = _unary_method(ops.pos)
__abs__ = _unary_method(ops.abs)
__invert__ = _unary_method(ops.invert)
def strip(v):
if isinstance(v, Image):
return v._value
if isinstance(v, (list, tuple)):
return type(v)([strip(i) for i in v])
return v
def array(v):
return np.array(strip(v))
def coerce(images):
images = [Image(image, copy=False) for image in images]
# if any(isinstance(i._value, tensorflow.Tensor) for i in images):
# return [i.to_tf() for i in images]
if any(isinstance(i._value, cupy.ndarray) for i in images):
return [i.to_cupy() for i in images]
else:
return images
FASTEST_SIZES = [0]
for n in range(1, 10):
FASTEST_SIZES += [2 ** a * 3 ** (n - a - 1) for a in range(n)]
FASTEST_SIZES = np.sort(FASTEST_SIZES)
def pad_image_to_fft(image: Image, axes=(0, 1)) -> Image:
"""Pads image to speed up fast fourier transforms.
Pads image to speed up fast fourier transforms by adding 0s to the
end of the image.
Parameters
----------
image
The image to pad
axes : iterable of int, optional
The axes along which to pad.
"""
def _closest(dim):
        # Returns the smallest value from FASTEST_SIZES
        # that is greater than or equal to dim
for size in FASTEST_SIZES:
if size >= dim:
return size
new_shape = np.array(image.shape)
for axis in axes:
new_shape[axis] = _closest(new_shape[axis])
increase = np.array(new_shape) - image.shape
pad_width = [(0, inc) for inc in increase]
return np.pad(image, pad_width, mode="constant")
def maybe_cupy(array):
from . import config
if config.gpu_enabled:
return cp.array(array)
return array
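# Minimal usage sketch (illustrative only; the shapes in the comments are examples).
# Because this module uses relative imports, run it as a module, e.g.
# `python -m <package>.image`, where <package> is whatever package holds this file.
if __name__ == "__main__":
    demo = Image(np.zeros((97, 130)))
    demo.append({"pixel_size": 0.1})
    padded = pad_image_to_fft(demo)
    # Properties survive numpy operations, and each padded dimension is rounded up
    # to the next 2 ** a * 3 ** b size, e.g. (97, 130) -> (108, 144).
    print(padded.shape, padded.get_property("pixel_size"))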
|
the-stack_0_22384 | from subdaap import monkey # noqa
from subdaap.application import Application
from subdaap.utils import VerboseAction, PathAction, NewPathAction
import argparse
import logging
import atexit
import sys
import gc
import os
# Logger instance
logger = logging.getLogger(__name__)
def parse_arguments():
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser()
# Add options
parser.add_argument(
"-D", "--daemon", action="store_true", help="run as daemon")
parser.add_argument(
"-v", "--verbose", nargs="?", action=VerboseAction, default=0,
help="toggle verbose mode (-vv, -vvv for more)")
parser.add_argument(
"-c", "--config-file", action=PathAction, default="config.ini",
help="config file")
parser.add_argument(
"-d", "--data-dir", action=PathAction, default=os.getcwd(),
help="data directory")
parser.add_argument(
"-p", "--pid-file", action=NewPathAction, help="pid file")
parser.add_argument(
"-l", "--log-file", action=NewPathAction, help="log file")
# Parse command line
return parser.parse_args(), parser
def setup_logging(console=True, log_file=None, verbose=False):
"""
Setup logging.
:param bool console: If True, log to console.
:param str log_file: If set, log to a file (append) as specified.
:param bool verbose: Enable debug logging if True.
"""
# Configure logging
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
level = logging.DEBUG if verbose else logging.INFO
# Add console output handler
if console:
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
console_log_handler.setFormatter(formatter)
logging.getLogger().addHandler(console_log_handler)
# Add file output handler
if log_file:
file_log_handler = logging.FileHandler(log_file)
file_log_handler.setLevel(level)
file_log_handler.setFormatter(formatter)
logging.getLogger().addHandler(file_log_handler)
logging.getLogger().setLevel(level)
logger.info("Verbose level is %d", verbose)
def daemonize(pid_file=None):
"""
Daemonize the current process. Returns the PID of the continuing child
process. As an extra option, the PID of the child process can be written to
a specified PID file.
Note that parent process ends with `os._exit` instead of `sys.exit`. The
first will not trigger any cleanups that may have been set. These are left
for the child process that continues.
:param str pid_file: Path to PID file to write process ID into. Must be in
a writeable folder. If left `None`, no file will be
written.
:return: Process ID
:rtype: int
"""
# Dependency check to make sure the imports are OK. Saves you from a lot of
# debugging trouble when you forget to import them.
assert atexit.register and os.fork and sys.stdout and gc.collect
# Force cleanup old resources to minimize the risk of sharing them.
gc.collect()
# First fork
try:
if os.fork() > 0:
os._exit(0)
except OSError as e:
sys.stderr.write("Unable to fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent
os.setsid()
os.umask(0)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError as e:
sys.stderr.write("Unable to fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect file descriptors
sys.stdout.flush()
sys.stderr.flush()
    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
# Write PID file
if pid_file:
atexit.register(os.remove, pid_file)
with open(pid_file, "w+") as fp:
fp.write("%d" % os.getpid())
# Return the PID
return os.getpid()
def main():
"""
Main entry point. Parses arguments, daemonizes and creates the application.
"""
# Parse arguments and configure application instance.
arguments, parser = parse_arguments()
if arguments.daemon:
daemonize(arguments.pid_file)
setup_logging(not arguments.daemon, arguments.log_file, arguments.verbose)
# Change to data directory
os.chdir(arguments.data_dir)
# Create application instance and run it.
try:
application = Application(
config_file=arguments.config_file,
data_dir=arguments.data_dir,
verbose=arguments.verbose)
except Exception as e:
logger.error(
"One or more components failed to initialize: %s. The application "
"will now exit.", e)
if arguments.verbose > 1:
logger.exception("Stack trace")
return 1
try:
application.start()
except KeyboardInterrupt:
application.stop()
# E.g. `python SubDaap.py --daemon --config-file=config.ini`
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_22387 | #!/usr/bin/env python
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('transform', parent_package, top_path)
config.add_data_dir('tests')
cython(['_hough_transform.pyx',
'_warps_cy.pyx',
'_radon_transform.pyx'], working_path=base_path)
config.add_extension('_hough_transform', sources=['_hough_transform.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_warps_cy', sources=['_warps_cy.c'],
include_dirs=[get_numpy_include_dirs(), '../_shared'])
config.add_extension('_radon_transform',
sources=['_radon_transform.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikit-image Developers',
author='scikit-image Developers',
maintainer_email='[email protected]',
description='Transforms',
url='https://github.com/scikit-image/scikit-image',
license='SciPy License (BSD Style)',
**(configuration(top_path='').todict())
)
|
the-stack_0_22389 | """ Implement the transformations we need to use
to convert a link to an adaptive loss scaled link. """
# NOTE: this file is deprecated
import chainer
import chainer.links as L
import chainer.initializers as I
# pylint: disable=unused-wildcard-import
from ada_loss.chainer_impl.links import *
__all__ = [
"AdaLossTransformLinear",
"AdaLossTransformConvolution2D",
]
class AdaLossTransform(object):
""" The base class """
def __call__(self, link, cfg):
""" Entry """
raise NotImplementedError("This call function should be implemented properly")
class AdaLossTransformLinear(AdaLossTransform):
""" """
cls = L.Linear
def __call__(self, link, cfg, initialW=I.HeNormal()):
assert isinstance(link, self.cls)
link_ = AdaLossLinear(
link.in_size,
out_size=link.out_size,
nobias=link.b is None,
ada_loss_cfg=cfg,
)
link_.copyparams(link)
return link_
class AdaLossTransformConvolution2D(AdaLossTransform):
""" """
cls = L.Convolution2D
def __call__(self, link, cfg, initialW=I.HeNormal()):
assert isinstance(link, self.cls)
link_ = AdaLossConvolution2D(
link.in_channels,
link.out_channels,
ksize=link.ksize,
stride=link.stride,
pad=link.pad,
dilate=link.dilate,
groups=link.groups,
nobias=link.b is None,
ada_loss_cfg=cfg,
)
link_.copyparams(link)
return link_
|
the-stack_0_22390 | import numpy as np
import torch
import torchmetrics
from utils import criterion_utils
def frame_based_metrics(model, data_loader, threshold, reduction):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device=device)
model.eval()
precision = torchmetrics.Precision(threshold=threshold, average="micro", compute_on_step=False)
precision.to(device=device)
recall = torchmetrics.Recall(threshold=threshold, average="micro", compute_on_step=False)
recall.to(device=device)
f1 = torchmetrics.F1(threshold=threshold, average="micro", compute_on_step=False)
f1.to(device=device)
with torch.no_grad():
for batch_idx, data in enumerate(data_loader, 0):
audio_feats, audio_lens, queries, query_lens, labels, infos = data
audio_feats, queries, labels = audio_feats.to(device), queries.to(device), labels.to(device)
if reduction == "baseline":
alignment_matrices = torch.rand_like(labels, device=device)
else:
audio_embeds, query_embeds = model(audio_feats, queries, query_lens)
# Alignment matrices [N, T, Q]
alignment_matrices = criterion_utils.compute_similarities(audio_embeds, query_embeds, audio_lens)
# Aggregate along Q
if reduction == "mean":
alignment_matrices = alignment_matrices.mean(dim=2, keepdim=False) # [N, T]
elif reduction == "max":
alignment_matrices = alignment_matrices.max(dim=2, keepdim=False).values # [N, T]
# Min-max normalization
alignment_matrices -= alignment_matrices.min(dim=1, keepdim=True)[0]
alignment_matrices /= alignment_matrices.max(dim=1, keepdim=True)[0]
# Frame-based metric
precision(alignment_matrices, labels.long())
recall(alignment_matrices, labels.long())
f1(alignment_matrices, labels.long())
precision = precision.compute().item()
recall = recall.compute().item()
f1 = f1.compute().item()
return {"precision": precision, "recall": recall, "f1": f1}
def retrieval_metrics(model, dataset, reduction):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device=device)
model.eval()
# Initial metric instances
recalls = {"a2c_R": {}, "c2a_R": {}}
for task in recalls:
for k in [1, 5]:
instance = torchmetrics.RetrievalRecall(empty_target_action="skip", compute_on_step=False, k=k)
instance.to(device=device)
recalls[task]["{}{}".format(task, k)] = instance
with torch.no_grad():
# Group audio-caption pairs by ytid
ytid_group = {}
for i in range(len(dataset)):
item = dataset.data_df.iloc[i]
if ytid_group.get(item["ytid"]) is None:
ytid_group[item["ytid"]] = [i]
else:
ytid_group[item["ytid"]].append(i)
# ytid-to-index, index-to-ytid
ytid2ind = {ytid: ind for ind, ytid in enumerate(ytid_group, 0)}
ind2ytid = {ytid2ind[ytid]: ytid for ytid in ytid2ind}
# Randomize 30 audio-caption pairs (1 ground-truth + 29 non-positive) for each audio sample (ytid)
for i_ytid in ytid_group:
indexes, a2c_preds, c2a_preds, target = [], [], [], []
# Select the ground truth
i = np.random.choice(a=ytid_group[i_ytid], size=1, replace=False, p=None)[0]
i_audio_emb, i_query_emb, i_info = transform(model, dataset, i, device)
gt_score = criterion_utils.score(i_audio_emb, i_query_emb, reduction=reduction)
indexes.append(i)
a2c_preds.append(gt_score.item())
c2a_preds.append(gt_score.item())
target.append(i_info["ytid"] == i_ytid)
# Select 29 non-positive audio samples (ytids)
num_items = 30
ytid_indexes = np.array([ind for ind in ind2ytid])
if len(ytid_indexes) > num_items:
probs = np.array([ytid2ind[i_ytid] != ind for ind in ytid_indexes])
probs = probs / (len(ytid_indexes) - 1)
ytid_indexes[:num_items - 1] = np.random.choice(a=ytid_indexes, size=num_items - 1, replace=False,
p=probs)
ytid_indexes[num_items - 1] = ytid2ind[i_ytid]
ytid_indexes = ytid_indexes[:num_items]
assert len(ytid_indexes) == num_items
assert ytid_indexes[num_items - 1] == ytid2ind[i_ytid]
# Randomize 29 non-positives
for ind in ytid_indexes[:num_items - 1]:
j_ytid = ind2ytid[ind]
j = np.random.choice(a=ytid_group[j_ytid], size=1, replace=False, p=None)[0]
j_audio_emb, j_query_emb, j_info = transform(model, dataset, j, device)
a2c_score = criterion_utils.score(i_audio_emb, j_query_emb, reduction=reduction)
c2a_score = criterion_utils.score(j_audio_emb, i_query_emb, reduction=reduction)
indexes.append(i)
a2c_preds.append(a2c_score.item())
c2a_preds.append(c2a_score.item())
target.append(i_info["ytid"] == j_info["ytid"])
indexes = torch.tensor(indexes, device=device, dtype=torch.long)
a2c_preds = torch.tensor(a2c_preds, device=device)
c2a_preds = torch.tensor(c2a_preds, device=device)
target = torch.tensor(target, device=device)
# Update metrics
for key in recalls["a2c_R"]:
instance = recalls["a2c_R"][key]
instance(a2c_preds, target, indexes=indexes)
for key in recalls["c2a_R"]:
instance = recalls["c2a_R"][key]
instance(c2a_preds, target, indexes=indexes)
# Compute metrics
for task in recalls:
for key in recalls[task]:
instance = recalls[task][key]
recalls[task][key] = instance.compute().item()
return recalls
def transform(model, dataset, index, device=None):
audio, query, _, info = dataset[index]
audio = torch.unsqueeze(audio, dim=0).to(device=device)
query = torch.unsqueeze(query, dim=0).to(device=device)
audio_emb, query_emb = model(audio, query, [query.size(-1)])
audio_emb = torch.squeeze(audio_emb, dim=0).to(device=device)
query_emb = torch.squeeze(query_emb, dim=0).to(device=device)
return audio_emb, query_emb, info
|
the-stack_0_22391 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
from bokeh.plotting import *
from bokeh.objects import Range1d
N = 80
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
output_server("line.py example")
hold()
line(x, y, color="#3333ee", tools="pan,zoom,resize")
line([0,4*np.pi], [-1, 1], color="#ee3333", tools="pan,zoom,resize")
show()
import time
from bokeh.objects import Glyph
renderer = [r for r in curplot().renderers if isinstance(r, Glyph)][0]
ds = renderer.data_source
while True:
for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):
ds.data["y"] = y * i
ds._dirty = True
session().store_obj(ds)
time.sleep(0.05)
|
the-stack_0_22392 | import re
from servo import Servo
import socket
import time
from threading import Thread
import queue
ERROR_TIMEOUT = 0.5 # Seconds to wait after a client socket error.
class ServoServer(Thread):
"""
A thread class that listens for servo messages over a socket in the form of
"<pin address> <angle>".
"""
def __init__(self, port):
"""
        Initializes the server but does not open any ports until run() is
called.
:param port: The port to use to listen for commands.
"""
super(ServoServer, self).__init__()
self.__port = port
def run(self):
"""
Infinitely loops, waiting for socket connections and commands.
"""
try:
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('127.0.0.1', self.__port))
servo_queue = ServoQueue()
servo_queue.start()
while True:
try:
server_socket.listen(2)
client_socket, _ = server_socket.accept()
message = client_socket.recv(2048).decode("utf-8")
re_result = re.match('^(?P<pin>[0-9]+) (?P<angle>[0-9]{1,3})$', message)
if re_result:
pin = int(re_result.group('pin'))
angle = int(re_result.group('angle'))
servo_queue.add_servo(Servo(pin, angle))
client_socket.shutdown(socket.SHUT_RDWR)
client_socket.close()
except Exception as e:
print('Socket exception %s' % e)
time.sleep(ERROR_TIMEOUT)
except Exception as e:
print('Server exception %s' % e)
return
class ServoQueue(Thread):
"""
A thread class that manages the servos in a queue. Allows only one servo to
run at a time.
"""
def __init__(self):
self.__queue = queue.Queue()
super(ServoQueue, self).__init__()
def add_servo(self, srvo):
self.__queue.put(srvo)
def run(self):
while True:
srvo = self.__queue.get()
srvo.start()
while srvo.is_alive():
time.sleep(0.1)
|
the-stack_0_22393 | import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import matplotlib.pyplot as plt
from . import neuron as ne
from . import layers
def setup_device(gpuid=None):
"""
Configures the appropriate TF device from a cuda device string.
Returns the device id and total number of devices.
"""
if gpuid is not None and not isinstance(gpuid, str):
gpuid = str(gpuid)
nb_devices = len(gpuid.split(','))
if gpuid is not None and (gpuid != '-1'):
device = '/gpu:' + gpuid
os.environ['CUDA_VISIBLE_DEVICES'] = gpuid
# GPU memory configuration differs between TF 1 and 2
if hasattr(tf, 'ConfigProto'):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
tf.keras.backend.set_session(tf.Session(config=config))
else:
tf.config.set_soft_device_placement(True)
for pd in tf.config.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(pd, True)
else:
device = '/cpu:0'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
return device, nb_devices
def transform(img, trf, interp_method='linear', rescale=None):
"""
Applies a transform to an image. Note that inputs and outputs are
in tensor format i.e. (batch, *imshape, nchannels).
"""
img_input = tf.keras.Input(shape=img.shape[1:])
trf_input = tf.keras.Input(shape=trf.shape[1:])
trf_scaled = trf_input if rescale is None else layers.RescaleTransform(rescale)(trf_input)
y_img = layers.SpatialTransformer(interp_method=interp_method)([img_input, trf_scaled])
return tf.keras.Model([img_input, trf_input], y_img).predict([img, trf])
def is_affine(shape):
return len(shape) == 1 or (len(shape) == 2 and shape[0] + 1 == shape[1])
def extract_affine_ndims(shape):
if len(shape) == 1:
# if vector, just compute ndims since length = N * (N + 1)
return int((np.sqrt(4 * int(shape[0]) + 1) - 1) / 2)
else:
return int(shape[0])
def affine_shift_to_identity(trf):
ndims = extract_affine_ndims(trf.shape.as_list())
trf = tf.reshape(trf, [ndims, ndims + 1])
trf = tf.concat([trf, tf.zeros((1, ndims + 1))], axis=0)
trf += tf.eye(ndims + 1)
return trf
def affine_identity_to_shift(trf):
ndims = int(trf.shape.as_list()[-1]) - 1
trf = trf - tf.eye(ndims + 1)
trf = trf[:ndims, :]
return tf.reshape(trf, [ndims * (ndims + 1)])
def value_at_location(x, single_vol=False, single_pts=False, force_post_absolute_val=True):
"""
Extracts value at given point.
"""
# vol is batch_size, *vol_shape, nb_feats
# loc_pts is batch_size, nb_surface_pts, D or D+1
vol, loc_pts = x
fn = lambda y: ne.utils.interpn(y[0], y[1])
z = tf.map_fn(fn, [vol, loc_pts], dtype=tf.float32)
if force_post_absolute_val:
z = K.abs(z)
return z
def point_spatial_transformer(x, single=False, sdt_vol_resize=1):
"""
Transforms surface points with a given deformation.
Note that the displacement field that moves image A to image B will be "in the space of B".
That is, `trf(p)` tells you "how to move data from A to get to location `p` in B".
Therefore, that same displacement field will warp *landmarks* in B to A easily
(that is, for any landmark `L(p)`, it can easily find the appropriate `trf(L(p))` via interpolation.
"""
# surface_points is a N x D or a N x (D+1) Tensor
# trf is a *volshape x D Tensor
surface_points, trf = x
trf = trf * sdt_vol_resize
surface_pts_D = surface_points.get_shape().as_list()[-1]
trf_D = trf.get_shape().as_list()[-1]
assert surface_pts_D in [trf_D, trf_D + 1]
if surface_pts_D == trf_D + 1:
li_surface_pts = K.expand_dims(surface_points[..., -1], -1)
surface_points = surface_points[..., :-1]
# just need to interpolate.
# at each location determined by surface point, figure out the trf...
# note: if surface_points are on the grid, gather_nd should work as well
fn = lambda x: ne.utils.interpn(x[0], x[1])
diff = tf.map_fn(fn, [trf, surface_points], dtype=tf.float32)
ret = surface_points + diff
if surface_pts_D == trf_D + 1:
ret = tf.concat((ret, li_surface_pts), -1)
return ret
def plot_history(hist, model_name):
"""
Quick function to plot the history
"""
# plt.figure()
plt.style.use('ggplot')
plt.plot(hist.epoch, hist.history['loss'], '.-')
plt.ylabel('loss')
plt.xlabel('epoch')
# plt.show()
dir_imgs = f'./logs/learning_curve/'
if not os.path.exists(dir_imgs):
os.makedirs(dir_imgs)
plt.savefig(dir_imgs + f'{model_name}.png', format='png', dpi=100, transparent=False, bbox_inches='tight')
|
the-stack_0_22395 | import requests
import telebot,os
API_KEY=os.environ.get('TOKEN')
bot=telebot.TeleBot(API_KEY)
from datetime import datetime
#from flask import request
import re,textwrap
now=datetime.now()
today_date=now.strftime("%d-%m-%Y")
base_url="https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict"
district={622: 'Agra', 623: 'Aligarh', 625: 'Ambedkar Nagar', 626: 'Amethi', 627: 'Amroha', 628: 'Auraiya', 646: 'Ayodhya', 629: 'Azamgarh', 630: 'hathrus'}
def availability_data(response):
response_json=response.json()
message=""
for center in response_json["centers"]:
for session in center["sessions"]:
if(session["available_capacity_dose1"]>0):
message+="PinCode:{} \nDate :{} \nCenter Name :{} \nMin Age :{} \nVaccine Type :{} \nCapacity :{}\n---\n".format(center["pincode"],
session["date"],center["name"],session["min_age_limit"],session["vaccine"],session["available_capacity_dose1"])
return message
def fetch_data_from_covin(dist_id):
query="?district_id={}&date={}".format(dist_id,today_date)
final_url=base_url+query
response=requests.get(final_url)
return availability_data(response)
#print(fetch_data_from_covin(622
if __name__=="__main__":
try:
@bot.message_handler(commands=['start'])
def start_message(message):
text="i will help you to find vaccine slots in your district just type districts then enter your district code:) "
bot.send_message(message.chat.id,text)
#bot.reply_to(message,"somthing went wrong :-{")
@bot.message_handler(content_types=['text'])
def send_text(message):
try:
txt=message.text
if(len(txt)==0):
bot.reply_to(message,"NOT available")
if txt.lower()=="district":
districts=""
for a,b in district.items():
districts+="{} - {}\n".format(a,b)
bot.reply_to(message,districts)
elif txt.isnumeric() and int(txt)>0:
dist_id=int(txt)
slots=fetch_data_from_covin(dist_id)
split_text=textwrap.wrap(slots,width=4096,break_long_words=False)
#bot.sendMessage(chat_id=chat_id, text=slots, reply_to_message_id=msg_id)
if len(slots)>=4095:
for text in split_text:
text=text+"\n--"
bot.send_message(message.chat.id,text.upper())
else:
bot.send_message(message.chat.id,slots)
else:
bot.send_message(message.chat.id,"INVALID CODE! TRY AGAIN")
# elif message.text.lower() =='Bye':
# bot.send_message(message.chat.id,'see you soom' )
# elif message.text.lower() == 'I love you':
# bot.send_sticker(message.chat.id, 'API')
# bot.send_message(message.chat.id,'Wrong code ' )
except Exception as e:
print(e)
bot.reply_to(message,"slots are not available :-{")
bot.polling(none_stop=True)
except Exception as e:
print(e)
bot.polling(none_stop=True)
|
the-stack_0_22396 | import django_tables2 as tables
from dcim.models import Location, Region, Site, SiteGroup
from tenancy.tables import TenantColumn
from utilities.tables import (
BaseTable, ButtonsColumn, ChoiceFieldColumn, LinkedCountColumn, MarkdownColumn, MPTTColumn, TagColumn, ToggleColumn,
)
from .template_code import LOCATION_ELEVATIONS
__all__ = (
'LocationTable',
'RegionTable',
'SiteTable',
'SiteGroupTable',
)
#
# Regions
#
class RegionTable(BaseTable):
pk = ToggleColumn()
name = MPTTColumn(
verbose_name='地区',
linkify=True
)
site_count = LinkedCountColumn(
viewname='dcim:site_list',
url_params={'region_id': 'pk'},
verbose_name='站点数'
)
actions = ButtonsColumn(Region)
class Meta(BaseTable.Meta):
model = Region
fields = ('pk', 'name', 'slug', 'site_count', 'description', 'actions')
default_columns = ('pk', 'name', 'site_count', 'description', 'actions')
#
# Site groups
#
class SiteGroupTable(BaseTable):
pk = ToggleColumn()
name = MPTTColumn(
verbose_name='站点组名',
linkify=True
)
site_count = LinkedCountColumn(
viewname='dcim:site_list',
url_params={'group_id': 'pk'},
verbose_name='站点数'
)
actions = ButtonsColumn(SiteGroup)
class Meta(BaseTable.Meta):
model = SiteGroup
fields = ('pk', 'name', 'slug', 'site_count', 'description', 'actions')
default_columns = ('pk', 'name', 'site_count', 'description', 'actions')
#
# Sites
#
class SiteTable(BaseTable):
pk = ToggleColumn()
name = tables.Column(
verbose_name='站点名',
linkify=True
)
status = ChoiceFieldColumn(
verbose_name='状态'
)
region = tables.Column(
verbose_name='地区',
linkify=True
)
group = tables.Column(
verbose_name='组别',
linkify=True
)
tenant = TenantColumn(
verbose_name='租户'
)
comments = MarkdownColumn()
tags = TagColumn(
url_name='dcim:site_list'
)
class Meta(BaseTable.Meta):
model = Site
fields = (
'pk', 'name', 'slug', 'status', 'facility', 'region', 'group', 'tenant', 'asn', 'time_zone', 'description',
'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
'contact_email', 'comments', 'tags',
)
default_columns = ('pk', 'name', 'status', 'facility', 'region', 'group', 'tenant', 'asn', 'description')
#
# Locations
#
class LocationTable(BaseTable):
pk = ToggleColumn()
name = MPTTColumn(
verbose_name='名称',
linkify=True
)
site = tables.Column(
verbose_name='站点',
linkify=True
)
rack_count = LinkedCountColumn(
viewname='dcim:rack_list',
url_params={'location_id': 'pk'},
verbose_name='机架数'
)
device_count = LinkedCountColumn(
viewname='dcim:device_list',
url_params={'location_id': 'pk'},
verbose_name='设备数'
)
actions = ButtonsColumn(
model=Location,
prepend_template=LOCATION_ELEVATIONS
)
class Meta(BaseTable.Meta):
model = Location
fields = ('pk', 'name', 'site', 'rack_count', 'device_count', 'description', 'slug', 'actions')
default_columns = ('pk', 'name', 'site', 'rack_count', 'device_count', 'description', 'actions')
|
the-stack_0_22402 | """
This is mostly based on references/infection_alg.pdf
"""
import ast
from functools import (lru_cache, partial)
import json
import logging
import mocos_helper
#import random
import time
from collections import defaultdict
import pickle
import psutil
from shutil import copyfile
from math import log
from git import Repo
import pandas as pd
import scipy.optimize
import scipy.stats
from src.models.schemas import *
from src.models.defaults import *
from src.models.states_and_functions import *
from src.visualization.visualize import Visualize
import click
from dotenv import find_dotenv, load_dotenv
from queue import (PriorityQueue)
q = PriorityQueue()
class InfectionModel:
def __init__(self, params_path: str, df_individuals_path: str, df_households_path: str = '') -> None:
self.params_path = params_path
self.df_individuals_path = df_individuals_path
self.df_households_path = df_households_path
logger.info('Loading params...')
self._params = dict()
with open(params_path, 'r') as params_file:
params = json.loads(
params_file.read()
) # TODO: check whether this should be moved to different place
logger.info('Parsing params...')
for key, schema in infection_model_schemas.items():
self._params[key] = schema.validate(params.get(key, defaults[key]))
default_household_input_path = os.path.join(self._params[OUTPUT_ROOT_DIR], self._params[EXPERIMENT_ID],
'input_df_households.csv') # TODO: ensure households are valid!
if df_households_path == '':
self.df_households_path = default_household_input_path
self._global_time = None
self._max_time = None
self._vis = None
self._max_time_offset = 0.0
self._expected_case_severity = None
self._df_individuals = None
self._df_households = None
#self._individuals_gender = None
self._individuals_age = None
self._individuals_household_id = None
self._individuals_indices = None
self._households_capacities = None
self._households_inhabitants = None
self._init_for_stats = None
self._affected_people = 0
self._active_people = 0
self._quarantined_people = 0
self._detected_people = 0
self._immune_people = 0
self._deaths = 0
self._icu_needed = 0
self._disable_friendship_kernel = False
self._set_up_data_frames()
self._infection_status = None
self._detection_status = None
self._quarantine_status = None
self._expected_case_severity = None
if self._params[REUSE_EXPECTED_CASE_SEVERITIES]:
self._expected_case_severity = self.draw_expected_case_severity()
self._infections_dict = None
self._progression_times_dict = None
t0_f, t0_args, t0_kwargs = self.setup_random_distribution(T0)
self.rv_t0 = lambda: t0_f(*t0_args, **t0_kwargs)
t1_f, t1_args, t1_kwargs = self.setup_random_distribution(T1)
self.rv_t1 = lambda: t1_f(*t1_args, **t1_kwargs)
t2_f, t2_args, t2_kwargs = self.setup_random_distribution(T2)
self.rv_t2 = lambda: t2_f(*t2_args, **t2_kwargs)
tdeath_f, tdeath_args, tdeath_kwargs = self.setup_random_distribution(TDEATH)
self.rv_tdeath = lambda: tdeath_f(*tdeath_args, **tdeath_kwargs)
# TODO: This should be refactored
self.fear_fun = dict()
self.fear_weights_detected = dict()
self.fear_weights_deaths = dict()
self.fear_scale = dict()
self.fear_loc = dict()
self.fear_limit_value = dict()
self.serial_intervals = []
self.band_time = None
self._last_affected = None
self._per_day_increases = {}
self._disable_constant_age_kernel = False
self._constant_age_helper_age_dict = {}
self._constant_age_individuals = defaultdict(list)
self._setup_constant_age_kernel()
def _setup_constant_age_kernel(self):
if self._params[CONSTANT_AGE_SETUP] is None:
self._disable_constant_age_kernel = True
return
if isinstance(self._params[CONSTANT_AGE_SETUP][AGE], int):
self._constant_age_helper_age_dict[self._params[CONSTANT_AGE_SETUP][AGE]] = 0
else:
if self._params[CONSTANT_AGE_SETUP][INTER_AGE_CONTACTS]:
# so all ages specified can be mixed
for age in self._params[CONSTANT_AGE_SETUP][AGE]:
self._constant_age_helper_age_dict[age] = 0
else:
for i, age in enumerate(self._params[CONSTANT_AGE_SETUP][AGE]):
self._constant_age_helper_age_dict[age] = i
for age, individual_list_key in self._constant_age_helper_age_dict.items():
self._constant_age_individuals[individual_list_key].extend([
k for k, v in self._individuals_age_dct.items() if v==age
])
def get_detection_status_(self, person_id):
return self._detection_status.get(person_id, default_detection_status)
def get_quarantine_status_(self, person_id):
return self._quarantine_status.get(person_id, default_quarantine_status)
def get_infection_status(self, person_id):
return self._infection_status.get(person_id, InfectionStatus.Healthy.value)
@staticmethod
def parse_random_seed(random_seed):
mocos_helper.seed(random_seed)
def _set_up_data_frames(self) -> None:
"""
The purpose of this method is to set up two dataframes.
One is self._df_individuals that stores features for the population
Second is self._df_households that stores list of people idx per household
building df_households is time consuming, therefore we try to reuse previously computed df_households
:return:
"""
logger.info('Set up data frames: Reading population csv...')
self._df_individuals = pd.read_csv(self.df_individuals_path)
self._df_individuals.index = self._df_individuals.idx
self._individuals_age = self._df_individuals[AGE].values
self._individuals_age_dct = self._df_individuals[AGE].to_dict()
self._individuals_gender_dct = self._df_individuals[GENDER].to_dict()
self._individuals_household_id = self._df_individuals[HOUSEHOLD_ID].to_dict()
self._individuals_indices = self._df_individuals.index.values
if SOCIAL_COMPETENCE in self._df_individuals.columns:
if self._params[TRANSMISSION_PROBABILITIES][FRIENDSHIP] == 0:
logger.info('Friendship = 0.0 - Disable friendship kernel...')
self._disable_friendship_kernel = True
else:
logger.info('Set up data frames: Social competence and loading social activity sampler...')
self._social_activity_scores = self._df_individuals[SOCIAL_COMPETENCE].to_dict()
self._social_activity_sampler = mocos_helper.AgeDependentFriendSampler(
self._individuals_indices,
self._individuals_age,
self._df_individuals[GENDER].values,
self._df_individuals[SOCIAL_COMPETENCE].values
)
self._disable_friendship_kernel = False
else:
logger.info('Social competence missing - Disable friendship kernel...')
self._disable_friendship_kernel = True
logger.info('Set up data frames: Building households df...')
if os.path.exists(self.df_households_path):
self._df_households = pd.read_csv(self.df_households_path, index_col=HOUSEHOLD_ID,
converters={ID: ast.literal_eval})
else:
self._df_households = pd.DataFrame({ID: self._df_individuals.groupby(HOUSEHOLD_ID)[ID].apply(list)})
os.makedirs(os.path.dirname(self.df_households_path), exist_ok=True)
self._df_households.to_csv(self.df_households_path)
self._df_households[CAPACITY] = self._df_households[ID].apply(lambda x: len(x))
d = self._df_households.to_dict()
self._households_inhabitants = d[ID] #self._df_households[ID]
self._households_capacities = d[CAPACITY] #self._df_households[CAPACITY]
if not self._params[LOG_OUTPUTS]:
self._df_households = None
self._df_individuals = None
@staticmethod
def append_event(event: Event) -> None:
q.put(event)
def _fill_queue_based_on_auxiliary_functions(self) -> None:
# TODO: THIS IS NOT WORKING WHEN CAP = INF, let's fix it
# (one possible way to fix it: generate say first N events and a followup "filling EVENT"
# on time T(N) of N-th event - at T(N) generate N more events and enqueue next portion.
# Alternatively add just one event of type AUXILIARY_FUNCTION/IMPORT_INTENSITY
# that will then draw time of next event of that type
"""
        The purpose of this method is to mark some people of the population as sick according to the provided function.
Possible functions: see possible values of ImportIntensityFunctions enum
Outcome of the function can be adjusted by overriding default parameters:
multiplier, rate, cap, infectious_probability.
:return:
"""
def _generate_event_times(func, rate, multiplier, cap, root_buffer=100, root_guess=0) -> list:
"""
Here a naive way of generating event times is proposed.
The idea is to generate N events
:param func: currently two functions are supported: exponential a*exp(r*t) and polynomial a*r^t
:param rate: a parameter that is making the slope more steep
:param multiplier: a parameter that scales the time down
:param cap: the maximum amount of cases generated and added to queue list
:param root_buffer: one-directional range to find roots in
:param root_guess: guess on first solution (i=1)
:return:
"""
root_min = root_guess - root_buffer
root_max = root_guess + root_buffer
time_events_ = []
def bisect_fun(x, integer):
return func(x, rate=rate, multiplier=multiplier) - integer
for i in range(1, 1 + cap):
bisect_fun = partial(bisect_fun, integer=i)
root = scipy.optimize.bisect(bisect_fun, root_min, root_max)
time_events_.append(root)
root_min = root
root_max = root + root_buffer
return time_events_
import_intensity = self._params[IMPORT_INTENSITY]
f_choice = ImportIntensityFunctions(import_intensity[FUNCTION])
if f_choice == ImportIntensityFunctions.NoImport:
return
func = import_intensity_functions[f_choice]
multiplier = import_intensity[MULTIPLIER]
rate = import_intensity[RATE]
cap = import_intensity[CAP]
infectious_prob = import_intensity[INFECTIOUS]
event_times = _generate_event_times(func=func, rate=rate, multiplier=multiplier, cap=cap)
for event_time in event_times:
person_id = self._individuals_indices[mocos_helper.randint(0, len(self._individuals_indices))]
t_state = TMINUS1
if mocos_helper.rand() < infectious_prob:
t_state = T0
self.append_event(Event(event_time, person_id, t_state, None, IMPORT_INTENSITY, self.global_time))
def _fill_queue_based_on_initial_conditions(self):
"""
        The purpose of this method is to mark some people of the population as sick according to the provided
initial conditions.
Conditions can be provided using one of two supported schemas.
        Schema v1 is a list with details per person, while schema v2 is a dictionary specifying the selection algorithm
and cardinalities of each group of patients (per symptom).
:return:
"""
def _assign_t_state(status):
if status == CONTRACTION:
return TMINUS1
if status == INFECTIOUS:
return T0
if status == IMMUNE:
return TRECOVERY
raise ValueError(f'invalid initial infection status {status}')
initial_conditions = self._params[INITIAL_CONDITIONS]
if isinstance(initial_conditions, list): # schema v1
for initial_condition in initial_conditions:
person_idx = initial_condition[PERSON_INDEX]
t_state = _assign_t_state(initial_condition[INFECTION_STATUS])
if EXPECTED_CASE_SEVERITY in initial_condition:
self._expected_case_severity[person_idx] = initial_condition[EXPECTED_CASE_SEVERITY]
self.append_event(Event(initial_condition[CONTRACTION_TIME], person_idx, t_state, None,
INITIAL_CONDITIONS, self.global_time))
elif isinstance(initial_conditions, dict): # schema v2
if initial_conditions[SELECTION_ALGORITHM] == InitialConditionSelectionAlgorithms.RandomSelection.value:
# initially all indices can be drawn
#choice_set = self._individuals_indices# self._df_individuals.index.values
choice_set = list(self._individuals_indices)
for infection_status, cardinality in initial_conditions[CARDINALITIES].items():
if cardinality > 0:
if cardinality < 1:
c = cardinality
cardinality = int(cardinality * len(choice_set))
if cardinality == 0:
logger.info(f"too small cardinality provided {cardinality} ({c})")
continue
else:
cardinality = int(cardinality)
#selected_rows = np.random.choice(choice_set, cardinality, replace=False)
# now only previously unselected indices can be drawn in next steps
#choice_set = np.array(list(set(choice_set) - set(selected_rows)))
choice_set, selected_rows = mocos_helper.randomly_split_list(choice_set, howmuch=cardinality)
t_state = _assign_t_state(infection_status)
for row in selected_rows:
whom = None
if t_state == TRECOVERY:
whom = row
self.append_event(Event(self.global_time, row, t_state, whom, INITIAL_CONDITIONS,
self.global_time))
else:
err_msg = f'Unsupported selection algorithm provided {initial_conditions[SELECTION_ALGORITHM]}'
logger.error(err_msg)
raise ValueError(err_msg)
else:
err_msg = f'invalid schema provided {initial_conditions}'
logger.error(err_msg)
raise ValueError(err_msg)
@property
def global_time(self):
return self._global_time
@property
def df_individuals(self):
return self._df_individuals
@property
def stop_simulation_threshold(self):
return self._params[STOP_SIMULATION_THRESHOLD]
@property
def case_severity_distribution(self):
return self._params[CASE_SEVERITY_DISTRIBUTION]
@property
def disease_progression(self):
return self._params[DISEASE_PROGRESSION][DEFAULT]
@property
def affected_people(self):
return self._affected_people
@property
def detected_people(self):
return self._detected_people
@property
def quarantined_people(self):
return self._quarantined_people
@property
def active_people(self):
return self._active_people
@property
def deaths(self):
return self._deaths
def draw_expected_case_severity(self):
case_severity_dict = self.case_severity_distribution
keys = list(case_severity_dict.keys())
d = {}
for age_min, age_max, fatality_prob in default_age_induced_fatality_rates:
cond_lb = self._individuals_age >= age_min
cond_ub = self._individuals_age < age_max
cond = np.logical_and(cond_lb, cond_ub)
if np.count_nonzero(cond) == 0:
continue
age_induced_severity_distribution = dict()
age_induced_severity_distribution[CRITICAL] = fatality_prob/self._params[DEATH_PROBABILITY][CRITICAL]
for x in case_severity_dict:
if x != CRITICAL:
age_induced_severity_distribution[x] = case_severity_dict[x] / (1 - case_severity_dict[CRITICAL]) * (1 - age_induced_severity_distribution[CRITICAL])
realizations = mocos_helper.sample_with_replacement_shuffled((age_induced_severity_distribution[x] for x in case_severity_dict), len(self._individuals_indices[cond]))
values = [keys[r] for r in realizations]
df = pd.DataFrame(values, index=self._individuals_indices[cond])
d = {**d, **df.to_dict()[0]}
return d
def setup_random_distribution(self, t):
params = self.disease_progression[t]
distribution = params.get(DISTRIBUTION, default_distribution[DISTRIBUTION])
if distribution == FROM_FILE:
filepath = params.get('filepath', None).replace('$ROOT_DIR', config.ROOT_DIR)
Schema(lambda x: os.path.exists(x)).validate(filepath)
array = np.load(filepath)
approximate_distribution = params.get('approximate_distribution', None)
if approximate_distribution == LOGNORMAL:
shape, loc, scale = scipy.stats.lognorm.fit(array, floc=0)
return mocos_helper.lognormal, [], {'mean': log(scale), 'sigma': shape}
if approximate_distribution == GAMMA:
shape, loc, scale = scipy.stats.gamma.fit(array, floc=0)
return mocos_helper.gamma, [], {'alpha': shape, 'beta': scale}
if approximate_distribution:
raise NotImplementedError(f'Approximating to this distribution {approximate_distribution}'
f'is not yet supported but we can quickly add it if needed')
raise NotImplementedError(f'Currently not supporting empirical distribution'
f' without approximating it')
if distribution == LOGNORMAL:
mean = params.get('mean', 0.0)
sigma = params.get('sigma', 1.0)
return mocos_helper.lognormal, [], {'mean': mean, 'sigma': sigma}
if distribution == EXPONENTIAL:
lambda_ = params.get('lambda', 1.0)
return mocos_helper.exponential, [], {'scale': 1/lambda_}
if distribution == POISSON:
lambda_ = params.get('lambda', 1.0)
return mocos_helper.poisson, [], {'lam': lambda_}
raise ValueError(f'Sampling from distribution {distribution} is not yet supported but we can quickly add it')
def add_potential_contractions_from_transport_kernel(self, person_id):
pass
def set_up_internal_fear(self, kernel_id):
fear_factors = self._params[FEAR_FACTORS]
fear_factor = fear_factor_schema.validate(fear_factors.get(kernel_id, fear_factors.get(DEFAULT, None)))
if not fear_factor:
return fear_functions[FearFunctions.FearDisabled], 0, 0, 0, 0, 0
f = fear_functions[FearFunctions(fear_factor[FEAR_FUNCTION])]
limit_value = fear_factor[LIMIT_VALUE]
scale = fear_factor[SCALE_FACTOR]
loc = fear_factor[LOC_FACTOR]
weights_deaths = fear_factor[DEATHS_MULTIPLIER]
weights_detected = fear_factor[DETECTED_MULTIPLIER]
return f, weights_detected, weights_deaths, scale, loc, limit_value
def fear(self, kernel_id) -> float:
if kernel_id not in self.fear_fun:
res = self.set_up_internal_fear(kernel_id)
(self.fear_fun[kernel_id], self.fear_weights_detected[kernel_id],
self.fear_weights_deaths[kernel_id], self.fear_scale[kernel_id],
self.fear_loc[kernel_id], self.fear_limit_value[kernel_id]) = res
detected = self.detected_people
deaths = self.deaths
time = self._global_time
if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
if self._max_time_offset != np.inf:
time -= self._max_time_offset
else:
time = -np.inf
return self.fear_fun[kernel_id](detected, deaths, time, self.fear_weights_detected[kernel_id],
self.fear_weights_deaths[kernel_id], self.fear_loc[kernel_id],
self.fear_scale[kernel_id], self.fear_limit_value[kernel_id])
def gamma(self, kernel_id):
return self._params[TRANSMISSION_PROBABILITIES][kernel_id]
def household_kernel_old_implementation(self, person_id):
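        # Old behaviour: draw the total number of household infections from a Poisson
        # distribution with rate gamma('household') * (length of the infectious window),
        # then assign each one to a uniformly chosen co-inhabitant at a uniform time.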
prog_times = self._progression_times_dict[person_id]
start = prog_times[T0]
end = prog_times[T2] or prog_times[TRECOVERY]
total_infection_rate = (end - start) * self.gamma('household')
infected = mocos_helper.poisson(total_infection_rate)
if infected == 0:
return
household_id = self._individuals_household_id[person_id]
inhabitants = self._households_inhabitants[household_id]
possible_choices = [i for i in inhabitants if i != person_id]
for choice_idx in mocos_helper.sample_idxes_with_replacement_uniform(len(possible_choices), infected):
person_idx = possible_choices[choice_idx]
if self.get_infection_status(person_idx) == InfectionStatus.Healthy:
contraction_time = mocos_helper.uniform(low=start, high=end)
self.append_event(Event(contraction_time, person_idx, TMINUS1, person_id, HOUSEHOLD, self.global_time))
def add_potential_contractions_from_household_kernel(self, person_id):
if self._params[OLD_IMPLEMENTATION_FOR_HOUSEHOLD_KERNEL]:
self.household_kernel_old_implementation(person_id)
return
prog_times = self._progression_times_dict[person_id]
start = prog_times[T0]
end = prog_times[T2] or prog_times[TRECOVERY]
household_id = self._individuals_household_id[person_id]
inhabitants = self._households_inhabitants[household_id]
possible_choices = [i for i in inhabitants if i != person_id]
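        # New behaviour: every susceptible housemate independently draws an exponential
        # waiting time with rate gamma('household') / len(possible_choices); draws that
        # fall outside the infectious window [start, end) are discarded.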
for person_idx in possible_choices:
if self.get_infection_status(person_idx) == InfectionStatus.Healthy:
scale = len(possible_choices) / self.gamma('household')
contraction_time = start + mocos_helper.exponential(scale=scale)
if contraction_time >= end:
continue
self.append_event(Event(contraction_time, person_idx, TMINUS1, person_id, HOUSEHOLD, self.global_time))
def add_potential_contractions_from_constant_kernel(self, person_id):
""" Constant kernel draws a number of infections based on base gamma and enqueue randomly selected events """
prog_times = self._progression_times_dict[person_id]
start = prog_times[T0]
end = prog_times[T1]
if end is None:
end = prog_times[T2]
total_infection_rate = (end - start) * self.gamma('constant')
infected = mocos_helper.poisson(total_infection_rate)
if infected == 0:
return
selected_rows = mocos_helper.nonreplace_sample_few(self._individuals_indices,
infected, person_id)
for person_idx in selected_rows:
if self.get_infection_status(person_idx) == InfectionStatus.Healthy:
contraction_time = mocos_helper.uniform(low=start, high=end)
self.append_event(Event(contraction_time, person_idx, TMINUS1, person_id, CONSTANT, self.global_time))
def add_potential_contractions_from_constant_age_kernel(self, person_id):
if self._disable_constant_age_kernel is True:
return
age = self._individuals_age_dct[person_id]
if age not in self._constant_age_helper_age_dict:
return
prog_times = self._progression_times_dict[person_id]
start = prog_times[T0]
end = prog_times[T1]
if end is None:
end = prog_times[T2]
total_infection_rate = (end - start) * self.gamma('constant_age')
infected = mocos_helper.poisson(total_infection_rate)
if infected == 0:
return
selected_rows = mocos_helper.nonreplace_sample_few(
self._constant_age_individuals[self._constant_age_helper_age_dict[age]],
infected,
person_id
)
for person_idx in selected_rows:
if self.get_infection_status(person_idx) == InfectionStatus.Healthy:
contraction_time = mocos_helper.uniform(low=start, high=end)
self.append_event(Event(contraction_time, person_idx, TMINUS1, person_id, CONSTANT_AGE, self.global_time))
def add_potential_contractions_from_friendship_kernel(self, person_id):
if self._disable_friendship_kernel is True:
return
prog_times = self._progression_times_dict[person_id]
start = prog_times[T0]
end = prog_times[T1]
if end is None:
end = prog_times[T2]
total_infection_rate = (end - start) * self.gamma('friendship')
no_infected = mocos_helper.poisson(total_infection_rate * self._social_activity_scores[person_id])
# Add a constant multiplicand above?
age = self._individuals_age_dct[person_id]
gender = self._individuals_gender_dct[person_id]
for _ in range(no_infected):
infected_idx = self._social_activity_sampler.gen(age, gender)
if self.get_infection_status(infected_idx) == InfectionStatus.Healthy:
contraction_time = mocos_helper.uniform(low=start, high=end)
self.append_event(Event(contraction_time, infected_idx, TMINUS1, person_id, FRIENDSHIP, self.global_time))
def handle_t0(self, person_id):
self._active_people += 1
if self.get_infection_status(person_id) in [
InfectionStatus.Healthy,
InfectionStatus.Contraction
]:
self._infection_status[person_id] = InfectionStatus.Infectious.value
else:
            raise AssertionError(f'Unexpected state detected: {self.get_infection_status(person_id)}'
                                 f' person_id: {person_id}')
household_id = self._individuals_household_id[person_id] # self._df_individuals.loc[person_id, HOUSEHOLD_ID]
capacity = self._households_capacities[household_id] # self._df_households.loc[household_id][ID]
if capacity > 1:
self.add_potential_contractions_from_household_kernel(person_id)
self.add_potential_contractions_from_constant_kernel(person_id)
self.add_potential_contractions_from_friendship_kernel(person_id)
self.add_potential_contractions_from_constant_age_kernel(person_id)
def generate_disease_progression(self, person_id, event_time: float,
initial_infection_status: str) -> None:
"""Returns list of disease progression events
"future" disease_progression should be recalculated when the disease will be recognised at the state level
t0 - time when individual becomes infectious (Mild symptoms)
t1 - time when individual stay home/visit doctor due to Mild/Serious? symptoms
t2 - time when individual goes to hospital due to Serious symptoms
tdeath - time when individual dies (depending on death probability)
trecovery - time when individual is recovered (in case the patient will not die from covid19)
If person is Infected:
A - tminus1 is known (event time),
B - t0 is calculated as tminus1 + rv_t0,
If person is added to population as Infectious:
A - t0 is known (event time),
B - tminus 1 is calculated as t0 - rv_t0
For all infected:
A - t1 is calculated as t0 + rv_t1
If person will develop Severe or Critical symptoms:
A - t2 is calculated as t0 + rv_t2
B - if t1 is larger than t2, discard t1
C - calculate trecovery time as t0 + 6 weeks <- these 6 weeks are from WHO report, in python we use uniform[4w,8w]
D - calculate tdetection as t2
If person will develop Asymptomatic or Mild symptoms:
A - calculate trecovery time as t0 + 2 weeks <- these 2 weeks are from WHO report, in python we use uniform[11d,17d]
B - draw a random number uniform[0,1] and if less than detection_mild_proba, calculate tdetection as t0 + 2
Draw a random number uniform[0,1] and if less than death_probability[expected_case(person_id)]:
A - calculate tdeath time as t0 + rv_tdeath,
B - discard all times that are larger than tdeath
"""
if initial_infection_status == InfectionStatus.Contraction:
tminus1 = event_time
t0 = tminus1 + self.rv_t0()
self.append_event(Event(t0, person_id, T0, person_id, DISEASE_PROGRESSION, tminus1))
self._infection_status[person_id] = initial_infection_status
elif initial_infection_status == InfectionStatus.Infectious:
t0 = event_time
            # tminus1 does not need to be defined here, but for completeness let's calculate it
tminus1 = t0 - self.rv_t0()
else:
raise ValueError(f'invalid initial infection status {initial_infection_status}')
t2 = None
if self._expected_case_severity[person_id] in [
ExpectedCaseSeverity.Severe,
ExpectedCaseSeverity.Critical
]:
t2 = t0 + self.rv_t2()
self.append_event(Event(t2, person_id, T2, person_id, DISEASE_PROGRESSION, t0))
t1 = t0 + self.rv_t1()
if not t2 or t1 < t2:
self.append_event(Event(t1, person_id, T1, person_id, DISEASE_PROGRESSION, t0))
else:
            # if t2 < t1 then we reset t1 to avoid exporting misleading data from the simulation
t1 = None
tdetection = None
trecovery = None
tdeath = None
if mocos_helper.rand() <= self._params[DEATH_PROBABILITY][self._expected_case_severity[person_id]]:
tdeath = t0 + self.rv_tdeath()
self.append_event(Event(tdeath, person_id, TDEATH, person_id, DISEASE_PROGRESSION, t0))
else:
if self._expected_case_severity[person_id] in [
ExpectedCaseSeverity.Mild,
ExpectedCaseSeverity.Asymptomatic
]:
trecovery = t0 + mocos_helper.uniform(14.0 - 3.0, 14.0 + 3.0) # TODO: this should not be hardcoded!
else:
trecovery = t0 + mocos_helper.uniform(42.0 - 14.0, 42.0 + 14.0)
self.append_event(Event(trecovery, person_id, TRECOVERY, person_id, DISEASE_PROGRESSION, t0))
""" Following is for checking whther tdetection should be picked up"""
calculate_tdetection = self._params[TURN_ON_DETECTION]
if self._expected_case_severity[person_id] in [
ExpectedCaseSeverity.Mild,
ExpectedCaseSeverity.Asymptomatic
]:
if mocos_helper.rand() > self._params[DETECTION_MILD_PROBA]:
calculate_tdetection = False
if calculate_tdetection:
""" If t2 is defined (severe/critical), then use this time; if not; use some offset from t0 """
tdetection = t2 or t0 + 2 # TODO: this should not be hardcoded
ev = Event(tdetection, person_id, TDETECTION, person_id, DETECTION, t0)
self.append_event(ev)
self._progression_times_dict[person_id] = {ID: person_id, TMINUS1: tminus1, T0: t0, T1: t1, T2: t2,
TDEATH: tdeath, TRECOVERY: trecovery, TDETECTION: tdetection}
if initial_infection_status == InfectionStatus.Infectious:
self.handle_t0(person_id)
@property
def df_infections(self):
return pd.DataFrame.from_dict(self._infections_dict, orient='index')
@property
def df_progression_times(self):
return pd.DataFrame.from_dict(self._progression_times_dict, orient='index')
def save_progression_times(self, path):
with open(path, "w") as f:
f.write('idx,tminus1,t0,t1,t2,tdeath,trecovery,tdetection,quarantine\n')
for elem in self._progression_times_dict.values():
                line = f'{elem.get(ID, None)},{elem.get(TMINUS1, None)},{elem.get(T0, None)},'\
                       f'{elem.get(T1, None)},{elem.get(T2, None)},{elem.get(TDEATH, None)},'\
                       f'{elem.get(TRECOVERY, None)},{elem.get(TDETECTION, None)},{elem.get(QUARANTINE, None)}\n'
                f.write(line)
def save_potential_contractions(self, path):
with open(path, "w") as f:
f.write('source_id,target_id,contraction_time,kernel\n')
for elem in self._infections_dict.values():
                if elem.get(CONTRACTION_TIME) <= self._global_time:  # skipping events that were not realized yet
                    line = f'{elem.get(SOURCE, None)},{elem.get(TARGET, None)},{elem.get(CONTRACTION_TIME, None)},'\
                           f'{elem.get(KERNEL, None)}\n'
                    f.write(line)
def prevalance_at(self, time):
return len([1 for elem in self._infections_dict.values() if elem.get(CONTRACTION_TIME, np.inf) <= time])
    def mean_day_increase_until(self, time):
        mean_increase = 0.0
        i = 0
        for k, v in self._per_day_increases.items():
            if k <= time:
                mean_increase = (mean_increase * i + v) / (i + 1)
                i += 1
        return mean_increase
def detected_cases(self, df_r1):
cond1 = ~df_r1.tdetection.isna()
cond2a = ~df_r1.trecovery.isna()
cond2b = df_r1.tdetection > df_r1.trecovery
cond2 = ~np.logical_and(cond2a, cond2b)
if len(df_r1[~df_r1.tdeath.isna()]) > 0:
cond3a = ~df_r1.tdeath.isna()
cond3b = df_r1.tdetection > df_r1.tdeath
cond3 = ~np.logical_and(cond3a, cond3b)
cond23 = np.logical_and(cond2, cond3)
else:
cond23 = cond2
cond = np.logical_and(cond1, cond23)
df = df_r1[cond]
detected_cases = df.sort_values(by='tdetection').tdetection
return detected_cases
@staticmethod
def store_parameter(simulation_output_dir, parameter, filename):
save_path = os.path.join(simulation_output_dir, filename)
with open(save_path, 'wb') as f:
pickle.dump(parameter, f)
def _save_population_parameters(self, simulation_output_dir):
run_id = f'{int(time.monotonic() * 1e9)}_{self._params[RANDOM_SEED]}'
if self._params[SAVE_EXPECTED_SEVERITY]:
self.store_parameter(simulation_output_dir, self._expected_case_severity, 'expected_case_severity.pkl')
self.store_parameter(simulation_output_dir, self._infection_status, 'infection_status.pkl')
self.store_parameter(simulation_output_dir, self._detection_status, 'detection_status.pkl')
self.store_parameter(simulation_output_dir, self._quarantine_status, 'quarantine_status.pkl')
def _save_dir(self, prefix=''):
underscore_if_prefix = '_' if len(prefix) > 0 else ''
json_name = os.path.splitext(os.path.basename(self.params_path))[0]
run_id = f'{prefix}{underscore_if_prefix}{json_name}_{int(time.monotonic() * 1e9)}_{self._params[RANDOM_SEED]}'
simulation_output_dir = os.path.join(self._params[OUTPUT_ROOT_DIR],
self._params[EXPERIMENT_ID],
run_id)
os.makedirs(simulation_output_dir)
return simulation_output_dir
def save_serial_interval(self, simulation_output_dir):
if len(self.serial_intervals) == 0:
return np.nan
np_intervals = np.array(self.serial_intervals)
serial_interval_median = np.median(np_intervals)
description = scipy.stats.describe(np_intervals)
serial_interval_str = f'serial interval: measured from {self._params[SERIAL_INTERVAL][MIN_TIME]}'\
f' to {self._params[SERIAL_INTERVAL][MAX_TIME]};'\
f' median={serial_interval_median}, stats describe: {description}'
logger.info(serial_interval_str)
np.save(os.path.join(simulation_output_dir, 'serial_intervals.npy'), np_intervals)
output_log_file = os.path.join(simulation_output_dir, 'serial_interval_stats.txt')
with open(output_log_file, "w") as out:
out.write(serial_interval_str)
return serial_interval_median
def log_outputs(self, simulation_output_dir):
self._save_population_parameters(simulation_output_dir)
copyfile(self.params_path, os.path.join(simulation_output_dir,
f'input_{os.path.basename(self.params_path)}'))
if self._params[SAVE_INPUT_DATA]:
copyfile(self.df_individuals_path, os.path.join(simulation_output_dir,
f'input_{os.path.basename(self.df_individuals_path)}'))
household_input_path = os.path.join(self._params[OUTPUT_ROOT_DIR], self._params[EXPERIMENT_ID],
'input_df_households.csv')
if not os.path.exists(household_input_path):
self._df_households.to_csv(household_input_path)
repo = Repo(config.ROOT_DIR)
git_active_branch_log = os.path.join(simulation_output_dir, 'git_active_branch_log.txt')
with open(git_active_branch_log, 'w') as f:
f.write(f'Active branch name {repo.active_branch.name}\n')
f.write(str(repo.active_branch.log()))
git_status = os.path.join(simulation_output_dir, 'git_status.txt')
with open(git_status, 'w') as f:
f.write(repo.git.status())
serial_interval = self.save_serial_interval(simulation_output_dir)
if self._params[ENABLE_VISUALIZATION]:
self._vis.visualize_simulation(simulation_output_dir, serial_interval, self.fear,
self.active_people, self._max_time_offset, self.detected_cases,
self.df_progression_times,
self.df_infections
)
def update_max_time_offset(self):
if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
if self._max_time_offset == np.inf:
if self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME] <= self._detected_people:
self._max_time_offset = self._global_time
self._init_for_stats = self._active_people
def quick_return_condition(self, initiated_through):
""" Checks if event of type 'initiated_through' should be abandoned given current situation """
if initiated_through == HOUSEHOLD:
return False
r = mocos_helper.rand()
if initiated_through == CONSTANT and len(self._params[R_OUT_SCHEDULE]) > 0:
t = self._global_time - self._max_time_offset
for s in self._params[R_OUT_SCHEDULE]:
if s[MIN_TIME] <= t <= s[MAX_TIME]:
if r > s[OVERRIDE_R_FRACTION]:
return True
else:
return False
if r > self.fear(initiated_through):
return True
return False
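    # Example: if fear(CONSTANT) currently evaluates to 0.6, roughly 40% of
    # constant-kernel transmission events are abandoned here, unless an
    # R_OUT_SCHEDULE entry covers the current time and overrides that fraction.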
def add_new_infection(self, person_id, infection_status,
initiated_by, initiated_through):
self._detection_status[person_id] = DetectionStatus.NotDetected.value
self._infections_dict[len(self._infections_dict)] = {
SOURCE: initiated_by,
TARGET: person_id,
CONTRACTION_TIME: self.global_time,
KERNEL: initiated_through
}
if self.global_time >= self._params[SERIAL_INTERVAL][MIN_TIME]:
if self.global_time < self._params[SERIAL_INTERVAL][MAX_TIME]:
if initiated_by is not None:
serial_interval = self.global_time - self._progression_times_dict[initiated_by][TMINUS1]
self.serial_intervals.append(serial_interval)
self._affected_people += 1
self.generate_disease_progression(person_id,
self.global_time,
infection_status)
# 'Event', [TIME, PERSON_INDEX, TYPE, INITIATED_BY, INITIATED_THROUGH, ISSUED_TIME])
def process_event(self, event) -> bool:
type_ = getattr(event, TYPE)
time = getattr(event, TIME)
if int(time / self._params[LOG_TIME_FREQ]) != int(self._global_time / self._params[LOG_TIME_FREQ]):
memory_use = ps.memory_info().rss / 1024 / 1024
fearC = self.fear(CONSTANT)
fearH = self.fear(HOUSEHOLD)
per_day_increase = 0
if self._last_affected:
per_day_increase = (self.affected_people - self._last_affected)/self._last_affected*100
self._last_affected = self.affected_people
self._per_day_increases[int(self._global_time)] = per_day_increase
logger.info(f'Time: {time:.2f}'
f'\tAffected: {self.affected_people}'
f'\tDetected: {self.detected_people}'
f'\tQuarantined: {self.quarantined_people}'
f'\tPer-day-increase: {per_day_increase:.2f} %'
f'\tActive: {self.active_people}'
f'\tDeaths: {self.deaths}'
f'\tFearC: {fearC}'
f'\tFearH: {fearH}'
f'\tPhysical memory use: {memory_use:.2f} MB')
self._global_time = time
if self._global_time > self._max_time + self._max_time_offset:
return False
person_id = getattr(event, PERSON_INDEX)
initiated_by = getattr(event, INITIATED_BY)
initiated_through = getattr(event, INITIATED_THROUGH)
# TODO the remaining attribute will be useful when we will take into account for backtracing
# issued_time = getattr(event, ISSUED_TIME)
if initiated_by is None and initiated_through != DISEASE_PROGRESSION:
if self.get_infection_status(person_id) == InfectionStatus.Healthy:
if type_ == TMINUS1:
self.add_new_infection(person_id, InfectionStatus.Contraction.value,
initiated_by, initiated_through)
elif type_ == T0:
self.add_new_infection(person_id, InfectionStatus.Infectious.value,
initiated_by, initiated_through)
elif type_ == TMINUS1:
# check if this action is still valid first
try:
initiated_inf_status = self._infection_status[initiated_by]
except KeyError:
                logging.error(f'infection status should not be blank for infection! key: {initiated_by}')
                return True
if initiated_inf_status in active_states:
if self.quick_return_condition(initiated_through):
return True
current_status = self.get_infection_status(person_id)
if current_status == InfectionStatus.Healthy:
new_infection = False
# TODO below is a spaghetti code that should be sorted out! SORRY!
if initiated_through != HOUSEHOLD:
if initiated_inf_status != InfectionStatus.StayHome:
new_infection = True
if self.get_quarantine_status_(initiated_by) == QuarantineStatus.Quarantine:
new_infection = False
if self.get_quarantine_status_(person_id) == QuarantineStatus.Quarantine:
new_infection = False
else: # HOUSEHOLD kernel:
new_infection = True
if new_infection:
self.add_new_infection(person_id, InfectionStatus.Contraction.value,
initiated_by, initiated_through)
elif type_ == T0:
if self.get_infection_status(person_id) == InfectionStatus.Contraction:
self.handle_t0(person_id)
elif type_ == T1:
if self.get_infection_status(person_id) == InfectionStatus.Infectious:
self._infection_status[person_id] = InfectionStatus.StayHome.value
elif type_ == T2:
if self.get_infection_status(person_id) in [
InfectionStatus.StayHome,
InfectionStatus.Infectious
]:
self._infection_status[person_id] = InfectionStatus.Hospital.value
if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
self._icu_needed += 1
elif type_ == TDEATH:
if self.get_infection_status(person_id) not in [
InfectionStatus.Death,
InfectionStatus.Recovered
]:
self._deaths += 1
if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
if self._progression_times_dict[person_id][T2] < self.global_time:
self._icu_needed -= 1
self._active_people -= 1
self._infection_status[person_id] = InfectionStatus.Death.value
elif type_ == TRECOVERY: # TRECOVERY is exclusive with regards to TDEATH (when this comment was added)
if self.get_infection_status(person_id) not in [
InfectionStatus.Recovered,
InfectionStatus.Death
]:
if initiated_through != INITIAL_CONDITIONS:
self._active_people -= 1
if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
if self._progression_times_dict[person_id][T2] < self.global_time:
self._icu_needed -= 1
self._infection_status[person_id] = InfectionStatus.Recovered
self._immune_people += 1
elif type_ == TDETECTION:
if self.get_infection_status(person_id) not in [
InfectionStatus.Recovered,
InfectionStatus.Healthy
]:
if self.get_detection_status_(person_id) == DetectionStatus.NotDetected:
self._detection_status[person_id] = DetectionStatus.Detected.value
self._detected_people += 1
self.update_max_time_offset()
household_id = self._individuals_household_id[person_id]
for inhabitant in self._households_inhabitants[household_id]:
if self.get_quarantine_status_(inhabitant) == QuarantineStatus.NoQuarantine:
if self.get_infection_status(inhabitant) != InfectionStatus.Death:
self._quarantine_status[inhabitant] = QuarantineStatus.Quarantine.value
self._quarantined_people += 1
if inhabitant not in self._progression_times_dict:
self._progression_times_dict[inhabitant] = {}
self._progression_times_dict[inhabitant][QUARANTINE] = self.global_time
if self.get_infection_status(inhabitant) in [InfectionStatus.Infectious,
InfectionStatus.StayHome]:
# TODO: this has to be implemented better, just a temporary solution:
if self._progression_times_dict[inhabitant].get(TDETECTION, None) is None:
new_detection_time = self.global_time + 2.0
self._progression_times_dict[inhabitant][TDETECTION] = new_detection_time
ev = Event(new_detection_time, inhabitant, TDETECTION,
person_id, 'quarantine_followed_detection',
self.global_time)
self.append_event(ev)
else:
raise ValueError(f'unexpected status of event: {event}')
return True
def run_simulation(self):
def _inner_loop(iter):
threshold_type = self._params[STOP_SIMULATION_THRESHOLD_TYPE]
value_to_be_checked = None
start = time.time()
times_mean = 0.0
i = 0
while not q.empty():
event_start = time.time()
if threshold_type == PREVALENCE:
value_to_be_checked = self.affected_people
elif threshold_type == DETECTIONS:
value_to_be_checked = self.detected_people
if value_to_be_checked is None:
logging.error(f"we have an error here")
if value_to_be_checked >= self.stop_simulation_threshold:
logging.info(
f"The outbreak reached a high number {self.stop_simulation_threshold} ({threshold_type})")
break
event = q.get()
if not self.process_event(event):
logging.info(f"Processing event {event} returned False")
q.task_done()
break
q.task_done()
event_end = time.time()
elapsed = event_end - event_start
times_mean = ( times_mean * i + elapsed ) / (i + 1)
i += 1
end = time.time()
print(f'Sim runtime {end - start}, event proc. avg time: {times_mean}')
# cleaning up priority queue:
while not q.empty():
q.get_nowait()
q.task_done()
simulation_output_dir = self._save_dir()
self.save_progression_times(os.path.join(simulation_output_dir, 'output_df_progression_times.csv'))
self.save_potential_contractions(os.path.join(simulation_output_dir, 'output_df_potential_contractions.csv'))
if self._params[LOG_OUTPUTS]:
logger.info('Log outputs')
self.log_outputs(simulation_output_dir)
if self._icu_needed >= self._params[ICU_AVAILABILITY]:
return True
if value_to_be_checked >= self.stop_simulation_threshold:
return True
return False
seeds = None
if isinstance(self._params[RANDOM_SEED], str):
            seeds = eval(self._params[RANDOM_SEED])  # TODO: warning, this is unsafe! do not use in production
elif isinstance(self._params[RANDOM_SEED], int):
seeds = [self._params[RANDOM_SEED]]
runs = 0
output_log = 'Last_processed_time;Total_#Affected;Total_#Detected;Total_#Deceased;Total_#Quarantined;'\
'c;c_norm;Init_#people;Band_hit_time;Subcritical;runs;fear;detection_rate;'\
'incidents_per_last_day;over_icu;hospitalized;zero_time_offset;total_#immune'
if self._params[ENABLE_ADDITIONAL_LOGS]:
output_log += ';Prevalence_30days;Prevalence_60days;Prevalence_90days;Prevalence_120days;'\
'Prevalence_150days;Prevalence_180days;Prevalence_360days;'\
'increase_10;increase_20;increase_30;increase_40;increase_50;increase_100;increase_150'
output_log += '\n'
for i, seed in enumerate(seeds):
runs += 1
self.parse_random_seed(seed)
self.setup_simulation()
logger.info('Filling queue based on initial conditions...')
self._fill_queue_based_on_initial_conditions()
logger.info('Filling queue based on auxiliary functions...')
self._fill_queue_based_on_auxiliary_functions()
logger.info('Initialization step is done!')
outbreak = _inner_loop(i + 1)
last_processed_time = self._global_time
c = self._params[TRANSMISSION_PROBABILITIES][CONSTANT]
c_norm = c * self._params[AVERAGE_INFECTIVITY_TIME_CONSTANT_KERNEL]
subcritical = self._active_people < self._init_for_stats / 2 # at 200 days
bandtime = self.band_time
#if bandtime:
# return 0
fear_ = self.fear(CONSTANT)
detection_rate = self._params[DETECTION_MILD_PROBA]
affected = self.affected_people
detected = self.detected_people
deceased = self.deaths
quarantined = self.quarantined_people
incidents_per_last_day = self.prevalance_at(self._global_time) - self.prevalance_at(self._global_time - 1)
hospitalized = self._icu_needed
zero_time_offset = self._max_time_offset
immune = self._immune_people
            output_add = f'{last_processed_time};{affected};{detected};{deceased};{quarantined};{c};{c_norm};'\
f'{self._init_for_stats};{bandtime};{subcritical};{runs};{fear_};{detection_rate};'\
f'{incidents_per_last_day};{outbreak};{hospitalized};{zero_time_offset};{immune}'
if self._params[ENABLE_ADDITIONAL_LOGS]:
prev30 = self.prevalance_at(30)
prev60 = self.prevalance_at(60)
prev90 = self.prevalance_at(90)
prev120 = self.prevalance_at(120)
prev150 = self.prevalance_at(150)
prev180 = self.prevalance_at(180)
prev360 = self.prevalance_at(360)
mean_increase_at_10 = self.mean_day_increase_until(10)
mean_increase_at_20 = self.mean_day_increase_until(20)
mean_increase_at_30 = self.mean_day_increase_until(30)
mean_increase_at_40 = self.mean_day_increase_until(40)
mean_increase_at_50 = self.mean_day_increase_until(50)
mean_increase_at_100 = self.mean_day_increase_until(100)
mean_increase_at_150 = self.mean_day_increase_until(150)
                output_add += f';{prev30};{prev60};{prev90};{prev120};{prev150};{prev180};{prev360};'\
f'{mean_increase_at_10};{mean_increase_at_20};{mean_increase_at_30};'\
f'{mean_increase_at_40};{mean_increase_at_50};{mean_increase_at_100};'\
f'{mean_increase_at_150}'
output_add += '\n'
logger.info(output_add)
output_log = f'{output_log}{output_add}'
logger.info(output_log)
simulation_output_dir = self._save_dir('aggregated_results')
output_log_file = os.path.join(simulation_output_dir, 'results.txt')
if self._params[ENABLE_VISUALIZATION]:
self._vis.visualize_scenario(simulation_output_dir)
with open(output_log_file, "w") as out:
out.write(output_log)
def setup_simulation(self):
self._init_for_stats = 0 # TODO support different import methods
if isinstance(self._params[INITIAL_CONDITIONS], dict):
cardinalities = self._params[INITIAL_CONDITIONS][CARDINALITIES]
self._init_for_stats = cardinalities.get(CONTRACTION, 0) + cardinalities.get(INFECTIOUS, 0)
# TODO and think how to better group them, ie namedtuple state_stats?
self._affected_people = 0
self._active_people = 0
self._detected_people = 0
self._quarantined_people = 0
self._immune_people = 0
self._deaths = 0
self._icu_needed = 0
self._max_time_offset = 0
if self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
self._max_time_offset = np.inf
self._fear_factor = {}
self._infection_status = {}
self._infections_dict = {}
self._progression_times_dict = {}
self._per_day_increases = {}
self._global_time = self._params[START_TIME]
self._max_time = self._params[MAX_TIME]
if not self._params[REUSE_EXPECTED_CASE_SEVERITIES]:
self._expected_case_severity = self.draw_expected_case_severity()
self._last_affected = None
self.band_time = None
self._quarantine_status = {}
self._detection_status = {}
if self._params[ENABLE_VISUALIZATION]:
self._vis = Visualize(self._params, self.df_individuals,
self._expected_case_severity, logger)
logger = logging.getLogger(__name__)
@click.command()
@click.option('--params-path', type=click.Path(exists=True))
@click.option('--df-individuals-path', type=click.Path(exists=True))
@click.option('--df-households-path', type=click.Path())
@click.argument('run-simulation') #ignored
def runner(params_path, df_individuals_path, run_simulation, df_households_path=''):
im = InfectionModel(params_path=params_path,
df_individuals_path=df_individuals_path,
df_households_path=df_households_path or '')
im.run_simulation()
# TODO: think about separate thread/process to generate random numbers, facilitate sampling
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
pid = os.getpid()
ps = psutil.Process(pid)
pd.set_option('display.max_columns', None)
#fire.Fire(InfectionModel)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
runner()
|
the-stack_0_22403 | import math
import torch
import torch.nn.functional as F
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq import utils
@register_criterion('word_prediction')
class WordPredictionCriterion(FairseqCriterion):
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss, as a Variable
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
predictor_output, decoder_output = model(**sample['net_input'])
# translation loss
translation_lprobs = model.get_normalized_probs(decoder_output, log_probs=True)
translation_target = model.get_targets(sample, decoder_output).view(-1)
translation_loss = F.nll_loss(
translation_lprobs,
translation_target,
size_average=False,
ignore_index=self.padding_idx,
reduce=reduce
)
# predictor loss
prediction_lprobs = model.get_predictor_normalized_probs(
predictor_output, log_probs=True)
# prevent domination of padding idx
non_padding_mask = torch.ones(prediction_lprobs.size(1)).cuda()
non_padding_mask[model.encoder.padding_idx] = 0
prediction_lprobs = prediction_lprobs * non_padding_mask.unsqueeze(0)
prediction_target = model.get_target_words(sample)
assert prediction_lprobs.size(0) == prediction_target.size(0)
assert prediction_lprobs.dim() == 2
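        # gather picks, for every sentence in the batch, the log-probability assigned
        # to each of its target words; the negated (summed) values form an NLL-style loss.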
word_prediction_loss = -torch.gather(prediction_lprobs, 1, prediction_target)
if reduce:
word_prediction_loss = word_prediction_loss.sum()
else:
word_prediction_loss = word_prediction_loss.sum(1) # loss per batch element
assert translation_loss.size() == word_prediction_loss.size()
loss = translation_loss + word_prediction_loss
if self.args.sentence_avg:
sample_size = sample['target'].size(0)
else:
sample_size = sample['ntokens']
logging_output = {
'loss': translation_loss,
'word_prediction_loss': word_prediction_loss,
'ntokens': sample['ntokens'],
'sample_size': sample_size,
}
if reduce:
logging_output['loss'] = utils.item(logging_output['loss'])
logging_output['word_prediction_loss'] = utils.item(
logging_output['word_prediction_loss'])
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_output = {'sample_size': sample_size}
for loss in ['loss', 'word_prediction_loss']:
loss_sum = sum(log.get(loss, 0) for log in logging_outputs)
agg_output[loss] = loss_sum / sample_size / math.log(2)
if loss == 'loss' and sample_size != ntokens:
agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
return agg_output
|
the-stack_0_22404 | import rest_framework.authentication
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions
import waldur_core.logging.middleware
TOKEN_KEY = settings.WALDUR_CORE.get('TOKEN_KEY', 'x-auth-token')
def can_access_admin_site(user):
return user.is_active and (user.is_staff or user.is_support)
class AuthenticationBackend:
"""
Enables only support and staff to access admin site.
"""
def authenticate(self, request, username, password):
"""
Always return ``None`` to prevent authentication within this backend.
"""
return None
def has_perm(self, user_obj, perm, obj=None):
return can_access_admin_site(user_obj)
def has_module_perms(self, user_obj, app_label):
return can_access_admin_site(user_obj)
class TokenAuthentication(rest_framework.authentication.TokenAuthentication):
"""
Custom token-based authentication.
Use TOKEN_KEY from request query parameters if authentication token was not found in header.
"""
def get_authorization_value(self, request):
auth = rest_framework.authentication.get_authorization_header(request)
if not auth:
auth = request.query_params.get(TOKEN_KEY, '')
return auth
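    # Fallback: when no Authorization header is sent, the token may be supplied as a
    # query parameter instead, e.g. (hypothetical URL) GET /api/projects/?x-auth-token=Token%20<key>
    # (TOKEN_KEY defaults to 'x-auth-token').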
def authenticate_credentials(self, key):
model = self.get_model()
try:
token = model.objects.select_related('user').get(key=key)
except model.DoesNotExist:
raise exceptions.AuthenticationFailed(_('Invalid token.'))
if not token.user.is_active:
raise exceptions.AuthenticationFailed(_('User inactive or deleted.'))
if token.user.token_lifetime:
lifetime = timezone.timedelta(seconds=token.user.token_lifetime)
if token.created < timezone.now() - lifetime:
raise exceptions.AuthenticationFailed(_('Token has expired.'))
return token.user, token
def authenticate(self, request):
auth = self.get_authorization_value(request).split()
if not auth or auth[0].lower() != b'token':
return None
if len(auth) == 1:
msg = _('Invalid token. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token. Token string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
try:
token = auth[1].decode()
except UnicodeError:
msg = _(
'Invalid token header. Token string should not contain invalid characters.'
)
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(token)
def user_capturing_auth(auth):
class CapturingAuthentication(auth):
def authenticate(self, request):
result = super(CapturingAuthentication, self).authenticate(request)
if result is not None:
user, _ = result
waldur_core.logging.middleware.set_current_user(user)
token = user.auth_token
if token:
token.created = timezone.now()
token.save()
return result
return CapturingAuthentication
class CsrfExemptSessionAuthentication(
rest_framework.authentication.SessionAuthentication
):
def enforce_csrf(self, request):
return # Skip CSRF check
SessionAuthentication = user_capturing_auth(CsrfExemptSessionAuthentication)
TokenAuthentication = user_capturing_auth(TokenAuthentication)
|
the-stack_0_22405 | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measured Process for adaptive zeroing."""
import collections
from typing import Union
import attr
import tensorflow as tf
import tensorflow_privacy
import tree
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import intrinsics
from tensorflow_federated.python.core.api import placements
from tensorflow_federated.python.core.templates import measured_process
ValueType = Union[computation_types.TensorType, computation_types.StructType]
@attr.s(auto_attribs=True, eq=False, frozen=True)
class AdaptiveZeroingMetrics(object):
"""Structure metrics returned by adaptive zeroing mean prcoess.
Attributes:
current_threshold: A float. The current zeroing threshold.
num_zeroed: An integer. The number of updates that were zeroed this round.
"""
current_threshold: float
num_zeroed: int
def build_adaptive_zeroing_mean_process(
value_type: ValueType,
initial_threshold: float,
target_quantile: float,
multiplier: float,
learning_rate: float,
norm_order: float,
):
"""Builds `tff.templates.MeasuredProcess` for averaging with adaptive zeroing.
The returned `MeasuredProcess` averages values after zeroing out any values
whose norm is greater than C * r where C is adapted to approximate the q'th
quantile of the distribution of value norms. Its next function has the
following type signature:
({state_type}@SERVER,{value_type}@CLIENTS,{float32}@CLIENTS> ->
<state={state_type}@SERVER,result={value_type}@SERVER,
measurements=AdaptiveZeroingMetrics@SERVER>)
Args:
value_type: The type of values to be averaged by the `MeasuredProcess`. Can
be a `tff.TensorType` or a nested structure of `tff.StructType` that
bottoms out in `tff.TensorType`.
initial_threshold: The initial value of C * r. Values with norm greater than
this will be zeroed out.
target_quantile: The target quantile q. The adaptive process ensures that C
will approximate the q'th quantile of the distribution of value norms.
multiplier: The multiplier r of the quantile estimate C.
learning_rate: The learning rate l for the adaptive process. If the observed
fraction of values whose norm is less than C on a given round is p, then C
will be updated according to C *= exp(l * (q - p)). It follows that the
maximum possible update is multiplying or dividing by a factor of exp(l).
norm_order: The order of the norm. May be 1, 2, or np.inf.
Returns:
A `MeasuredProcess` implementing averaging values with adaptive zeroing with
the type signature described above.
"""
# Actually value_type can be any nested structure of StructType bottoming
# out in TensorType, but we'll just verify this much here.
py_typecheck.check_type(
value_type, (computation_types.TensorType, computation_types.StructType))
if isinstance(value_type, computation_types.StructType):
if not value_type:
raise ValueError("value_type cannot be empty.")
initial_quantile_estimate = initial_threshold / multiplier
quantile_query = tensorflow_privacy.NoPrivacyQuantileEstimatorQuery(
initial_estimate=initial_quantile_estimate,
target_quantile=target_quantile,
learning_rate=learning_rate,
geometric_update=True)
assert isinstance(quantile_query, tensorflow_privacy.SumAggregationDPQuery)
@computations.tf_computation
def initial_state_fn():
return quantile_query.initial_global_state()
@computations.federated_computation()
def initial_state_comp():
return intrinsics.federated_eval(initial_state_fn, placements.SERVER)
global_state_type = initial_state_fn.type_signature.result
@computations.tf_computation(global_state_type)
def derive_sample_params(global_state):
return quantile_query.derive_sample_params(global_state)
@computations.tf_computation(derive_sample_params.type_signature.result,
value_type, tf.float32)
def preprocess_value(params, value, weight):
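    # Flatten the client update, measure its norm, and zero it out (by forcing a zero
    # weight) whenever the norm exceeds current_estimate * multiplier; the quantile
    # record and the too_large flag feed the adaptive threshold and the metrics.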
vectors = tree.map_structure(lambda v: tf.reshape(v, [-1]), value)
norm = tf.norm(tf.concat(tree.flatten(vectors), axis=0), ord=norm_order)
quantile_record = quantile_query.preprocess_record(params, norm)
threshold = params.current_estimate * multiplier
too_large = (norm > threshold)
adj_weight = tf.cond(too_large, lambda: tf.constant(0.0), lambda: weight)
weighted_value = tree.map_structure(
lambda v: tf.math.multiply_no_nan(v, adj_weight), value)
too_large = tf.cast(too_large, tf.int32)
return weighted_value, adj_weight, quantile_record, too_large
quantile_record_type = preprocess_value.type_signature.result[2]
@computations.tf_computation(quantile_record_type, global_state_type)
def next_quantile(quantile_sum, global_state):
new_estimate, new_global_state = quantile_query.get_noised_result(
quantile_sum, global_state)
new_threshold = new_estimate * multiplier
return new_threshold, new_global_state
@computations.tf_computation(value_type, tf.float32)
def divide_no_nan(value_sum, total_weight):
return tree.map_structure(lambda v: tf.math.divide_no_nan(v, total_weight),
value_sum)
@computations.federated_computation(
initial_state_comp.type_signature.result,
computation_types.FederatedType(value_type, placements.CLIENTS),
computation_types.FederatedType(tf.float32, placements.CLIENTS))
def next_fn(global_state, value, weight):
sample_params = intrinsics.federated_broadcast(
intrinsics.federated_map(derive_sample_params, global_state))
weighted_value, adj_weight, quantile_record, too_large = (
intrinsics.federated_map(preprocess_value,
(sample_params, value, weight)))
value_sum = intrinsics.federated_sum(weighted_value)
total_weight = intrinsics.federated_sum(adj_weight)
quantile_sum = intrinsics.federated_sum(quantile_record)
num_zeroed = intrinsics.federated_sum(too_large)
mean_value = intrinsics.federated_map(divide_no_nan,
(value_sum, total_weight))
new_threshold, new_global_state = intrinsics.federated_map(
next_quantile, (quantile_sum, global_state))
measurements = intrinsics.federated_zip(
AdaptiveZeroingMetrics(new_threshold, num_zeroed))
return collections.OrderedDict(
state=new_global_state, result=mean_value, measurements=measurements)
return measured_process.MeasuredProcess(
initialize_fn=initial_state_comp, next_fn=next_fn)
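# Sketch of how this factory might be wired up (hypothetical values; assumes the model
# delta is a single float32 vector of length 10):
#   zeroing_mean = build_adaptive_zeroing_mean_process(
#       value_type=computation_types.TensorType(tf.float32, [10]),
#       initial_threshold=10.0, target_quantile=0.98, multiplier=2.0,
#       learning_rate=0.2, norm_order=2.0)
#   output = zeroing_mean.next(state, client_values, client_weights)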
|
the-stack_0_22406 | from django.shortcuts import render, redirect
from django.forms import inlineformset_factory
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import *
from .forms import CreateUserForm
# Create your views here.
def registerPage(request):
if request.user.is_authenticated:
return redirect('home')
else:
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
                messages.success(request, 'Account created for user ' + user)
return redirect('login')
context = {'form':form}
return render(request, 'clio/register.html', context)
def loginPage(request):
if request.user.is_authenticated:
return redirect('home')
else:
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
                messages.info(request, 'Incorrect username or password')
context = {}
return render(request, 'clio/login.html', context)
def logoutUser(request):
logout(request)
return redirect('login')
@login_required(login_url='login')
def home(request):
return render(request, 'clio/homepage.html') |
the-stack_0_22408 | import numpy.linalg as lg
import numpy as np
import random
from functools import reduce
class Lattice:
def __init__(self, time_size, space_size, dim, beta, linkType):
self.sites = np.zeros(
[time_size] + [space_size for _ in range(dim-1)],
dtype=linkType
)
self.links = np.zeros(
list(self.sites.shape) + [dim],
dtype=linkType
)
self.dim = dim
self.beta = beta
self.linkType = linkType
self.bases = np.array([
np.array([1,0,0,0]),
np.array([0,1,0,0]),
np.array([0,0,1,0]),
np.array([0,0,0,1]),
])
        # total number of lattice sites (product of the temporal and spatial extents)
        self.numSites = reduce(lambda a, b: a*b, self.sites.shape)
for link in self.iterLinks():
self.links[link] = linkType.getRandomElement()
def iterLinks(self):
for site in self.iterSites():
for mu in range(self.dim):
yield tuple(list(site)+[mu])
def iterSites(self):
for i in range(self.numSites):
indices = list()
for dim in self.sites.shape:
indices.append(i % dim)
                i = i // dim
yield tuple(indices)
def getNeighbors(self, site):
shape = self.sites.shape
neighbors = list()
for i, dim in enumerate(shape):
e = list(site)
if site[i] > 0:
e[i] = e[i]-1
else:
e[i] = dim-1
neighbors.append(tuple(e))
e = list(site)
if site[i] < dim - 1:
e[i] = e[i]+1
else:
e[i] = 0
neighbors.append(tuple(e))
return neighbors
def getRandomSite(self):
return tuple([random.randint(0, d-1) for d in self.sites.shape])
def getRandomLink(self):
return tuple([random.randint(0, d-1) for d in self.links.shape])
def localAction(self, *links):
S = 0.0
for link in links:
site1 = link[:-1]
mu = link[-1]
for nu in range(self.dim):
if nu != mu:
site2 = np.array(site1) - self.bases[nu]
S += 5.0/3.0 * (
self.plaquetteOperator(site1, mu, nu)
+ self.plaquetteOperator(site2, mu, nu)
)
return S
def totalAction(self):
S = 0.0
for site in self.iterSites():
for mu in range(self.dim):
for nu in range(self.dim):
if nu > mu:
S += 5.0/3.0 * self.plaquetteOperator(site, mu, nu)
return S
def plaquetteOperator(self, c, mu, nu):
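        # Plaquette: (1/3) Re Tr of the ordered product of the four links around the
        # elementary square spanned by directions mu and nu at site c (note the
        # hard-coded modulo 5 assumes a lattice extent of 5 in every direction).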
c = np.array(c)
return 1.0/3.0 * np.trace(
self.links[tuple(list(c%5)+[mu])]
*self.links[tuple(list((c+self.bases[mu])%5)+[nu])]
*self.links[tuple(list((c+self.bases[nu])%5)+[mu])].conjugate().T
*self.links[tuple(list(c%5)+[nu])].conjugate().T
).real
def rectOperator(self, c, mu, nu):
c = np.array(c)
return 1.0/3.0 * np.trace(
self.links[tuple(list(c%5)+[mu])]
*self.links[tuple(list((c+self.bases[mu])%5)+[mu])]
*self.links[tuple(list((c+2*self.bases[mu])%5)+[nu])]
*self.links[tuple(list((c+self.bases[mu]+self.bases[nu])%5)+[mu])].conjugate().T
*self.links[tuple(list((c+self.bases[nu])%5)+[mu])].conjugate().T
*self.links[tuple(list(c%5)+[nu])].conjugate().T
).real
def metropolisUpdate(self):
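        # Metropolis step: left-multiply a random link by a random group element and
        # accept the proposal with probability min(1, exp(beta * (Sf - Si))),
        # otherwise undo it by applying the conjugate transpose.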
link = self.getRandomLink()
U = self.linkType.getRandomElement()
Si = self.localAction(link)
self.links[link] = U * self.links[link]
Sf = self.localAction(link)
# print Sf - Si
if np.random.rand() > min(1, np.exp(self.beta*(Sf-Si))):
self.links[link] = U.conjugate().T * self.links[link]
|
the-stack_0_22410 | # coding: utf-8
# PYTHON IMPORTS
import re
import os
import unicodedata
import math
# DJANGO IMPORTS
from django.utils import six
# FILEBROWSER IMPORTS
from filebrowser.settings import STRICT_PIL, NORMALIZE_FILENAME, CONVERT_FILENAME
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
def convert_filename(value):
"""
Convert Filename.
"""
if NORMALIZE_FILENAME:
chunks = value.split(os.extsep)
normalized = []
for v in chunks:
v = unicodedata.normalize('NFKD', six.text_type(v)).encode('ascii', 'ignore').decode('ascii')
v = re.sub(r'[^\w\s-]', '', v).strip()
normalized.append(v)
if len(normalized) > 1:
value = '.'.join(normalized)
else:
value = normalized[0]
if CONVERT_FILENAME:
value = value.replace(" ", "_").lower()
return value
def path_strip(path, root):
if not path or not root:
return path
path = os.path.normcase(path)
root = os.path.normcase(root)
if path.startswith(root):
return path[len(root):]
return path
def scale_and_crop(im, width, height, opts):
"""
Scale and Crop.
"""
x, y = [float(v) for v in im.size]
width = float(width or 0)
height = float(height or 0)
if 'upscale' not in opts:
if (x < width or not width) and (y < height or not height):
return False
if width:
xr = width
else:
xr = x * height / y
if height:
yr = height
else:
yr = y * width / x
if 'crop' in opts:
r = max(xr / x, yr / y)
else:
r = min(xr / x, yr / y)
if r < 1.0 or (r > 1.0 and 'upscale' in opts):
im = im.resize((int(math.ceil(x * r)), int(math.ceil(y * r))), resample=Image.ANTIALIAS)
if 'crop' in opts:
x, y = [float(v) for v in im.size]
ex, ey = (x - min(x, xr)) / 2, (y - min(y, yr)) / 2
if ex or ey:
im = im.crop((int(ex), int(ey), int(ex + xr), int(ey + yr)))
return im
scale_and_crop.valid_options = ('crop', 'upscale')
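# Example (hypothetical): scale_and_crop(Image.open('photo.jpg'), 200, 200, ['crop'])
# returns a 200x200 centre-cropped PIL image, or False when the source is already
# smaller than the target and 'upscale' was not requested.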
|
the-stack_0_22411 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import sys
EULER = 0.5772156649015328606
EPS = 1e-10
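# Ei(x) is evaluated from its power series around 0:
#   Ei(x) = EULER + ln|x| + sum_{k>=1} x**k / (k * k!)
# where consecutive terms satisfy a_{k+1} = a_k * x*k / (k+1)**2.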
if __name__ == '__main__':
    x = float(input("Value of x?"))
    if x == 0:
        print("Illegal value of x", file=sys.stderr)
        exit(1)
    a = x
    S, k = a, 1
    while math.fabs(a) > EPS:
        a *= x*k / (k+1)**2
        S += a
        k += 1
print(f"Ei({x}) = {EULER + math.log(math.fabs(x))+S}") |
the-stack_0_22412 | import numpy as np
# from tensorflow import keras
from tensorflow import keras
import h5py
import os
import cv2
# data generator
class DataGenerator(keras.utils.Sequence):
def __init__(self, data_IDs,
labels,
batch_size=8,
dim=(288, 360, 3),
n_frames=41,
shuffle=True,
data_dir='./data/HockeyFights'):
self.dim = dim
self.batch_size = batch_size
self.n_frames = n_frames
self.shuffle = shuffle
self.data_IDs = data_IDs
self.labels = labels
self.data_dir = data_dir
self.on_epoch_end()
def __len__(self):
        return int(np.floor(len(self.data_IDs) / self.batch_size))
def __getitem__(self, idx):
# Generate one batch of data
indexes = self.indexes[idx*self.batch_size:(idx+1)*self.batch_size]
# Find list of IDs
data_IDs_temp = [self.data_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(data_IDs_temp)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.data_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, data_IDs_temp):
# X : (n_samples, *dim, n_frames)
# Initialization
X = np.empty((self.batch_size, self.n_frames, *self.dim), dtype=np.float32)
y = np.empty((self.batch_size), dtype=int)
# Generate data
for i, ID in enumerate(data_IDs_temp):
# Store sample
h5_file = h5py.File(os.path.join(self.data_dir, f'./processed/{ID}.h5'), 'r')
            # Preprocessing: random-crop clips larger than the target size, otherwise resize them
if (h5_file['data'].shape[1] > self.dim[0]) and (h5_file['data'].shape[2] > self.dim[1]):
data = random_cropping(h5_file['data'], self.dim[:2])
else:
data = np.asarray([cv2.resize(im, dsize=self.dim[:2], interpolation=cv2.INTER_CUBIC)
for im in h5_file['data']])
X[i,] = data / 255.
h5_file.close()
# Store class
y[i] = self.labels[ID]
return X, keras.utils.to_categorical(y, num_classes=2)
# Random cropping
def random_cropping(data, crop_size=(224, 224)):
height, width = data.shape[1], data.shape[2]
he_idx = int(np.random.uniform(0, height - crop_size[0] + 1, size=()))
    wi_idx = int(np.random.uniform(0, width - crop_size[1] + 1, size=()))
data = data[:, he_idx:he_idx + crop_size[0], wi_idx:wi_idx + crop_size[1]]
return data
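# Usage sketch (hypothetical IDs and labels; assumes ./data/HockeyFights/processed/<ID>.h5
# files each holding an (n_frames, H, W, 3) uint8 'data' dataset):
#   train_gen = DataGenerator(data_IDs=['fi1_xvid', 'no1_xvid'],
#                             labels={'fi1_xvid': 1, 'no1_xvid': 0},
#                             batch_size=2, dim=(288, 360, 3))
#   model.fit(train_gen, steps_per_epoch=get_steps_hockey(len(train_gen.data_IDs), 2))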
def get_steps_hockey(num_data, batch):
return num_data//batch + 1 |
the-stack_0_22414 | import pyodbc
try:
    connect = pyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\Users\steff\OneDrive\Desktop\files\Database1.accdb;')
print("Connected to a Database")
Fullname = "Almeda, Stephanie Joy G."
Assignment = 87
Laboratory = 90
Quiz = 85
Exam = 94
user_id = 10
record = connect.cursor()
record.execute('UPDATE Table1 SET Fullname = ?, Assignment = ?, Laboratory = ?, Quiz = ?, Exam = ? WHERE id = ?', (Fullname, Assignment, Laboratory, Quiz, Exam, user_id))
record.commit()
print("Data is updated")
except pyodbc.Error as e:
    print("Error in Connection:", e)
|
the-stack_0_22415 | from __future__ import print_function
from cement.core import controller
from cement.core import handler
from sandglass.time import __version__
class ShowController(controller.CementBaseController):
"""
Sandglass CLI command that display sandglass information.
"""
class Meta:
label = 'show'
interface = controller.IController
description = "Show sandglass information"
stacked_on = 'base'
stacked_type = 'nested'
@controller.expose(help="display sandglass version")
def version(self):
print("Sandglass", __version__)
handler.register(ShowController)
|
the-stack_0_22416 | #!/usr/bin/env python
#
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
#
# minorview.py: Minorview visuliser for MinorCPU model MinorTrace output
#
import gtk
import os
import sys
import argparse
# Find MinorView modules even if not called from minorview directory
minorviewDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(minorviewDir)
from minorview.model import BlobModel
from minorview.view import BlobView, BlobController, BlobWindow
from minorview.point import Point
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Minor visualiser')
parser.add_argument('--picture', metavar='picture-file',
default=minorviewDir + '/minorview/minor.pic',
help='markup file containing blob information '
+ '(default: <minorview-path>/minor.pic)')
parser.add_argument('--prefix', metavar='name', default='system.cpu',
help='name prefix in trace for CPU to be visualised (default: '
+ 'system.cpu)')
parser.add_argument('--start-time', metavar='time', type=int, default=0,
help='time of first event to load from file')
parser.add_argument('--end-time', metavar='time', type=int, default=None,
help='time of last event to load from file')
parser.add_argument('--mini-views', action='store_true', default=False,
help='show tiny views of the next 10 time steps')
parser.add_argument('eventFile', metavar='event-file', default='ev')
args = parser.parse_args(sys.argv[1:])
model = BlobModel(unitNamePrefix=args.prefix)
if args.picture and os.access(args.picture, os.O_RDONLY):
model.load_picture(args.picture)
else:
parser.error('Can\'t read picture file: ' + args.picture)
# Make the key objects
view = BlobView(model)
controller = BlobController(model, view,
defaultEventFile=args.eventFile,
defaultPictureFile=args.picture)
window = BlobWindow(model, view, controller)
window.add_control_bar(controller.bar)
# Miniviews allow future timesteps to appear at the bottom of the
# display.
if args.mini_views:
window.miniViewCount = 10
window.show_window()
if args.eventFile and os.access(args.eventFile, os.O_RDONLY):
controller.startTime = args.start_time
controller.endTime = args.end_time
model.load_events(args.eventFile, startTime=args.start_time,
endTime=args.end_time)
controller.set_time_index(0)
else:
parser.error('Can\'t read event file: ' + args.eventFile)
gtk.main()
|
the-stack_0_22417 | from django.core.management.base import BaseCommand
from courses.models import Submission
class Command(BaseCommand):
help = (
"ONE Time command to calculate missing data. It will go through ALL Submissions and calculate "
"'video_watched_percent' if missing, however the lecture has to have 'video_duration' stored in metadata."
)
def handle(self, *args, **options):
for submission in Submission.objects.all():
# Rename stored data: watched_video_time_range changed to video_watched_time_range (all starts with video)
if submission.metadata and "watched_video_time_range" in submission.metadata:
submission.metadata["video_watched_time_range"] = submission.metadata["watched_video_time_range"]
del submission.metadata["watched_video_time_range"]
if submission.metadata and "video_watched_time_range" in submission.metadata:
if submission.lecture.metadata and "video_duration" in submission.lecture.metadata:
video_watched = 0
for video_range in submission.metadata["video_watched_time_range"]:
video_watched += video_range[1] - video_range[0]
video_watched_percent = video_watched / submission.lecture.metadata["video_duration"] * 100
submission.metadata["video_watched_percent"] = round(video_watched_percent, 1)
submission.save()
self.stdout.write(self.style.SUCCESS("Done. Bye!"))
|
the-stack_0_22419 | #!/usr/bin/python
# File: pbs_status.py
#
# Author: Brian Bockelman
# e-mail: [email protected]
#
#
# Copyright (c) University of Nebraska-Lincoln. 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Query PBS (or SLURM with the PBS emulation layer) for the status of a given job
Internally, it creates a cache of the PBS qstat response and will reuse this
for subsequent queries.
"""
from __future__ import print_function
import os
import re
import pwd
import sys
import time
import errno
import fcntl
import random
import struct
import subprocess
import signal
import tempfile
import pickle
import csv
cache_timeout = 60
launchtime = time.time()
def log(msg):
"""
A very lightweight log - not meant to be used in production, but helps
when debugging scale tests
"""
print(time.strftime("%x %X"), os.getpid(), msg, file = sys.stderr)
def createCacheDir():
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "qstat_cache_%s" % username)
try:
os.mkdir(cache_dir, 0o755)
except OSError as oe:
if oe.errno != errno.EEXIST:
raise
s = os.stat(cache_dir)
if s.st_uid != uid:
raise Exception("Unable to check cache because it is owned by UID %d" % s.st_uid)
return cache_dir
def initLog():
"""
Determine whether to create a logfile based on the presence of a file
in the user's qstat cache directory. If so, make the logfile there.
"""
cache_dir = createCacheDir()
if os.path.exists(os.path.join(cache_dir, "pbs_status.debug")):
filename = os.path.join(cache_dir, "pbs_status.log")
else:
filename = "/dev/null"
fd = open(filename, "a")
# Do NOT close the file descriptor blahp originally hands us for stderr.
# This causes blahp to lose all status updates.
os.dup(2)
os.dup2(fd.fileno(), 2)
# Something else from a prior life - see gratia-probe-common's GratiaWrapper.py
def ExclusiveLock(fd, timeout=120):
"""
Grabs an exclusive lock on fd
If the lock is owned by another process, and that process is older than the
timeout, then the other process will be signaled. If the timeout is
negative, then the other process is never signaled.
If we are unable to hold the lock, this call will not block on the lock;
rather, it will throw an exception.
By default, the timeout is 120 seconds.
"""
# POSIX file locking is cruelly crude. There's nothing to do besides
# try / sleep to grab the lock, no equivalent of polling.
# Why hello, thundering herd.
    # An alternative would be to block on the lock, and use signals to interrupt.
# This would mess up Gratia's flawed use of signals already, and not be
# able to report on who has the lock. I don't like indefinite waits!
max_time = 30
starttime = time.time()
tries = 1
while time.time() - starttime < max_time:
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return
except IOError as ie:
if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
raise
if check_lock(fd, timeout):
time.sleep(.2) # Fast case; however, we have *no clue* how
# long it takes to clean/release the old lock.
# Nor do we know if we'd get it if we did
# fcntl.lockf w/ blocking immediately. Blech.
# Check again immediately, especially if this was the last
# iteration in the for loop.
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return
except IOError as ie:
if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
raise
sleeptime = random.random()
log("Unable to acquire lock, try %i; will sleep for %.2f " \
"seconds and try for %.2f more seconds." % (tries, sleeptime, max_time - (time.time()-starttime)))
tries += 1
time.sleep(sleeptime)
log("Fatal exception - Unable to acquire lock")
raise Exception("Unable to acquire lock")
def check_lock(fd, timeout):
"""
For internal use only.
Given a fd that is locked, determine which process has the lock.
Kill said process if it is older than "timeout" seconds.
This will log the PID of the "other process".
"""
pid = get_lock_pid(fd)
if pid == os.getpid():
return True
if timeout < 0:
log("Another process, %d, holds the cache lock." % pid)
return False
try:
age = get_pid_age(pid)
except:
log("Another process, %d, holds the cache lock." % pid)
log("Unable to get the other process's age; will not time it out.")
return False
log("Another process, %d (age %d seconds), holds the cache lock." % (pid, age))
if age > timeout:
os.kill(pid, signal.SIGKILL)
else:
return False
return True
linux_struct_flock = "hhxxxxqqixxxx"
try:
os.O_LARGEFILE
except AttributeError:
start_len = "hhlli"
def get_lock_pid(fd):
# For reference, here's the definition of struct flock on Linux
# (/usr/include/bits/fcntl.h).
#
# struct flock
# {
# short int l_type; /* Type of lock: F_RDLCK, F_WRLCK, or F_UNLCK. */
# short int l_whence; /* Where `l_start' is relative to (like `lseek'). */
# __off_t l_start; /* Offset where the lock begins. */
# __off_t l_len; /* Size of the locked area; zero means until EOF. */
# __pid_t l_pid; /* Process holding the lock. */
# };
#
# Note that things are different on Darwin
# Assuming off_t is unsigned long long, pid_t is int
try:
if sys.platform == "darwin":
arg = struct.pack("QQihh", 0, 0, 0, fcntl.F_WRLCK, 0)
else:
arg = struct.pack(linux_struct_flock, fcntl.F_WRLCK, 0, 0, 0, 0)
result = fcntl.fcntl(fd, fcntl.F_GETLK, arg)
except IOError as ie:
if ie.errno != errno.EINVAL:
raise
log("Unable to determine which PID has the lock due to a "
"python portability failure. Contact the developers with your"
" platform information for support.")
return False
if sys.platform == "darwin":
_, _, pid, _, _ = struct.unpack("QQihh", result)
else:
_, _, _, _, pid = struct.unpack(linux_struct_flock, result)
return pid
def get_pid_age(pid):
now = time.time()
st = os.stat("/proc/%d" % pid)
return now - st.st_ctime
def qstat(jobid=""):
"""
Call qstat directly for a jobid.
If none is specified, query all jobid's.
Returns a python dictionary with the job info.
"""
qstat = get_qstat_location()
command = (qstat, '--version')
qstat_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
qstat_version, _ = qstat_process.communicate()
starttime = time.time()
log("Starting qstat.")
if re.search(r'PBSPro', qstat_version):
child_stdout = os.popen("%s -f %s" % (qstat, jobid)) # -1 conflicts with -f in PBS Pro
else:
child_stdout = os.popen("%s -f -1 %s" % (qstat, jobid))
result = parse_qstat_fd(child_stdout)
exit_status = child_stdout.close()
log("Finished qstat (time=%f)." % (time.time()-starttime))
if exit_status:
exit_code = 0
if os.WIFEXITED(exit_status):
exit_code = os.WEXITSTATUS(exit_status)
if exit_code == 153 or exit_code == 35: # Completed
result = {jobid: {'BatchJobId': '"%s"' % jobid, "JobStatus": "4", "ExitCode": ' 0'}}
elif exit_code == 271: # Removed
result = {jobid: {'BatchJobId': '"%s"' % jobid, 'JobStatus': '3', 'ExitCode': ' 0'}}
else:
raise Exception("qstat failed with exit code %s" % str(exit_status))
# If the job has completed...
if jobid is not "" and "JobStatus" in result[jobid] and (result[jobid]["JobStatus"] == '4' or result[jobid]["JobStatus"] == '3'):
# Get the finished job stats and update the result
finished_job_stats = get_finished_job_stats(jobid)
result[jobid].update(finished_job_stats)
return result
def which(program):
"""
Determine if the program is in the path.
arg program: name of the program to search
returns: full path to executable, or None if executable is not found
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def convert_cpu_to_seconds(cpu_string):
import re
h,m,s = re.split(':',cpu_string)
return int(h) * 3600 + int(m) * 60 + int(s)
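# A quick sanity check for convert_cpu_to_seconds (illustrative, not part of the
# original script): "01:02:03" -> 1 * 3600 + 2 * 60 + 3 = 3723 seconds.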
_cluster_type_cache = None
def get_finished_job_stats(jobid):
"""
Get a completed job's statistics such as used RAM and cpu usage.
"""
# List the attributes that we want
return_dict = { "ImageSize": 0, "ExitCode": 0, "RemoteUserCpu": 0 }
# First, determine if this is a pbs or slurm machine.
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "qstat_cache_%s" % username)
cluster_type_file = os.path.join(cache_dir, "cluster_type")
global _cluster_type_cache
if not _cluster_type_cache:
# Look for the special file, cluster_type
if os.path.exists(cluster_type_file):
_cluster_type_cache = open(cluster_type_file).read()
else:
# No idea what type of cluster is running, not set, so give up
log("cluster_type file is not present, not checking for completed job statistics")
return return_dict
# Slurm completion
if _cluster_type_cache == "slurm":
# Next, query the appropriate interfaces for the completed job information
# TODO: fix for pbs
log("Querying sacct for completed job for jobid: %s" % (str(jobid)))
child_stdout = os.popen("sacct -j %s -l --noconvert -P" % (str(jobid)))
sacct_data = child_stdout.readlines()
ret = child_stdout.close()
if ret:
# retry without --noconvert for slurm < 15.8
child_stdout = os.popen("sacct -j %s -l -P" % (str(jobid)))
sacct_data = child_stdout.readlines()
child_stdout.close()
try:
reader = csv.DictReader(sacct_data, delimiter="|")
except Exception as e:
log("Unable to read in CSV output from sacct: %s" % str(e))
return return_dict
# Slurm can return more than 1 row, for some odd reason.
# so sum up relevant values
for row in reader:
if row["AveCPU"] is not "":
return_dict['RemoteUserCpu'] += convert_cpu_to_seconds(row["AveCPU"]) * int(row["AllocCPUS"])
if row["MaxRSS"] is not "":
# Remove the trailing [KMGTP] and scale the value appropriately
# Note: We assume that all values will have a suffix, and we
# want the value in kilos.
value = row["MaxRSS"]
factor = 1
if value[-1] == 'M':
factor = 1024
elif value[-1] == 'G':
factor = 1024 * 1024
elif value[-1] == 'T':
factor = 1024 * 1024 * 1024
elif value[-1] == 'P':
factor = 1024 * 1024 * 1024 * 1024
return_dict["ImageSize"] += int(value.strip('KMGTP')) * factor
if row["ExitCode"] is not "":
return_dict["ExitCode"] = int(row["ExitCode"].split(":")[0])
# PBS completion
elif _cluster_type_cache == "pbs":
pass
return return_dict
_qstat_location_cache = None
def get_qstat_location():
"""
Locate the copy of qstat the blahp configuration wants to use.
"""
global _qstat_location_cache
if _qstat_location_cache is not None:
return _qstat_location_cache
load_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'blah_load_config.sh')
if os.path.exists(load_config_path) and os.access(load_config_path, os.R_OK):
cmd = "/bin/bash -c 'source %s && echo $pbs_binpath/qstat'" % load_config_path
else:
cmd = 'which qstat'
child_stdout = os.popen(cmd)
output = child_stdout.read()
location = output.split("\n")[0].strip()
if child_stdout.close():
raise Exception("Unable to determine qstat location: %s" % output)
_qstat_location_cache = location
return location
job_id_re = re.compile(r"\s*Job Id:\s([0-9]+)([\w\-\/.]*)")
exec_host_re = re.compile(r"\s*exec_host = ([\w\-\/.]+)")
status_re = re.compile(r"\s*job_state = ([QREFCH])")
exit_status_re = re.compile(r"\s*[Ee]xit_status = (-?[0-9]+)")
status_mapping = {"Q": 1, "R": 2, "E": 2, "F": 4, "C": 4, "H": 5}
def parse_qstat_fd(fd):
"""
Parse the stdout fd of "qstat -f" into a python dictionary containing
the information we need.
"""
job_info = {}
cur_job_id = None
cur_job_info = {}
for line in fd:
line = line.strip()
m = job_id_re.match(line)
if m:
if cur_job_id:
job_info[cur_job_id] = cur_job_info
cur_job_id = m.group(1)
#print cur_job_id, line
cur_job_info = {"BatchJobId": '"%s"' % cur_job_id.split(".")[0]}
continue
if cur_job_id is None:
continue
m = exec_host_re.match(line)
if m:
cur_job_info["WorkerNode"] = '"' + m.group(1).split("/")[0] + '"'
continue
m = status_re.match(line)
if m:
status = status_mapping.get(m.group(1), 0)
if status != 0:
cur_job_info["JobStatus"] = str(status)
continue
m = exit_status_re.match(line)
if m:
cur_job_info["ExitCode"] = ' %s' % m.group(1)
continue
if cur_job_id:
job_info[cur_job_id] = cur_job_info
return job_info
def job_dict_to_string(info):
result = ["%s=%s;" % (i[0], i[1]) for i in info.items()]
return "[" + " ".join(result) + " ]"
def fill_cache(cache_location):
log("Starting query to fill cache.")
results = qstat()
log("Finished query to fill cache.")
(fd, filename) = tempfile.mkstemp(dir = "/var/tmp")
# Open the file with a proper python file object
f = os.fdopen(fd, "w")
writer = csv.writer(f, delimiter='\t')
try:
try:
for key, val in results.items():
key = key.split(".")[0]
writer.writerow([key, pickle.dumps(val)])
os.fsync(fd)
except:
os.unlink(filename)
raise
finally:
f.close()
os.rename(filename, cache_location)
# Create the cluster_type file
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "qstat_cache_%s" % username)
cluster_type_file = os.path.join(cache_dir, "cluster_type")
(fd, filename) = tempfile.mkstemp(dir = "/var/tmp")
global _cluster_type_cache
if which("sacct"):
os.write(fd, "slurm")
_cluster_type_cache = "slurm"
else:
log("Unable to find cluster type")
os.close(fd)
os.rename(filename, cluster_type_file)
global launchtime
launchtime = time.time()
cache_line_re = re.compile(r"([0-9]+[\.\w\-]+):\s+(.+)")
def cache_to_status(jobid, fd):
reader = csv.reader(fd, delimiter='\t')
for row in reader:
if row[0] == jobid:
return pickle.loads(row[1])
def check_cache(jobid, recurse=True):
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "qstat_cache_%s" % username)
if recurse:
try:
s = os.stat(cache_dir)
except OSError as oe:
if oe.errno != 2:
raise
os.mkdir(cache_dir, 0o755)
s = os.stat(cache_dir)
if s.st_uid != uid:
raise Exception("Unable to check cache because it is owned by UID %d" % s.st_uid)
cache_location = os.path.join(cache_dir, "blahp_results_cache")
try:
fd = open(cache_location, "a+")
except IOError as ie:
if ie.errno != 2:
raise
# Create an empty file so we can hold the file lock
fd = open(cache_location, "w+")
ExclusiveLock(fd)
# If someone grabbed the lock between when we opened and tried to
# acquire, they may have filled the cache
if os.stat(cache_location).st_size == 0:
fill_cache(cache_location)
fd.close()
if recurse:
return check_cache(jobid, recurse=False)
else:
return None
ExclusiveLock(fd)
s = os.fstat(fd.fileno())
if s.st_uid != uid:
raise Exception("Unable to check cache file because it is owned by UID %d" % s.st_uid)
if (s.st_size == 0) or (launchtime - s.st_mtime > cache_timeout):
# If someone filled the cache between when we opened the file and
# grabbed the lock, we may not need to fill the cache.
s2 = os.stat(cache_location)
if (s2.st_size == 0) or (launchtime - s2.st_mtime > cache_timeout):
fill_cache(cache_location)
if recurse:
return check_cache(jobid, recurse=False)
else:
return None
return cache_to_status(jobid, fd)
job_status_re = re.compile(r".*JobStatus=(\d+);.*")
def main():
initLog()
# Accept the optional -w argument, but ignore it
if len(sys.argv) == 2:
jobid_arg = sys.argv[1]
elif len(sys.argv) == 3 and sys.argv[1] == "-w":
jobid_arg = sys.argv[2]
else:
print("1Usage: pbs_status.sh pbs/<date>/<jobid>")
return 1
jobid = jobid_arg.split("/")[-1].split(".")[0]
log("Checking cache for jobid %s" % jobid)
cache_contents = None
try:
cache_contents = check_cache(jobid)
except Exception as e:
msg = "1ERROR: Internal exception, %s" % str(e)
log(msg)
#print msg
if not cache_contents:
log("Jobid %s not in cache; querying PBS" % jobid)
results = qstat(jobid)
log("Finished querying PBS for jobid %s" % jobid)
if not results or jobid not in results:
log("1ERROR: Unable to find job %s" % jobid)
print("1ERROR: Unable to find job %s" % jobid)
else:
log("0%s" % job_dict_to_string(results[jobid]))
print("0%s" % job_dict_to_string(results[jobid]))
else:
log("Jobid %s in cache." % jobid)
log("0%s" % job_dict_to_string(cache_contents))
if cache_contents["JobStatus"] == '4' or cache_contents["JobStatus"] == '3':
finished_job_stats = get_finished_job_stats(jobid)
cache_contents.update(finished_job_stats)
print("0%s" % job_dict_to_string(cache_contents))
return 0
if __name__ == "__main__":
try:
sys.exit(main())
except SystemExit:
raise
except Exception as e:
print("1ERROR: %s" % str(e).replace("\n", "\\n"))
sys.exit(0)
|
the-stack_0_22422 | #!/usr/bin/env python3
"""
Lists all the UAVCAN files in a format compatible with CMake (column separated
data).
It is intended to be handled as part of the build, not be used interactively.
"""
import argparse
import glob
import os.path
import re
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--output-dir", help="Generate list of outputs")
parser.add_argument(
"dsdl_dirs", help="Files containing dsdl definitions", nargs="+"
)
return parser.parse_args()
def output_file(input_path: str, input_dir: str, output_path: str) -> str:
"""
Converts an input path to an output DSDLC path
>>> output_file('foo/uavcan_data_types/cvra/20001.Reboot.uavcan', 'foo/uavcan_data_types/cvra', 'dsdlc_generated')
'dsdlc_generated/cvra/Reboot.hpp'
"""
input_dir = os.path.join(*input_dir.split("/")[:-1])
path = input_path.replace(input_dir, "")
path = path.split("/", maxsplit=1)
# Change the extension
path[-1] = path[-1].replace(".uavcan", ".hpp")
path[-1] = re.sub(r"[0-9]+\.", "", path[-1])
return os.path.join(output_path, *path)
def main():
args = parse_args()
input_files = []
output_files = []
for d in args.dsdl_dirs:
files = glob.glob(os.path.join(d, "**/**.uavcan"), recursive=True)
input_files += files
if args.output_dir:
output_files += [output_file(i, d, args.output_dir) for i in files]
if args.output_dir:
print(output_files[0], end="")
else:
print(";".join(os.path.abspath(p) for p in input_files), end="")
if __name__ == "__main__":
main()
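# A hedged usage sketch (script and directory names are placeholders): invoked
# from CMake as
#
#     python list_dsdl.py uavcan_data_types/cvra
#
# it prints a ';'-separated list of the absolute .uavcan paths found, while
# adding --output-dir dsdlc_generated prints the first generated header path.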
|
the-stack_0_22424 | """
To get standard out, run nosetests as follows:
nosetests -sv tests
nosetests --verbosity=2 --detailed-errors --nologcapture --processes=4 --process-restartworker --process-timeout=1000 tests
"""
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
import dill
import numpy as np
import utils_testing as utils
def test_linear_model_analytics_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names='RidgeClassifier')
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.21 < test_score < -0.131
def test_input_df_unmodified():
np.random.seed(42)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
df_shape = df_boston_train.shape
ml_predictor.train(df_boston_train)
training_shape = df_boston_train.shape
assert training_shape[0] == df_shape[0]
assert training_shape[1] == df_shape[1]
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
assert -3.35 < test_score < -2.8
def test_model_uses_user_provided_training_params(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
try:
ml_predictor.train(df_titanic_train, model_names='RidgeClassifier', training_params={'this_param_is_not_valid': True})
assert False
except ValueError as e:
assert True
def test_ignores_new_invalid_features():
# One of the great unintentional features of auto_ml is that you can pass in new features at prediction time, that weren't present at training time, and they're silently ignored!
# One edge case here is new features that are strange objects (lists, datetimes, intervals, or anything else that we can't process in our default data processing pipeline). Initially, we just ignored them in dict_vectorizer, but we need to ignore them earlier.
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
if random.random() > 0.9:
row['totally_new_feature'] = datetime.datetime.now()
row['really_strange_feature'] = random.random
row['we_should_really_ignore_this'] = Predictor
row['pretty_vanilla_ignored_field'] = 8
row['potentially_confusing_things_here'] = float('nan')
row['potentially_confusing_things_again'] = float('inf')
row['this_is_a_list'] = [1,2,3,4,5]
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('predictions[0]')
print(predictions[0])
print('type(predictions)')
print(type(predictions))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -3.0
assert lower_bound < first_score < -2.7
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.1 < duration.total_seconds() / 1.0 < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.7
|
the-stack_0_22425 | # -*- coding: utf-8 -*-
def calc_depth(vertex_count: int, graph):
"""Calculates depth of rooted tree.
Assumption:
The graph is connected.
Args:
vertex_count : The number of vertices in rooted tree.
graph : Rooted tree (0-indexed).
Returns:
depth : Depth of rooted tree (0-indexed).
    Landau notation: O(|Vertices| + |Edges|) (breadth-first traversal).
"""
from collections import deque
PENDING = -1
depth = [PENDING for _ in range(vertex_count)]
parent = [PENDING for _ in range(vertex_count)]
depth[0], parent[0] = 0, 0
d = deque()
d.append(0)
while d:
vertex = d.popleft()
for g in graph[vertex]:
if depth[g] == PENDING:
depth[g] = depth[vertex] + 1
parent[g] = vertex
d.append(g)
return depth
def run_imos(graph, depth, imos):
"""Calculate values of rooted tree faster using imos method.
Args:
graph : Rooted tree (0-indexed).
depth : Depth of rooted tree (0-indexed).
imos : List of some values (0-indexed).
Returns:
imos : List of cumulative sum of values (0-indexed).
    Landau notation: O(|Vertices| + |Edges|) (breadth-first traversal).
"""
from collections import deque
d = deque()
d.append(0)
while d:
vertex = d.popleft()
for g in graph[vertex]:
if depth[vertex] < depth[g]:
imos[g] += imos[vertex]
d.append(g)
return imos
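# A small self-contained demo (not part of the original module) showing how
# calc_depth and run_imos fit together on a 5-vertex rooted tree.
if __name__ == "__main__":
    # Tree rooted at 0:  0 -> 1, 0 -> 2, 1 -> 3, 1 -> 4 (stored undirected).
    graph = [[1, 2], [0, 3, 4], [0], [1], [1]]
    depth = calc_depth(5, graph)          # [0, 1, 1, 2, 2]
    values = [1, 10, 100, 1000, 10000]    # initial value per vertex
    totals = run_imos(graph, depth, values)
    # Each vertex now holds the sum of the initial values on its root path:
    # [1, 11, 101, 1011, 10011]
    print(depth, totals)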
|
the-stack_0_22426 | import numpy as np
class DataStats:
"""
This class holds statistics for any kind of numerical data.
Args:
        mean (float): The mean over all data points.
        var (float): The variance over all data points.
        min_val (float): The minimum value within all data points.
        max_val (float): The maximum value within all data points.
        num (int): The number of data points from which these stats were calculated.
"""
__slots__ = ['mean', 'var', 'min', 'max', 'num']
def __init__(self, mean, var, min_val, max_val, num):
self.mean = mean
self.var = var
self.min = min_val
self.max = max_val
self.num = num
@property
def values(self):
""" Return all values as numpy-array (mean, var, min, max, num). """
return np.array([self.mean, self.var, self.min, self.max, self.num])
def to_dict(self):
""" Return the stats as a dictionary. """
return {
'mean': self.mean,
'var': self.var,
'min': self.min,
'max': self.max,
'num': self.num
}
@classmethod
def from_dict(cls, dict_with_stats):
"""
Create a DataStats object from a dictionary with stats.
Args:
dict_with_stats (dict): Dictionary containing stats.
Returns:
(DataStats): Statistics
"""
return DataStats(dict_with_stats['mean'],
dict_with_stats['var'],
dict_with_stats['min'],
dict_with_stats['max'],
dict_with_stats['num'])
@classmethod
def concatenate(cls, list_of_stats):
"""
        Take a list of stats from different sets of data points and
        merge them to obtain the stats over all data points combined.
Args:
list_of_stats (iterable): A list containing stats
for different sets of data points.
Returns:
            DataStats: Stats calculated over all sets of data points.
"""
all_stats = np.stack([stats.values for stats in list_of_stats])
all_counts = all_stats[:, 4]
all_counts_relative = all_counts / np.sum(all_counts)
min_value = float(np.min(all_stats[:, 2]))
max_value = float(np.max(all_stats[:, 3]))
mean_value = float(np.sum(all_counts_relative * all_stats[:, 0]))
var_value = float(np.sum(all_counts_relative * (all_stats[:, 1] + np.power(all_stats[:, 0] - mean_value, 2))))
num_value = int(np.sum(all_counts))
return cls(mean_value, var_value, min_value, max_value, num_value)
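# A short self-contained example (not part of the original module): merge the
# stats of two batches and compare against stats computed on the pooled data.
if __name__ == "__main__":
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([5.0, 7.0])
    stats_a = DataStats(a.mean(), a.var(), a.min(), a.max(), a.size)
    stats_b = DataStats(b.mean(), b.var(), b.min(), b.max(), b.size)
    merged = DataStats.concatenate([stats_a, stats_b])
    pooled = np.concatenate([a, b])
    # merged.mean matches pooled.mean() (3.6) and merged.var matches
    # pooled.var() (4.64), up to floating-point rounding.
    print(merged.to_dict())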
|
the-stack_0_22430 | # -*- coding: utf-8 -*-
"""
@Author: oisc <[email protected]>
@Date: 2018/5/2
@Description: Default syntactic and dependency parser
"""
from interface import SentenceParser, SentenceParseError
from nltk.tree import ParentedTree
from nltk.parse.stanford import StanfordDependencyParser
from structure.dependency import DependencyGraph
import subprocess
import threading
import jieba
from jieba import posseg
import os
import logging
jieba.setLogLevel(logging.ERROR)
LRB = '-LRB-'
RRB = '-RRB-'
PREFIX = os.path.dirname(__file__)
BERKELEY_JAR = os.path.join(PREFIX, "../berkeleyparser/BerkeleyParser-1.7.jar")
BERKELEY_GRAMMAR = os.path.join(PREFIX, "../berkeleyparser/chn_sm5.gr")
STANFORD_JAR = os.path.join(PREFIX, "../stanford/stanford-parser.jar")
STANFORD_MODEL = os.path.join(PREFIX, "../stanford/stanford-chinese-corenlp-2018-02-27-models.jar")
STANFORD_GRAMMAR = "edu/stanford/nlp/models/lexparser/chinesePCFG.ser.gz"
class BerkeleyWarpper(object):
def __init__(self, path_to_jar: str, path_to_grammar: str, binarize=False, timeout=20):
self.env = dict(os.environ)
self.java_opt = ['-Xmx1024m']
self.jar = path_to_jar
self.gr = path_to_grammar
self.timeout = timeout
# check java
try:
subprocess.run(['java', '-version'])
except subprocess.CalledProcessError:
raise EnvironmentError("Java should be placed in system PATH environment!")
# start berkeley parser process
cmd = ['java']
cmd.extend(self.java_opt)
cmd.extend(['-jar', self.jar, '-gr', self.gr])
if binarize:
cmd.append('-binarize')
self.cmd = cmd
self.start()
def start(self):
self.process = subprocess.Popen(self.cmd, env=self.env, universal_newlines=True, shell=False, bufsize=0,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
errors='ignore')
def stop(self):
if hasattr(self, "process") and self.process:
self.process.terminate()
def restart(self):
self.stop()
self.start()
def parse_thread(self, text, results):
self.process.stdin.write(text + '\n')
self.process.stdin.flush()
ret = self.process.stdout.readline().strip()
results.append(ret)
def parse(self, text: str):
results = []
t = threading.Thread(target=self.parse_thread, kwargs={'text': text, 'results': results})
t.setDaemon(True)
t.start()
t.join(self.timeout)
if not results:
self.restart()
raise TimeoutError
else:
return results[0]
def __del__(self):
self.stop()
class StanfordWrapper(StanfordDependencyParser):
def _execute(self, cmd, input_, verbose=False):
# command hack
cmd.extend(['-outputFormatOptions', 'includePunctuationDependencies'])
return StanfordDependencyParser._execute(self, cmd, input_, verbose)
def _make_tree(self, result):
# pickleable hack
return DependencyGraph(result, top_relation_label='root')
def grammar(self):
raise NotImplementedError()
class ZhBerkeleyParser(SentenceParser):
name = "berkeley"
def __init__(self, binarize=False):
self.berkeley = BerkeleyWarpper(path_to_jar=BERKELEY_JAR, path_to_grammar=BERKELEY_GRAMMAR, binarize=binarize)
self.stanford = StanfordWrapper(path_to_jar=STANFORD_JAR, path_to_models_jar=STANFORD_MODEL,
model_path=STANFORD_GRAMMAR)
self.jieba = jieba
self.posseg = posseg
def cut(self, text: str):
yield from self.jieba.cut(text)
def tag(self, text: str):
for pair in self.posseg.cut(text):
yield pair.word, pair.flag
def parse(self, text: str):
_text = ' '.join(self.cut(text))
_text = _text.replace("(", LRB)
_text = _text.replace(")", RRB)
try:
parse_text = self.berkeley.parse(_text)
_parse = ParentedTree.fromstring(parse_text)
except TimeoutError:
raise SentenceParseError("timeout parsing sentence \"%s\"" % text)
except ValueError:
raise SentenceParseError("error parsing sentence %s to tree format" % text)
if not _parse.leaves():
raise SentenceParseError("get an empty parse tree for sentence: \"%s\"" % text)
for child in list(_parse.subtrees(lambda t: t.height() == 2 and t.label() != '-NONE-')):
if child[0] == LRB:
child[0] = '('
if child[0] == RRB:
child[0] = ')'
return _parse
def dependency(self, text):
cuted = list(self.cut(text))
return next(self.stanford.parse(cuted))
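# A hedged usage sketch (requires the Berkeley/Stanford jars and grammars listed
# above to be present on disk; results shown are indicative only):
#
#     parser = ZhBerkeleyParser(binarize=False)
#     words = list(parser.cut("我爱北京天安门"))    # jieba tokenisation
#     tree = parser.parse("我爱北京天安门")         # nltk ParentedTree
#     graph = parser.dependency("我爱北京天安门")   # DependencyGraph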
|
the-stack_0_22431 | # Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'f3jab0f^tm*8%s#h)r*&-#0%*+@ij^o#-g5d$yd@phr$$0pmk8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'infinite_memcached',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
the-stack_0_22432 | import bpy
import uuid
from ..Utils import *
from bpy.app.handlers import persistent
@persistent
def computeCollectionsUids(dummy):
for c in bpy.data.collections:
if not uid_key in c or not c[uid_key]: c[uid_key]=str(uuid.uuid4())
print(c.name+" has assigned f3b_uid "+c[uid_key])
getCollectionUid(c,c[uid_key])
def register():
print("Register collection uid generator")
bpy.app.handlers.save_pre.append(computeCollectionsUids)
def unregister():
bpy.app.handlers.save_pre.remove(computeCollectionsUids) |
the-stack_0_22436 | import requests
from bs4 import BeautifulSoup
def solveequation(x, y):
"""function that solves simple chemistry reaction equation(need WI FI enabled, cause uses web-parsing)"""
url = 'https://chemequations.com/ru/?s=' + x + '+%2B+' + y + '&ref=input'
def get_html(url, params=None):
r = requests.get(url, params=params)
return r
def get_content(html):
s = BeautifulSoup(html, 'html.parser')
pog = s.find('h1', class_='equation main-equation well')
if pog is None:
raise Exception('Error, insert valid elements')
return pog.get_text()
def parse():
html = get_html(url)
if html.status_code == 200:
return get_content(html.text)
else:
raise Exception('Error, enable WI-FI')
return parse()
def chain(text):
"""function that solves chemistry reactions chain(need WI FI enabled, cause uses web-parsing)"""
def get_content(html):
global y
s = BeautifulSoup(html, 'html.parser')
react = s.findAll('div', class_='reac')
for r in react:
reaction = r.get_text()
el = str(reaction).split()
z = el[0]
z1 = el[2]
if z == x or el[2] == x or z[1:] == x or z1[1:] == x:
totr = reaction
break
try:
return totr
except Exception as e:
            return 'Invalid reaction or no reactions like that'
def parse():
def get_html(url, params=None):
r = requests.get(url, params=params)
return r
html = get_html(url)
if html.status_code == 200:
return get_content(html.text)
else:
raise Exception('Error, enable WI-FI')
out = []
c = 0
a = list(text.split())
for i in range(len(a) - 1):
c += 1
x = a[i]
y = a[i + 1]
for j in range(10):
url = 'https://tutata.ru/chemistry/search?s=%3D+' + y + '&page=' + str(j)
res = parse()
            if res != 'Invalid reaction or no reactions like that' or res == 'Error':
break
        if res == 'Invalid reaction or no reactions like that':
            out.append(str(c) + ': ' + 'Invalid reaction or no reactions like that')
else:
out.append(str(c) + ': ' + ''.join(res)[1:])
return out
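# A hedged usage sketch (needs network access; the exact strings returned depend
# on the scraped sites and are shown here only as an illustration):
#
#     print(solveequation('HCl', 'NaOH'))   # e.g. 'HCl + NaOH = NaCl + H2O'
#     for step in chain('Na NaOH Na2CO3'):
#         print(step)                       # '1: ...', '2: ...'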
|
the-stack_0_22437 | import argparse
from typing import List
from catas.cli import MyArgumentParser
from catas.data import Version
from catas import __version__
from catas.exitcodes import (
EXIT_VALID, EXIT_KEYBOARD, EXIT_UNKNOWN, EXIT_CLI, EXIT_INPUT_FORMAT,
EXIT_INPUT_NOT_FOUND, EXIT_SYSERR, EXIT_CANT_OUTPUT
)
def cli(prog: str, args: List[str]) -> argparse.Namespace:
import joblib
parser = MyArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=(
"Examples:\n\n"
"```bash\n"
"$ %(prog)s -o outdir proteome_1.fasta proteome_2.fasta\n"
"```\n"
),
epilog=(
"Exit codes:\n\n"
f"{EXIT_VALID} - Everything's fine\n"
f"{EXIT_KEYBOARD} - Keyboard interrupt\n"
f"{EXIT_CLI} - Invalid command line usage\n"
f"{EXIT_INPUT_FORMAT} - Input format error\n"
f"{EXIT_INPUT_NOT_FOUND} - Cannot open the input\n"
f"{EXIT_SYSERR} - System error\n"
f"{EXIT_CANT_OUTPUT} - Can't create output file\n"
f"{EXIT_UNKNOWN} - Unhandled exception, please file a bug!\n"
)
)
parser.add_argument(
"infiles",
type=str,
nargs="+",
help=(
"Proteome fasta-files to run."
)
)
parser.add_argument(
"-m", "--model",
dest="model_version",
default=Version.latest(),
type=Version.from_string,
choices=list(Version),
help=(
"The version of the model to use. If you're using old dbCAN "
"predictions you may have to specify this. The version numbers "
"are just the versions of dbCAN used to train the models so just "
"select the dbCAN version that you used. The latest version is "
"used by default."
)
)
parser.add_argument(
"--hmms",
type=str,
help=(
"Path to the dbCAN hmmer-formatted database. "
"Note that the specified model version must match the version of "
"the database specified here."
)
)
parser.add_argument(
"-o", "--outdir",
type=str,
default="results",
help="A directory to put the files in. default will be 'results'"
)
parser.add_argument(
"--hmmscan_path",
type=str,
default="hmmscan",
help=(
"Where to look for hmmscan"
)
)
parser.add_argument(
"--hmmpress_path",
type=str,
default="hmmpress",
help=(
"Where to look for hmmpress"
)
)
parser.add_argument(
"--ncpu",
type=int,
default=joblib.cpu_count(),
help="How many processes to run."
)
parser.add_argument(
"-c", "--correct",
action="store_true",
default=False,
help="Replace invalid characters in Fasta files."
)
parser.add_argument(
"-q", "--quiet",
action="store_true",
default=False,
help="Suppress running feedback."
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__),
help="Print the version of %(prog)s and exit"
)
return parser.parse_args()
|
the-stack_0_22438 | # Copyright 2018-2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope import span
from joulescope.stream_buffer import StreamBuffer, stats_to_api, \
stats_array_factory, stats_array_invalidate
import threading
import queue
import numpy as np
import logging
TIMEOUT = 10.0
def data_array_to_update(x_limits, x, data_array):
"""Convert raw data buffer to a view update.
:param x_limits: The list of [x_min, x_max] or None if unknown.
:param x: The np.ndarray of x-axis times.
:param data_array: The np.ndarray((N, STATS_FIELD_COUNT), dtype=STATS_DTYPE)
"""
if len(x):
s = stats_to_api(data_array[0, :], float(x[0]), float(x[-1]))
else:
s = stats_to_api(None, 0.0, 0.0)
s['time']['x'] = {'value': x, 'units': 's'}
s['time']['limits'] = {'value': x_limits, 'units': 's'}
s['state'] = {'source_type': 'buffer'} # ['realtime', 'buffer']
for idx, signal in enumerate(s['signals'].values()):
signal['µ']['value'] = data_array[:, idx]['mean'].copy()
length = data_array[:, idx]['length'] - 1
length[length < 1] = 1.0
signal['σ2']['value'] = data_array[:, idx]['variance'] / length
signal['min']['value'] = data_array[:, idx]['min'].copy()
signal['max']['value'] = data_array[:, idx]['max'].copy()
signal['p2p']['value'] = signal['max']['value'] - signal['min']['value']
return s
class View:
def __init__(self, stream_buffer, calibration):
"""Create a new view instance.
:param stream_buffer: The stream buffer providing the data.
:param calibration: The device calibration data structure.
"""
self._state = 'idle'
self._stream_buffer = None
self._calibration = calibration
self._x = None
self._data = None # [N, STATS_FIELD_COUNT]
self._x_range = [0.0, 1.0] # the initial default range
self._samples_per = 1
self._data_idx = 0
self._span = None
self._changed = True
self._stream_notify_available = False # flag when stream_notify called
self._refresh_requested = False
self._log = logging.getLogger(__name__)
self._thread = None
self._closing = False
self._cmd_queue = queue.Queue() # tuples of (command, args, callback)
self._response_queue = queue.Queue()
self.on_update_fn = None # callable(data)
self._quit = False
self.on_close = None # optional callable() on close
if stream_buffer is not None:
self._stream_buffer_assign(stream_buffer)
def _stream_buffer_assign(self, stream_buffer):
if self._stream_buffer == stream_buffer:
return
self._stream_buffer = stream_buffer
self._x_range = list(self._stream_buffer.limits_time) # the initial default range
length = len(self)
if length <= 0:
length = 100
# todo : investigate - may want inclusive max time (not exclusive) -- off by 1 error?
self._span = span.Span(limits=self._stream_buffer.limits_time,
quant=1.0 / self.sampling_frequency,
length=length)
def __len__(self):
if self._data is None:
return 0
return self._data.shape[0]
@property
def sampling_frequency(self):
"""The output sampling frequency."""
if self._stream_buffer is None:
return None
return self._stream_buffer.output_sampling_frequency
@property
def calibration(self):
"""The device calibration."""
return self._calibration
@property
def limits(self):
"""Get the (x_min, x_max) limits for the view."""
if self._span is not None:
return list(self._span.limits)
return None
def _cmd_process(self, cmd, args):
rv = None
try:
# self._log.debug('_cmd_process %s - start', cmd)
if cmd == 'stream_notify':
rv = self._stream_notify(stream_buffer=args)
elif cmd == 'refresh':
if bool(args['force']):
self._log.debug('view refresh(force=True) requested')
self._update()
else:
self._refresh_requested = True
elif cmd == 'on_x_change':
rv = self._on_x_change(*args)
elif cmd == 'samples_get':
rv = self._samples_get(**args)
elif cmd == 'statistics_get':
rv = self._statistics_get(**args)
elif cmd == 'statistics_get_multiple':
rv = self._statistics_get_multiple(**args)
elif cmd == 'start':
rv = self._start()
elif cmd == 'stop':
rv = self._stop()
elif cmd == 'ping':
return args
elif cmd == 'close':
self._quit = True
else:
self._log.warning('unsupported command %s', cmd)
except Exception:
self._log.exception('While running command')
# self._log.debug('_cmd_process %s - done', cmd)
return rv
def _run(self):
cmd_count = 0
timeout = 1.0
self._log.info('View.run start')
while not self._quit:
try:
cmd, args, cbk = self._cmd_queue.get(timeout=timeout)
except queue.Empty:
timeout = 1.0
if cmd_count and self._refresh_requested and (self._changed or self._stream_notify_available):
self._update()
cmd_count = 0
continue
except Exception:
self._log.exception('Exception during View _cmd_queue get')
continue
cmd_count += 1
timeout = 0.0
rv = self._cmd_process(cmd, args)
if callable(cbk):
try:
cbk(rv)
except Exception:
self._log.exception('in callback')
self._data = None
self._log.info('View.run done')
def _post(self, command, args=None, cbk=None):
if self._thread is None:
self._log.info('View._post(%s) when thread not running', command)
else:
self._cmd_queue.put((command, args, cbk))
def _post_block(self, command, args=None, timeout=None):
timeout = TIMEOUT if timeout is None else float(timeout)
# self._log.debug('_post_block %s start', command)
while not self._response_queue.empty():
self._log.warning('response queue not empty')
try:
self._response_queue.get(timeout=0.0)
except queue.Empty:
pass
if self._thread is None:
raise IOError('View thread not running')
self._post(command, args, lambda rv_=None: self._response_queue.put(rv_))
try:
rv = self._response_queue.get(timeout=timeout)
except queue.Empty as ex:
self._log.error('view thread hung: %s - FORCE CLOSE', command)
self._join()
rv = ex
except Exception as ex:
rv = ex
if isinstance(rv, Exception):
raise IOError(rv)
# self._log.debug('_post_block %s done', command) # rv
return rv
def _update_from_buffer(self):
buffer = self._stream_buffer
if buffer is None:
return
length = len(self)
data_idx_view_end, sample_id_end, delta = self._view()
if self._data is None:
return
elif not self._changed and 0 == delta:
return
elif self._changed or delta >= length: # perform full recompute
stats_array_invalidate(self._data)
if data_idx_view_end > 0:
start_idx = (data_idx_view_end - length) * self._samples_per
# self.log.debug('recompute(start=%s, stop=%s, increment=%s)', start_idx, sample_id_end, self.samples_per)
buffer.data_get(start_idx, sample_id_end, self._samples_per, self._data)
elif data_idx_view_end > 0:
start_idx = self._data_idx * self._samples_per
# self.log.debug('update(start=%s, stop=%s, increment=%s)', start_idx, sample_id_end, self.samples_per)
self._data = np.roll(self._data, -delta, axis=0)
buffer.data_get(start_idx, sample_id_end, self._samples_per, self._data[-delta:, :])
else:
stats_array_invalidate(self._data)
self._data_idx = data_idx_view_end
self._changed = False
def _update(self):
if not callable(self.on_update_fn):
return
self._update_from_buffer()
if self._data is None:
data = None
else:
data = data_array_to_update(self.limits, self._x, self._data)
if self._state != 'idle':
data['state']['source_type'] = 'realtime'
self._stream_notify_available = False
self._refresh_requested = False
try:
self.on_update_fn(data)
except Exception:
self._log.exception('in on_update_fn')
def _clear(self):
self._changed = True
self._refresh_requested = True
self._data_idx = 0
if self._data is not None:
stats_array_invalidate(self._data)
def _start(self):
self._log.debug('start')
self._clear()
self._state = 'streaming'
def _stop(self):
self._log.debug('start')
self._state = 'idle'
def _on_x_change(self, cmd, kwargs):
x_range = list(self._x_range) # copy
if cmd == 'resize': # {pixels: int}
length = kwargs['pixels']
if length is not None and length != len(self):
self._log.info('resize %s', length)
self._span.length = length
self._data = stats_array_factory(length)
self._changed = True # invalidate
x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
elif cmd == 'span_absolute': # {range: (start: float, stop: float)}]
x_range, self._samples_per, self._x = self._span.conform_discrete(kwargs.get('range'))
elif cmd == 'span_relative': # {center: float, gain: float}]
x_range, self._samples_per, self._x = self._span.conform_discrete(
x_range, gain=kwargs.get('gain'), pivot=kwargs.get('pivot'))
elif cmd == 'span_pan':
delta = kwargs.get('delta', 0.0)
x_range = [x_range[0] + delta, x_range[-1] + delta]
x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
elif cmd == 'refresh':
self._log.warning('on_x_change(refresh)')
self._changed = True
return
else:
self._log.warning('on_x_change(%s) unsupported', cmd)
return
if self._state == 'streaming':
x_max = self._span.limits[1]
if x_range[1] < x_max:
x_shift = x_max - x_range[1]
x_range = [x_range[0] + x_shift, x_max]
x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
self._changed |= (self._x_range != x_range)
self._clear()
self._x_range = x_range
self._log.info('changed=%s, length=%s, span=%s, range=%s, samples_per=%s',
self._changed, len(self), self._x_range,
self._x_range[1] - self._x_range[0], self._samples_per)
if self._state == 'idle':
self._stream_notify(self._stream_buffer)
def _view(self):
buffer = self._stream_buffer
_, sample_id_end = buffer.sample_id_range
lag_time = self._span.limits[1] - self._x_range[1]
lag_samples = int(lag_time * self.sampling_frequency) // self._samples_per
data_idx_stream_end = sample_id_end // self._samples_per
data_idx_view_end = data_idx_stream_end - lag_samples
sample_id_end = data_idx_view_end * self._samples_per
delta = data_idx_view_end - self._data_idx
return data_idx_view_end, sample_id_end, delta
def time_to_sample_id(self, t):
return self._stream_buffer.time_to_sample_id(t)
def sample_id_to_time(self, s):
return self._stream_buffer.sample_id_to_time(s)
def _stream_notify(self, stream_buffer):
self._stream_buffer = stream_buffer
self._stream_notify_available = True
def _convert_time_to_samples(self, x, units):
if units is None or units == 'seconds':
return self.time_to_sample_id(x)
elif units == 'samples':
return int(x)
else:
raise ValueError(f'unsupported units {units}')
def _convert_time_range_to_samples(self, start, stop, units):
length = len(self)
data_idx_view_end, sample_id_end, delta = self._view()
start_idx = (data_idx_view_end - length) * self._samples_per
if start is None and units == 'seconds':
start = start_idx
else:
start = self._convert_time_to_samples(start, units)
if stop is None and units == 'seconds':
stop = data_idx_view_end * self._samples_per
else:
stop = self._convert_time_to_samples(stop, units)
return start, stop
def _samples_get(self, start=None, stop=None, units=None, fields=None):
s1, s2 = self._convert_time_range_to_samples(start, stop, units)
self._log.debug('_samples_get(start=%r, stop=%r, units=%s) -> %s, %s', start, stop, units, s1, s2)
return self._stream_buffer.samples_get(start=s1, stop=s2, fields=fields)
def _statistics_get(self, start=None, stop=None, units=None):
"""Get the statistics for the collected sample data over a time range.
:return: The statistics data structure.
See the :`statistics documentation <statistics.html>`_
for details on the data format.
"""
s1, s2 = self._convert_time_range_to_samples(start, stop, units)
# self._log.debug('buffer %s, %s, %s => %s, %s', start, stop, units, s1, s2)
        d, x_range = self._stream_buffer.statistics_get(start=s1, stop=s2)
t_start = x_range[0] / self.sampling_frequency
t_stop = x_range[1] / self.sampling_frequency
return stats_to_api(d, t_start, t_stop)
def _statistics_get_multiple(self, ranges, units=None, source_id=None):
return [self._statistics_get(x[0], x[1], units=units) for x in ranges]
def open(self):
"""Open the view and run the thread."""
self.close()
self._log.info('open')
self._closing = False
self._thread = threading.Thread(name='view', target=self._run)
self._thread.start()
self._post_block('ping')
return
def start(self, stream_buffer: StreamBuffer):
"""Start streaming."""
self._post_block('start')
def stop(self):
"""Stop streaming."""
if self._thread is not None:
self._post_block('stop')
def _join(self, timeout=None):
timeout = TIMEOUT if timeout is None else timeout
if not self._closing:
self._closing = True
self._post('close', None, None)
if self._thread:
# thread can safely join() multiple times
self._thread.join(timeout=timeout)
self._thread = None
def close(self):
"""Close the view and stop the thread."""
if self._thread is not None:
self._log.info('close')
self._join()
on_close, self.on_close = self.on_close, None
if callable(on_close):
try:
on_close()
except Exception:
self._log.exception('view.on_close')
self._stream_buffer = None
def refresh(self, force=None):
return self._post('refresh', {'force': force})
def on_x_change(self, cmd, kwargs):
self._post('on_x_change', (cmd, kwargs))
def stream_notify(self, stream_buffer):
self._post('stream_notify', stream_buffer)
def samples_get(self, start=None, stop=None, units=None, fields=None):
"""Get exact samples over a range.
:param start: The starting time.
:param stop: The ending time.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
            'samples' is in stream buffer sample indices.
:param fields: The fields to get. None (default) gets the fundamental
fields available for this view instance, which may vary depending
upon the backend.
"""
args = {'start': start, 'stop': stop, 'units': units, 'fields': fields}
return self._post_block('samples_get', args)
def statistics_get(self, start=None, stop=None, units=None, callback=None):
"""Get statistics over a range.
:param start: The starting time.
:param stop: The ending time.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
'samples' is in stream buffer sample indices.
:param callback: The optional callable. When provided, this method will
not block and the callable will be called with the statistics
data structure from the view thread.
:return: The statistics data structure or None if callback is provided.
Note: this same format is used by the
:meth:`Driver.statistics_callback_register`.
See the `statistics documentation <statistics.html>`_
for details on the data format.
"""
args = {'start': start, 'stop': stop, 'units': units}
if callback is None:
return self._post_block('statistics_get', args)
else:
self._post('statistics_get', args=args, cbk=callback)
return None
def statistics_get_multiple(self, ranges, units=None, callback=None, source_id=None):
args = {'ranges': ranges, 'units': units, 'source_id': source_id}
if callback is None:
return self._post_block('statistics_get_multiple', args)
else:
self._post('statistics_get_multiple', args=args, cbk=callback)
return None
def ping(self, *args, **kwargs):
"""Ping the thread.
:param args: The positional arguments.
:param kwargs: The keyword arguments.
:return: (args, kwargs) after passing through the thread.
"""
return self._post_block('ping', (args, kwargs))
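# A hedged usage sketch (not part of this module; the stream_buffer and
# calibration objects are assumed to come from the joulescope driver):
#
#     view = View(stream_buffer, calibration)
#     view.on_update_fn = lambda data: print(data['time']['x']['value'])
#     view.open()
#     view.on_x_change('resize', {'pixels': 800})
#     view.on_x_change('span_absolute', {'range': [0.0, 1.0]})
#     stats = view.statistics_get(start=0.0, stop=0.5, units='seconds')
#     view.close()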
|
the-stack_0_22440 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from marionette import SkipTest
from gaiatest import GaiaTestCase
from gaiatest.apps.email.app import Email
class TestSetupAndSendIMAPEmail(GaiaTestCase):
def setUp(self):
try:
self.account = self.testvars['email']['IMAP']
except KeyError:
raise SkipTest('account details not present in test variables')
GaiaTestCase.setUp(self)
self.connect_to_network()
self.email = Email(self.marionette)
self.email.launch()
def test_setup_and_send_imap_email(self):
"""
https://moztrap.mozilla.org/manage/case/6113/
https://moztrap.mozilla.org/manage/case/6114/
"""
# setup IMAP account
self.email.setup_IMAP_email(self.account)
# check header area
self.assertTrue(self.email.header.is_compose_visible)
self.assertTrue(self.email.header.is_menu_visible)
self.assertEqual(self.email.header.label, 'Inbox')
# check toolbar area
self.assertTrue(self.email.toolbar.is_edit_visible)
self.assertTrue(self.email.toolbar.is_refresh_visible)
# check account has emails
self.email.wait_for_emails_to_sync()
self.assertGreater(len(self.email.mails), 0)
# Bug 878772 - email app doesn't show the last emails by default
self.email.mails[0].scroll_to_message()
curr_time = repr(time.time()).replace('.', '')
_subject = 's%s' % curr_time
_body = 'b%s' % curr_time
new_email = self.email.header.tap_compose()
new_email.type_to(self.testvars['email']['IMAP']['email'])
new_email.type_subject(_subject)
new_email.type_body(_body)
self.email = new_email.tap_send()
# wait for the email to be sent before we tap refresh
self.email.wait_for_email(_subject)
# assert that the email app subject is in the email list
self.assertIn(_subject, [mail.subject for mail in self.email.mails])
read_email = self.email.mails[0].tap_subject()
self.assertEqual(_body, read_email.body.splitlines()[0])
self.assertEqual(_subject, read_email.subject)
|
the-stack_0_22441 | # Import game state, action and player class, random module
Game2048State = __import__('2048_game').Game2048State
Game2048Action = __import__('2048_game').Game2048Action
Game2048Player = __import__('2048_game').Game2048Player
import random
import math
import numpy as np
# Max searching depth
search_max_depth = 0
# Processed tree nodes
processed_nodes = 0
# Tree search node class
class Node(object):
def __init__(self, state, player):
self.state = state
self.player = player
# Calculate utility of state
def getUtility(self):
utility = 0
empty_count = 0
# Sum non 0 tile values, times factor
# factor = log2(tile)
# Count num of zero tiles
for i in range(self.state.board.shape[0]):
for j in range(self.state.board.shape[1]):
if self.state.board[i][j] != 0:
utility += self.state.board[i][j] * math.log(self.state.board[i][j], 2)
else:
empty_count += 1
# More zero tiles = higher utility
utility += empty_count * 5
return utility
# Find next state using expectimax search
def getNextState(state):
# Reset processed nodes counter
global processed_nodes
processed_nodes = 0
# Update search depth
depth_map = {2: 5, 4: 1, 6: 1, 8: 1, 10: 1}
global search_max_depth
search_max_depth = depth_map[state.size]
# Find next best move
next_node = expectimax(Node(state, Game2048Player.USER), 0)
# Next state result and number of processed tree nodes
return next_node.state
# Expectimax tree search
def expectimax(node, depth):
# Increase tree node counter
global processed_nodes
processed_nodes += 1
# Max or Exp the node
if node.player == Game2048Player.USER: return findMax(node, depth)
if node.player == Game2048Player.GAME: return findExp(node, depth)
# Find max state
def findMax(node, depth):
# Best next move
next_node = None
# List all possible next nodes
next_nodes = []
valid_actions = node.state.validActions()
for action in valid_actions:
if (action == Game2048Action.SLIDE_UP):
next_state = node.state.slideUp()
next_nodes.append(Node(next_state, Game2048Player.GAME))
elif (action == Game2048Action.SLIDE_DOWN):
next_state = node.state.slideDown()
next_nodes.append(Node(next_state, Game2048Player.GAME))
elif (action == Game2048Action.SLIDE_LEFT):
next_state = node.state.slideLeft()
next_nodes.append(Node(next_state, Game2048Player.GAME))
elif (action == Game2048Action.SLIDE_RIGHT):
next_state = node.state.slideRight()
next_nodes.append(Node(next_state, Game2048Player.GAME))
elif (action == Game2048Action.ROTATE_CW):
next_state = node.state.rotateCenterCW()
next_nodes.append(Node(next_state, Game2048Player.GAME))
elif (action == Game2048Action.ROTATE_CCW):
next_state = node.state.rotateCenterCCW()
next_nodes.append(Node(next_state, Game2048Player.GAME))
# Find optimal board
max_utility = float('-inf')
for n in next_nodes:
expected_utility = expectimax(n, depth + 1)
        if expected_utility > max_utility:
            next_node = n
            max_utility = expected_utility
    # Next move
    if next_node is None:
        return node
    return next_node
# Find expected state
def findExp(node, depth):
if depth >= search_max_depth: return node.getUtility()
# Expected utility
expected_utility = 0.0
# All possible next nodes
next_nodes = []
# Add all possible next nodes with new tile value of 2
for i in range(node.state.board.shape[0]):
for j in range(node.state.board.shape[1]):
if (node.state.board[i][j] == 0):
next_state = Game2048State(node.state.size)
next_state.board = np.copy(node.state.board)
next_state.board[i][j] = 2
next_nodes.append(Node(next_state, Game2048Player.USER))
# Add all possible next nodes with new tile value of 4
for i in range(node.state.board.shape[0]):
for j in range(node.state.board.shape[1]):
if (node.state.board[i][j] == 0):
next_state = Game2048State(node.state.size)
next_state.board = np.copy(node.state.board)
next_state.board[i][j] = 4
next_nodes.append(Node(next_state, Game2048Player.USER))
# Sum up expected utility
for n in next_nodes:
expected_utility += 1/len(next_nodes) * expectimax(n, depth + 1).getUtility()
# Expected utility result
return expected_utility
if __name__ == "__main__":
# AI demo
state = Game2048State(6)
state = state.initialState()
print(state)
for _ in range(0, 50000):
state = getNextState(state)
print(state)
state = state.addNewTile()
print(state)
node = Node(state, Game2048Player.GAME)
print(node.getUtility()) |
the-stack_0_22443 | import subprocess
import os
import sys
def current_version(commits_ahead=False):
if isinstance(commits_ahead, str):
commits_ahead = commits_ahead.lower() == "true"
env = os.environ.copy()
output = subprocess.run(
"git describe --tags --long",
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if output.returncode == 0:
output = output.stdout.decode("utf-8").strip()
else:
output = "v0.0.0-0-0"
output = output.split("-")
tag = output[0]
if commits_ahead:
return tag, output[1]
return tag
def latest_version():
tag, patch = current_version(commits_ahead=True)
tag = tag.split(".")
patch = int(tag[-1]) + int(patch)
tag = ".".join(tag[:-1]) + "." + str(patch)
return tag
def update_version_tag():
tag = latest_version()
env = os.environ.copy()
subprocess.run(
f'git tag -a {tag} -m "updated to new version {tag}"', shell=True, env=env
)
return current_version()
def revert_version_tag():
tag = current_version()
env = os.environ.copy()
subprocess.run(f"git tag -d {tag}", shell=True, env=env)
return current_version()
def new_version(new_tag: str, update_patch: bool = True):
tag = new_tag
if isinstance(update_patch, str):
update_patch = update_patch.lower() != "false"
if update_patch:
_, patch = current_version(commits_ahead=True)
tag = tag.split(".")
if len(tag) < 3 or tag[-1] == "":
tag = ".".join(tag[:-1]) + "." + str(patch)
else:
patch = int(tag[-1]) + int(patch)
tag = ".".join(tag[:-1]) + "." + str(patch)
return tag
def create_new_version_tag(
new_tag: str,
new_version_description: str = "Release Version",
update_patch: bool = True,
):
tag = new_version(new_tag, update_patch)
env = os.environ.copy()
subprocess.run(
f'git tag -a {tag} -m "{new_version_description} - {tag}"', shell=True, env=env
)
return current_version()
def view_releases():
env = os.environ.copy()
output = subprocess.run(
'git tag -l --sort=-version:refname "v*" -n3',
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if output.returncode == 0:
return output.stdout.decode("utf-8")
else:
return None
def set_version_in_environemnt(env: str):
with open("./Dockerfile", "r+") as docker:
content = docker.read()
content.replace()
def get_remote_url():
env = os.environ.copy()
output = subprocess.run(
"git config --get remote.origin.url",
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if output.returncode == 0:
return output.stdout.decode("utf-8").replace(".git", "")
else:
return None
if __name__ == "__main__":
try:
function = sys.argv[1]
params = sys.argv[2:]
print(globals()[function](*params))
except IndexError:
print(current_version())
except KeyError:
raise Exception(f"Invalid argument {function}")
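# Illustrative command-line usage (a sketch; assumes this module is saved as version.py
# inside a git repository that has at least one "vX.Y.Z" tag):
#
#     python version.py                      # print the current tag
#     python version.py latest_version       # current tag with commits-ahead added to the patch
#     python version.py new_version v1.2     # compute (but do not create) the next tag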
|
the-stack_0_22444 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019-2020 The Rapids developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup_linux():
global args, workdir
if os.path.isfile('/usr/bin/apt-get'):
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', os.environ['USER']])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
setup_repos()
elif args.is_fedora:
pkgmgr = 'dnf'
repourl = 'https://download.docker.com/linux/fedora/docker-ce.repo'
elif args.is_centos:
pkgmgr = 'yum'
repourl = 'https://download.docker.com/linux/centos/docker-ce.repo'
if args.is_fedora or args.is_centos:
programs = ['ruby', 'make', 'wget', 'curl']
if args.kvm:
print('KVM not supported with Fedora/CentOS yet.')
sys.exit(1)
elif args.docker:
if not os.path.isfile('/lib/systemd/system/docker.service'):
user = os.environ['USER']
dockers = ['docker-ce', 'docker-ce-cli', 'containerd.io']
if args.is_fedora:
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'dnf-plugins-core'])
subprocess.check_call(['sudo', pkgmgr, 'config-manager', '--add-repo', repourl])
elif args.is_centos:
reqs = ['yum-utils', 'device-mapper-persistent-data', 'lvm2']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + reqs)
subprocess.check_call(['sudo', 'yum-config-manager', '--add-repo', repourl])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + dockers)
subprocess.check_call(['sudo', 'usermod', '-aG', 'docker', user])
subprocess.check_call(['sudo', 'systemctl', 'enable', 'docker'])
print('Docker installed, restart your computer and re-run this script to continue the setup process.')
sys.exit(0)
subprocess.check_call(['sudo', 'systemctl', 'start', 'docker'])
else:
print('LXC not supported with Fedora/CentOS yet.')
sys.exit(1)
if args.is_fedora:
programs += ['git']
if args.is_centos:
# CentOS ships with an insanely outdated version of git that is no longer compatible with gitian builds
# Check current version and update if necessary
oldgit = b'2.' not in subprocess.check_output(['git', '--version'])
if oldgit:
subprocess.check_call(['sudo', pkgmgr, 'remove', '-y', 'git*'])
subprocess.check_call(['sudo', pkgmgr, 'install', '-y', 'https://centos7.iuscommunity.org/ius-release.rpm'])
programs += ['git2u-all']
subprocess.check_call(['sudo', pkgmgr, 'install', '-y'] + programs)
setup_repos()
else:
print('Unsupported system/OS type.')
sys.exit(1)
def setup_darwin():
global args, workdir
programs = []
if not os.path.isfile('/usr/local/bin/wget'):
programs += ['wget']
if not os.path.isfile('/usr/local/bin/git'):
programs += ['git']
if not os.path.isfile('/usr/local/bin/gsha256sum'):
programs += ['coreutils']
if args.docker:
print('Experimental setup for macOS host')
if len(programs) > 0:
subprocess.check_call(['brew', 'install'] + programs)
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
elif args.kvm or not args.docker:
print('KVM and LXC are not supported under macOS at this time.')
sys.exit(0)
setup_repos()
def setup_repos():
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/RapidsOfficial/gitian.sigs.git'])
if not os.path.isdir('rapids-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/RapidsOfficial/Rapids.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('rapids'):
subprocess.check_call(['git', 'clone', 'https://github.com/RapidsOfficial/rapids.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
if args.host_os == 'darwin':
subprocess.check_call(['sed', '-i.old', '/50cacher/d', 'bin/make-base-vm'])
if args.host_os == 'linux':
if args.is_fedora or args.is_centos or args.is_wsl:
subprocess.check_call(['sed', '-i', '/50cacher/d', 'bin/make-base-vm'])
subprocess.check_call(make_image_prog)
subprocess.check_call(['git', 'checkout', 'bin/make-base-vm'])
os.chdir(workdir)
if args.host_os == 'linux':
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
print('Setup complete!')
sys.exit(0)
def build():
global args, workdir
os.makedirs('rapids-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../rapids/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'rapids='+args.commit, '--url', 'rapids='+args.url, '../rapids/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../rapids/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/rapids-*.tar.gz build/out/src/rapids-*.tar.gz ../rapids-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'rapids='+args.commit, '--url', 'rapids='+args.url, '../rapids/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../rapids/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/rapids-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/rapids-*.zip build/out/rapids-*.exe build/out/src/rapids-*.tar.gz ../rapids-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'rapids='+args.commit, '--url', 'rapids='+args.url, '../rapids/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../rapids/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/rapids-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/rapids-*.tar.gz build/out/rapids-*.dmg build/out/src/rapids-*.tar.gz ../rapids-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
# TODO: Skip making signed windows sigs until we actually start producing signed windows binaries
#print('\nSigning ' + args.version + ' Windows')
#subprocess.check_call('cp inputs/rapids-' + args.version + '-win-unsigned.tar.gz inputs/rapids-win-unsigned.tar.gz', shell=True)
#subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../rapids/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../rapids/contrib/gitian-descriptors/gitian-win-signer.yml'])
#subprocess.check_call('mv build/out/rapids-*win64-setup.exe ../rapids-binaries/'+args.version, shell=True)
#subprocess.check_call('mv build/out/rapids-*win32-setup.exe ../rapids-binaries/'+args.version, shell=True)
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/rapids-' + args.version + '-osx-unsigned.tar.gz inputs/rapids-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../rapids/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../rapids/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/rapids-osx-signed.dmg ../rapids-binaries/'+args.version+'/rapids-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
os.chdir('gitian.sigs')
commit = False
if os.path.isfile(args.version+'-win-signed/'+args.signer+'/rapids-win-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
commit = True
if os.path.isfile(args.version+'-osx-signed/'+args.signer+'/rapids-dmg-signer-build.assert.sig'):
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
commit = True
if commit:
print('\nCommitting '+args.version+' Signed Sigs\n')
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
else:
print('\nNothing to commit\n')
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../rapids/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../rapids/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../rapids/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
# TODO: Skip checking signed windows sigs until we actually start producing signed windows binaries
#print('\nVerifying v'+args.version+' Signed Windows\n')
#if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../rapids/contrib/gitian-descriptors/gitian-win-signer.yml']):
# print('Verifying v'+args.version+' Signed Windows FAILED\n')
# rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../rapids/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/RapidsOfficial/rapids', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.host_os = sys.platform
if args.host_os == 'win32' or args.host_os == 'cygwin':
raise Exception('Error: Native Windows is not supported by this script, use WSL')
if args.host_os == 'linux':
if os.environ['USER'] == 'root':
raise Exception('Error: Do not run this script as the root user')
args.is_bionic = False
args.is_fedora = False
args.is_centos = False
args.is_wsl = False
if os.path.isfile('/usr/bin/lsb_release'):
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if os.path.isfile('/etc/fedora-release'):
args.is_fedora = True
if os.path.isfile('/etc/centos-release'):
args.is_centos = True
        if os.path.isfile('/proc/version') and 'Microsoft' in open('/proc/version', 'r').read():
args.is_wsl = True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
if args.host_os == 'linux':
setup_linux()
elif args.host_os == 'darwin':
setup_darwin()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
if args.host_os == 'darwin':
os.environ['PATH'] = '/usr/local/opt/coreutils/libexec/gnubin' + os.pathsep + os.environ['PATH']
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
if args.detach_sign:
args.commit_files = False
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('rapids')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
if not os.path.isdir('../gitian-builder/inputs/rapids'):
os.makedirs('../gitian-builder/inputs/rapids')
os.chdir('../gitian-builder/inputs/rapids')
if not os.path.isdir('.git'):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
|
the-stack_0_22446 | """Support for lights under the iGlo brand."""
import logging
import math
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, SUPPORT_EFFECT,
PLATFORM_SCHEMA, Light)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'iGlo Light'
DEFAULT_PORT = 8080
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the iGlo lights."""
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
port = config.get(CONF_PORT)
add_entities([IGloLamp(name, host, port)], True)
class IGloLamp(Light):
"""Representation of an iGlo light."""
def __init__(self, name, host, port):
"""Initialize the light."""
from iglo import Lamp
self._name = name
self._lamp = Lamp(0, host, port)
@property
def name(self):
"""Return the name of the light."""
return self._name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int((self._lamp.state()['brightness'] / 200.0) * 255)
@property
def color_temp(self):
"""Return the color temperature."""
return color_util.color_temperature_kelvin_to_mired(
self._lamp.state()['white'])
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return math.ceil(color_util.color_temperature_kelvin_to_mired(
self._lamp.max_kelvin))
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return math.ceil(color_util.color_temperature_kelvin_to_mired(
self._lamp.min_kelvin))
@property
def hs_color(self):
"""Return the hs value."""
return color_util.color_RGB_to_hs(*self._lamp.state()['rgb'])
@property
def effect(self):
"""Return the current effect."""
return self._lamp.state()['effect']
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._lamp.effect_list()
@property
def supported_features(self):
"""Flag supported features."""
return (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP |
SUPPORT_COLOR | SUPPORT_EFFECT)
@property
def is_on(self):
"""Return true if light is on."""
return self._lamp.state()['on']
def turn_on(self, **kwargs):
"""Turn the light on."""
if not self.is_on:
self._lamp.switch(True)
if ATTR_BRIGHTNESS in kwargs:
brightness = int((kwargs[ATTR_BRIGHTNESS] / 255.0) * 200.0)
self._lamp.brightness(brightness)
return
if ATTR_HS_COLOR in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._lamp.rgb(*rgb)
return
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(color_util.color_temperature_mired_to_kelvin(
kwargs[ATTR_COLOR_TEMP]))
self._lamp.white(kelvin)
return
if ATTR_EFFECT in kwargs:
effect = kwargs[ATTR_EFFECT]
self._lamp.effect(effect)
return
def turn_off(self, **kwargs):
"""Turn the light off."""
self._lamp.switch(False)
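# Illustrative configuration sketch (not from the original file).  The `iglo` platform key
# is an assumption based on the integration name, and the host value is a placeholder:
#
#     # configuration.yaml
#     light:
#       - platform: iglo
#         host: 192.168.1.50
#         name: iGlo Light
#         port: 8080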
|
the-stack_0_22448 | #!/usr/bin/env python
import os
from pathlib import Path
from typing import List, Optional
import click
import clickclick as cc
from pipenv.project import Project
@click.command("check-pipfile-lock")
@click.argument("pipfile_locks", metavar="PATH-TO-PIPFILE-LOCK", type=click.Path(exists=True), nargs=-1, required=False)
def main(pipfile_locks: List[str]):
"""
Check whether specified Pipfile.lock file(s) are up to date with their Pipfile(s).
If no Pipfile.lock paths are provided, the current directory is assumed.
"""
if not pipfile_locks:
pipfile_locks = [Path(os.getcwd()) / "Pipfile.lock"]
for pipfile_lock in pipfile_locks:
pipfile_dir: Path = Path(pipfile_lock).parent
if not check_dir(pipfile_dir):
cc.fatal_error(f"{pipfile_lock} is out of date. Consider running 'pipenv lock' or 'pipenv install'")
def check_dir(dir_path: Optional[Path] = None) -> bool:
"""
    Check whether the specified directory contains a valid and
    up-to-date Pipfile.lock.  Lack of a Pipfile.lock is considered
    a success.
"""
    cur_path: str = os.getcwd()
try:
if dir_path:
os.chdir(dir_path)
project = Project()
        if not project.lockfile_exists:
            return True  # no Pipfile.lock to check counts as success
old_hash = project.get_lockfile_hash()
new_hash = project.calculate_pipfile_hash()
return old_hash == new_hash
finally:
os.chdir(cur_path)
if __name__ == "__main__":
main()
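# Illustrative invocations (a sketch; the file name and paths are placeholders):
#
#     python check_pipfile_lock.py                               # checks ./Pipfile.lock
#     python check_pipfile_lock.py api/Pipfile.lock worker/Pipfile.lock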
|
the-stack_0_22449 | import negspy.coordinates as nc
def load_chromsizes(chromsizes_filename, assembly=None):
'''
Load a set of chromosomes from a file or using an assembly
identifier. If using just an assembly identifier the chromsizes
will be loaded from the negspy repository.
Parameters:
-----------
chromsizes_filename: string
The file containing the tab-delimited chromosome sizes
assembly: string
Assembly name (e.g. 'hg19'). Not necessary if a chromsizes_filename is passed in
'''
if chromsizes_filename is not None:
chrom_info = nc.get_chrominfo_from_file(chromsizes_filename)
chrom_names = chrom_info.chrom_order
chrom_sizes = [chrom_info.chrom_lengths[c] for c in chrom_info.chrom_order]
else:
chrom_info = nc.get_chrominfo(assembly)
chrom_names = nc.get_chromorder(assembly)
chrom_sizes = nc.get_chromsizes(assembly)
return (chrom_info, chrom_names, chrom_sizes)
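# Illustrative usage (a sketch): pass None for the filename to fall back to an assembly
# bundled with negspy; 'hg19' is assumed to be available.
if __name__ == '__main__':
    chrom_info, chrom_names, chrom_sizes = load_chromsizes(None, assembly='hg19')
    print(list(chrom_names)[:3])
    print(list(chrom_sizes)[:3])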
|
the-stack_0_22450 | from clarifai.rest import Image as ClImage
from clarifai.rest import ClarifaiApp
NUM_THRESHOLD = 20 # Numeric threshold for how many results to include.
PROB_THRESHOLD = 0.8 # Probabilistic threshold for lower limit of confidence rate of image
def setup_model(): # constructs boiler plate to get model ready
f = open('TOKENS','r')
lines = f.readlines()
f.close()
CLIENT_ID = lines[0][:-1]
CLIENT_SECRET = lines[1]
app = ClarifaiApp(CLIENT_ID, CLIENT_SECRET)
ACCESS_TOKEN = app.auth.get_token()
model = app.models.get('general-v1.3')
return model
def process_image(filename):
model = setup_model()
image = ClImage(file_obj=open(filename, 'rb'))
json_dict = model.predict([image])
results = json_dict['outputs'][0]['data']['concepts']
return_list = []
for i in range(0,NUM_THRESHOLD):
name = results[i]['name']
value = results[i]['value']
return_list.append([name,value])
# return_list.append(name)
if value < PROB_THRESHOLD:
break
else:
print(name + ' : ' + str(value))
return return_list |
the-stack_0_22451 | # -*- coding: utf-8 -*-
from pyramid.view import view_config
from chsdi.models.bod import Topics
from chsdi.lib.filters import filter_by_geodata_staging
@view_config(route_name='topics', renderer='jsonp')
def topics(request):
model = Topics
geodataStaging = request.registry.settings['geodata_staging']
showCatalog = True
query = request.db.query(model).filter(model.showCatalog == showCatalog) \
.order_by(model.groupId)
query = filter_by_geodata_staging(query, model.staging, geodataStaging)
results = [{
'id': q.id,
'defaultBackground': q.defaultBackground,
'backgroundLayers': q.backgroundLayers,
'selectedLayers': q.selectedLayers,
'activatedLayers': q.activatedLayers,
'plConfig': q.plconf,
'groupId': q.groupId
} for q in query]
return {'topics': results}
|
the-stack_0_22453 | import typing as tp
import os
from pathlib import Path
from catalyst import utils
from catalyst.dl import ConfigExperiment
from dataset import get_cat_dogs_dataset, get_reader
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
class Experiment(ConfigExperiment):
def get_loaders(self, stage: str, **kwargs) -> tp.Dict[str, DataLoader]:
loaders = dict()
data_params = dict(self.stages_config[stage]["data_params"])
data_path = (
Path(os.getenv("DATA_PATH")) / "data_cat_dogs"
).as_posix() + "/*"
tag_file_path = (
Path(os.getenv("DATA_PATH")) / "cat_dog_labeling.json"
).as_posix()
train_data, valid_data, num_classes = get_cat_dogs_dataset(
data_path, tag_file_path=tag_file_path
)
open_fn = get_reader(num_classes)
data = [("train", train_data), ("valid", valid_data)]
for mode, part in data:
data_transform = self.get_transforms(stage=stage, dataset=mode)
loaders[mode] = utils.get_loader(
part,
open_fn=open_fn,
dict_transform=data_transform,
shuffle=(mode == "train"),
sampler=None,
drop_last=(mode == "train"),
**data_params,
)
return loaders
|
the-stack_0_22455 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from bwtougu.data.daybar_store import DayBarStore
from bwtougu.interface import AbstractDataSource
from bwtougu.utils.datetime_func import convert_date_to_int, convert_int_to_date
from bwtougu.data.converter import StockBarConverter, IndexBarConverter
from bwtougu.data.instrument_store import InstrumentStore
from bwtougu.data.dividend_store import DividendStore
from bwtougu.data.trading_dates_store import TradingDatesStore
from bwtougu.data.simple_factor_store import SimpleFactorStore
from bwtougu.data.date_set import DateSet
from bwtougu.utils.py2 import lru_cache
from bwtougu.data.yield_curve_store import YieldCurveStore
class BaseDataSource(AbstractDataSource):
def __init__(self, path):
if not os.path.exists(path):
            raise RuntimeError('bundle path {} does not exist'.format(os.path.abspath(path)))
def _p(name):
return os.path.join(path, name)
self._day_bars = [
DayBarStore(_p('stocks.bcolz'), StockBarConverter),
DayBarStore(_p('indexes.bcolz'), IndexBarConverter),
]
self._instruments = InstrumentStore(_p('instruments.pk'))
self._dividends = DividendStore(_p('original_dividends.bcolz'))
self._trading_dates = TradingDatesStore(_p('trading_dates.bcolz'))
self._yield_curve = YieldCurveStore(_p('yield_curve.bcolz'))
self._split_factor = SimpleFactorStore(_p('split_factor.bcolz'))
self._ex_cum_factor = SimpleFactorStore(_p('ex_cum_factor.bcolz'))
self._st_stock_days = DateSet(_p('st_stock_days.bcolz'))
self._suspend_days = DateSet(_p('suspended_days.bcolz'))
self.get_yield_curve = self._yield_curve.get_yield_curve
self.get_risk_free_rate = self._yield_curve.get_risk_free_rate
def get_trading_calendar(self):
return self._trading_dates.get_trading_calendar()
def get_all_instruments(self):
return self._instruments.get_all_instruments()
# 是否停牌
def is_suspended(self, order_book_id, dates):
return self._suspend_days.contains(order_book_id, dates)
# 是否为st股票
def is_st_stock(self, order_book_id, dates):
return self._st_stock_days.contains(order_book_id, dates)
INSTRUMENT_TYPE_MAP = {
'CS': 0,
'INDX': 1,
'Future': 2,
'ETF': 3,
'LOF': 3,
'FenjiA': 3,
'FenjiB': 3,
'FenjiMu': 3,
'PublicFund': 4
}
def _index_of(self, instrument):
return self.INSTRUMENT_TYPE_MAP[instrument.type]
@lru_cache(None)
def _all_day_bars_of(self, instrument):
i = self._index_of(instrument)
return self._day_bars[i].get_bars(instrument.order_book_id, fields=None)
def get_bar(self, instrument, dt, frequency):
if frequency != '1d':
raise NotImplementedError
bars = self._all_day_bars_of(instrument)
if bars is None:
return
dt = np.uint64(convert_date_to_int(dt))
pos = bars['datetime'].searchsorted(dt)
if pos >= len(bars) or bars['datetime'][pos] != dt:
return None
return bars[pos]
def available_data_range(self, frequency):
if frequency in ['tick', '1d']:
s, e = self._day_bars[self.INSTRUMENT_TYPE_MAP['INDX']].get_date_range('000001.XSHG')
return convert_int_to_date(s).date(), convert_int_to_date(e).date()
raise NotImplementedError
def get_dividend(self, order_book_id, public_fund=False):
if public_fund:
return self._public_fund_dividends.get_dividend(order_book_id)
return self._dividends.get_dividend(order_book_id)
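# Illustrative usage sketch: './bundle' is a placeholder path to an extracted data bundle
# containing the bcolz/pickle files listed in __init__ above.
if __name__ == '__main__':
    data_source = BaseDataSource('./bundle')
    print(data_source.available_data_range('1d'))
    print(len(data_source.get_all_instruments()))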
|
the-stack_0_22456 | """
Solve some two-player games
"""
from puzzle_generator import PuzzleGenerator
from typing import List
# See https://github.com/microsoft/PythonProgrammingPuzzles/wiki/How-to-add-a-puzzle to learn about adding puzzles
class Nim(PuzzleGenerator):
"""
Compute optimal play for the classic two-player game [Nim](https://en.wikipedia.org/wiki/Nim)
Nim has an elegant theory for optimal play based on the xor of the bits in the heaps.
Instead of writing a program that plays the game interactively (since interaction is not allowed), we require
them to determine the winning states.
"""
value_multiplier = 10 # harder than most problems, worth more
@staticmethod
def sat(cert: List[List[int]], heaps=[5, 9]):
"""
Compute optimal play in Nim, a two-player game involving a number of heaps of objects. Players alternate,
in each turn removing one or more objects from a single non-empty heap. The player who takes the last object
wins. The initial board state is represented by heaps, a list of numbers of objects in each heap.
The optimal play is certified by a list of "winning leaves" which are themselves lists of heap sizes
that, with optimal play, are winning if you leave your opponent with those numbers of objects.
"""
good_leaves = {tuple(h) for h in cert} # for efficiency, we keep track of h as a tuple of n non-negative ints
cache = {}
def is_good_leave(h):
if h in cache:
return cache[h]
next_states = [(*h[:i], k, *h[i + 1:]) for i in range(len(h)) for k in range(h[i])]
conjecture = (h in good_leaves)
if conjecture: # check that it is a good leave
assert not any(is_good_leave(s) for s in next_states)
else: # check that it is a bad leave, only need to check one move
assert is_good_leave(next(s for s in next_states if s in good_leaves))
cache[h] = conjecture
return conjecture
init_leave = tuple(heaps)
return is_good_leave(init_leave) == (init_leave in good_leaves)
@staticmethod
def sol(heaps):
import itertools
def val(h): # return True if h is a good state to leave things in
xor = 0
for i in h:
xor ^= i
return xor == 0
return [list(h) for h in itertools.product(*[range(i + 1) for i in heaps]) if val(h)]
def gen_random(self):
num_heaps = self.random.randrange(10)
heaps = [self.random.randrange(10) for _ in range(num_heaps)]
prod = 1
for i in heaps:
prod *= i + 1
if prod < 10 ** 6:
self.add(dict(heaps=heaps))
class Mastermind(PuzzleGenerator):
"""Compute a strategy for winning in [mastermind](https://en.wikipedia.org/wiki/Mastermind_%28board_game%29)
in a given number of guesses.
Instead of writing a program that plays the game interactively (since interaction is not allowed), we require
them to provide a provable winning game tree.
"""
timeout = 10
@staticmethod
def sat(transcripts: List[str], max_moves=10):
"""
Come up with a winning strategy for Mastermind in max_moves moves. Colors are represented by the letters A-F.
The solution representation is as follows.
A transcript is a string describing the game so far. It consists of rows separated by newlines.
Each row has 4 letters A-F followed by a space and then two numbers indicating how many are exactly right
and how many are right but in the wrong location. A sample transcript is as follows:
```
AABB 11
ABCD 21
ABDC
```
This is the transcript as the game is in progress. The complete transcript might be:
```
AABB 11
ABCD 21
ABDC 30
ABDE 40
```
A winning strategy is described by a list of transcripts to visit. The next guess can be determined from
those partial transcripts.
"""
COLORS = "ABCDEF"
def helper(secret: str, transcript=""):
if transcript.count("\n") == max_moves:
return False
guess = min([t for t in transcripts if t.startswith(transcript)], key=len)[-4:]
if guess == secret:
return True
assert all(g in COLORS for g in guess)
perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
return helper(secret, transcript + f"{guess} {sum(perfect.values())}{almost}\n")
return all(helper(r + s + t + u) for r in COLORS for s in COLORS for t in COLORS for u in COLORS)
@staticmethod
def sol(max_moves):
COLORS = "ABCDEF"
transcripts = []
ALL = [r + s + t + u for r in COLORS for s in COLORS for t in COLORS for u in COLORS]
def score(secret, guess):
perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
return f"{sum(perfect.values())}{almost}"
def mastermind(transcript="AABB", feasible=ALL): # mastermind moves
transcripts.append(transcript)
assert transcript.count("\n") <= max_moves
guess = transcript[-4:]
feasibles = {}
for secret in feasible:
scr = score(secret, guess)
if scr not in feasibles:
feasibles[scr] = []
feasibles[scr].append(secret)
for scr, secrets in feasibles.items():
if scr != "40":
guesser(transcript + f" {scr}\n", secrets)
def guesser(transcript, feasible): # guesser moves
def max_ambiguity(guess):
by_score = {}
for secret2 in feasible:
scr = score(secret2, guess)
if scr not in by_score:
by_score[scr] = 0
by_score[scr] += 1
# for OPTIMAL solution, use return max(by_score.values()) + 0.5 * (guess not in feasible) instead of:
return max(by_score.values())
# for optimal solution use guess = min(ALL, key=max_ambiguity) instead of:
guess = min(feasible, key=max_ambiguity)
mastermind(transcript + guess, feasible)
mastermind()
return transcripts
def gen(self, target_num_instances):
for max_moves in [6, 8, 10]:
self.add(dict(max_moves=max_moves))
class TicTacToeX(PuzzleGenerator):
"""Since we don't have interaction, this problem asks for a full tie-guranteeing strategy."""
@staticmethod
def sat(good_boards: List[str]):
"""
Compute a strategy for X (first player) in tic-tac-toe that guarantees a tie. That is a strategy for X that,
no matter what the opponent does, X does not lose.
A board is represented as a 9-char string like an X in the middle would be "....X...." and a
move is an integer 0-8. The answer is a list of "good boards" that X aims for, so no matter what O does there
is always good board that X can get to with a single move.
"""
board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
def tie(x, o): # returns True if X has a forced tie/win assuming it's X's turn to move.
x |= 1 << [i for i in range(9) if (x | (1 << i), o) in board_bit_reps][0]
return not win[o] and (win[x] or all((x | o) & (1 << i) or tie(x, o | (1 << i)) for i in range(9)))
return tie(0, 0)
@staticmethod
def sol():
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)] # 9-bit representation
good_boards = []
def x_move(x, o): # returns True if x wins or ties, x's turn to move
if win[o]:
return False
if x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and o_move(x | (1 << i), o):
good_boards.append("".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + (i == j)] for j in range(9)))
return True
return False # O wins
def o_move(x, o): # returns True if x wins or ties, x's turn to move
if win[x] or x | o == 511: # full board
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and not x_move(x, o | (1 << i)):
return False
return True # O wins
res = x_move(0, 0)
assert res
return good_boards
class TicTacToeO(PuzzleGenerator):
"""Same as above but for 2nd player"""
@staticmethod
def sat(good_boards: List[str]):
"""
Compute a strategy for O (second player) in tic-tac-toe that guarantees a tie. That is a strategy for O that,
no matter what the opponent does, O does not lose.
A board is represented as a 9-char string like an X in the middle would be "....X...." and a
move is an integer 0-8. The answer is a list of "good boards" that O aims for, so no matter what X does there
is always good board that O can get to with a single move.
"""
board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
def tie(x, o): # returns True if O has a forced tie/win. It's O's turn to move.
if o | x != 511: # complete board
o |= 1 << [i for i in range(9) if (x, o | (1 << i)) in board_bit_reps][0]
return not win[x] and (win[o] or all((x | o) & (1 << i) or tie(x | (1 << i), o) for i in range(9)))
return all(tie(1 << i, 0) for i in range(9))
@staticmethod
def sol():
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)] # 9-bit representation
good_boards = []
def x_move(x, o): # returns True if o wins or ties, x's turn to move
if win[o] or x | o == 511: # full board
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and not o_move(x | (1 << i), o):
return False
return True # O wins/ties
def o_move(x, o): # returns True if o wins or ties, o's turn to move
if win[x]:
return False
if x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and x_move(x, o | (1 << i)):
good_boards.append(
"".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + 2 * (i == j)] for j in range(9)))
return True
return False # X wins
res = x_move(0, 0)
assert res
return good_boards
class RockPaperScissors(PuzzleGenerator):
@staticmethod
def sat(probs: List[float]):
"""Find optimal probabilities for playing Rock-Paper-Scissors zero-sum game, with best worst-case guarantee"""
assert len(probs) == 3 and abs(sum(probs) - 1) < 1e-6
return max(probs[(i + 2) % 3] - probs[(i + 1) % 3] for i in range(3)) < 1e-6
@staticmethod
def sol():
return [1 / 3] * 3
if __name__ == "__main__":
PuzzleGenerator.debug_problems()
|
the-stack_0_22457 | # Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import amqcfg.config_data
dataset_metadata_members = (
'tool_name',
'datetime',
)
@pytest.mark.parametrize('member', dataset_metadata_members)
def test_add_template_metadata_check_member(member):
data = {}
amqcfg.config_data.add_template_metadata(data)
assert member in data['metadata']
dataset_metadata_datetime_members = (
'datetime',
'year',
'time',
'date',
'unix',
)
@pytest.mark.parametrize('member', dataset_metadata_datetime_members)
def test_add_template_metadata_datetime_check_member(member):
data = {}
amqcfg.config_data.add_template_metadata(data)
assert member in data['metadata']['datetime']
|
the-stack_0_22459 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
import os
import paddle
paddle.enable_static()
flag_name = os.path.splitext(__file__)[0]
class TestStaticModelParallel(TestDistBase):
def _setup_config(self):
self._sync_mode = True
self._use_reduce = False
self._use_reader_alloc = False
self._nccl_comm_num = 1
self._pipeline_mode = True
def test_dist_static_model_parallel_fused_feedforward(self):
import paddle.fluid as fluid
if fluid.core.is_compiled_with_cuda():
self.check_with_place(
"static_model_parallel_fused_feedforward.py",
delta=1e-5,
check_error_log=True,
log_name=flag_name)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_22461 | """Test version checks."""
from os import getenv
from datetime import datetime
from pathlib import Path
from packaging.version import Version
import pytest
from .. import version as _version
from ..version import check_latest, DATE_FMT, requests, is_flagged
class MockResponse:
"""Mocks the requests module so that Pypi is not actually queried."""
status_code = 200
_json = {
"releases": {"1.0.0": None, "1.0.1": None, "1.1.0": None, "1.1.1rc1": None}
}
def __init__(self, code=200, json=None):
"""Allow setting different response codes."""
self.status_code = code
if json is not None:
self._json = json
def json(self):
"""Redefine the response object."""
return self._json
def test_check_latest1(tmpdir, monkeypatch):
"""Test latest version check."""
tmpdir.chdir()
monkeypatch.setenv("HOME", str(tmpdir))
assert str(Path.home()) == str(tmpdir)
def mock_get(*args, **kwargs):
return MockResponse()
monkeypatch.setattr(requests, "get", mock_get)
# Initially, cache should not exist
cachefile = Path.home() / ".cache" / "fmriprep-rodents" / "latest"
assert not cachefile.exists()
# First check actually fetches from pypi
v = check_latest()
assert cachefile.exists()
assert isinstance(v, Version)
assert v == Version("1.1.0")
assert cachefile.read_text().split("|") == [
str(v),
datetime.now().strftime(DATE_FMT),
]
# Second check - test the cache file is read
cachefile.write_text("|".join(("1.0.0", cachefile.read_text().split("|")[1])))
v = check_latest()
assert isinstance(v, Version)
assert v == Version("1.0.0")
# Third check - forced oudating of cache
cachefile.write_text("2.0.0|20180121")
v = check_latest()
assert isinstance(v, Version)
assert v == Version("1.1.0")
# Mock timeouts
def mock_get(*args, **kwargs):
raise requests.exceptions.Timeout
monkeypatch.setattr(requests, "get", mock_get)
cachefile.write_text("|".join(("1.0.0", cachefile.read_text().split("|")[1])))
v = check_latest()
assert isinstance(v, Version)
assert v == Version("1.0.0")
cachefile.write_text("2.0.0|20180121")
v = check_latest()
assert v is None
cachefile.unlink()
v = check_latest()
assert v is None
@pytest.mark.parametrize(
("result", "code", "json"),
[
(None, 404, None),
(None, 200, {"releases": {"1.0.0rc1": None}}),
(Version("1.1.0"), 200, None),
(Version("1.0.0"), 200, {"releases": {"1.0.0": None}}),
],
)
def test_check_latest2(tmpdir, monkeypatch, result, code, json):
"""Test latest version check with varying server responses."""
tmpdir.chdir()
monkeypatch.setenv("HOME", str(tmpdir))
assert str(Path.home()) == str(tmpdir)
def mock_get(*args, **kwargs):
return MockResponse(code=code, json=json)
monkeypatch.setattr(requests, "get", mock_get)
v = check_latest()
if result is None:
assert v is None
else:
assert isinstance(v, Version)
assert v == result
@pytest.mark.parametrize(
"bad_cache",
[
"3laj#r???d|3akajdf#",
"2.0.0|3akajdf#",
"|".join(("2.0.0", datetime.now().strftime(DATE_FMT), "")),
"",
],
)
def test_check_latest3(tmpdir, monkeypatch, bad_cache):
"""Test latest version check when the cache file is corrupted."""
tmpdir.chdir()
monkeypatch.setenv("HOME", str(tmpdir))
assert str(Path.home()) == str(tmpdir)
def mock_get(*args, **kwargs):
return MockResponse()
monkeypatch.setattr(requests, "get", mock_get)
# Initially, cache should not exist
cachefile = Path.home() / ".cache" / "fmriprep-rodents" / "latest"
cachefile.parent.mkdir(parents=True, exist_ok=True)
assert not cachefile.exists()
cachefile.write_text(bad_cache)
v = check_latest()
assert isinstance(v, Version)
assert v == Version("1.1.0")
@pytest.mark.parametrize(
("result", "version", "code", "json"),
[
(False, "1.2.1", 200, {"flagged": {"1.0.0": None}}),
(True, "1.2.1", 200, {"flagged": {"1.2.1": None}}),
(True, "1.2.1", 200, {"flagged": {"1.2.1": "FATAL Bug!"}}),
(False, "1.2.1", 404, {"flagged": {"1.0.0": None}}),
(False, "1.2.1", 200, {"flagged": []}),
(False, "1.2.1", 200, {}),
],
)
def test_is_flagged(monkeypatch, result, version, code, json):
"""Test that the flagged-versions check is correct."""
monkeypatch.setattr(_version, "__version__", version)
def mock_get(*args, **kwargs):
return MockResponse(code=code, json=json)
monkeypatch.setattr(requests, "get", mock_get)
val, reason = is_flagged()
assert val is result
test_reason = None
if val:
test_reason = json.get("flagged", {}).get(version, None)
if test_reason is not None:
assert reason == test_reason
else:
assert reason is None
def test_readonly(tmp_path, monkeypatch):
"""Test behavior when $HOME/.cache/fmriprep-rodents/latest can't be written out."""
home_path = (
Path("/home/readonly") if getenv("TEST_READONLY_FILESYSTEM") else tmp_path
)
monkeypatch.setenv("HOME", str(home_path))
cachedir = home_path / ".cache"
if getenv("TEST_READONLY_FILESYSTEM") is None:
cachedir.mkdir(mode=0o555, exist_ok=True)
# Make sure creating the folder will raise the exception.
with pytest.raises(OSError):
(cachedir / "fmriprep-rodents").mkdir(parents=True)
# Should not raise
check_latest()
|
the-stack_0_22462 | # -*- test-case-name: twisted.python.test.test_deprecate -*-
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Deprecation framework for Twisted.
To mark a method or function as being deprecated do this::
def badAPI(self, first, second):
'''
Docstring for badAPI.
'''
...
    badAPI = deprecated(Version("Twisted", 8, 0, 0))(badAPI)
The newly-decorated badAPI will issue a warning when called. It will also have
a deprecation notice appended to its docstring.
See also L{Version}.
"""
__all__ = [
'deprecated',
'getDeprecationWarningString',
'getWarningMethod',
'setWarningMethod',
]
from warnings import warn
from twisted.python.versions import getVersionString
from twisted.python.reflect import fullyQualifiedName
from twisted.python.util import mergeFunctionMetadata
def getWarningMethod():
"""
Return the warning method currently used to record deprecation warnings.
"""
return warn
def setWarningMethod(newMethod):
"""
Set the warning method to use to record deprecation warnings.
The callable should take message, category and stacklevel. The return
value is ignored.
"""
global warn
warn = newMethod
def _getDeprecationDocstring(version):
return "Deprecated in %s." % getVersionString(version)
def getDeprecationWarningString(callableThing, version):
"""
Return a string indicating that the callable was deprecated in the given
version.
@param callableThing: A callable to be deprecated.
@param version: The L{twisted.python.versions.Version} that the callable
was deprecated in.
@return: A string describing the deprecation.
"""
return "%s was deprecated in %s" % (
fullyQualifiedName(callableThing), getVersionString(version))
def deprecated(version):
"""
Return a decorator that marks callables as deprecated.
@type version: L{twisted.python.versions.Version}
@param version: The version in which the callable will be marked as
having been deprecated. The decorated function will be annotated
with this version, having it set as its C{deprecatedVersion}
attribute.
"""
def deprecationDecorator(function):
"""
Decorator that marks C{function} as deprecated.
"""
warningString = getDeprecationWarningString(function, version)
def deprecatedFunction(*args, **kwargs):
warn(
warningString,
DeprecationWarning,
stacklevel=2)
return function(*args, **kwargs)
deprecatedFunction = mergeFunctionMetadata(
function, deprecatedFunction)
_appendToDocstring(deprecatedFunction,
_getDeprecationDocstring(version))
deprecatedFunction.deprecatedVersion = version
return deprecatedFunction
return deprecationDecorator
def _appendToDocstring(thingWithDoc, textToAppend):
"""
Append the given text to the docstring of C{thingWithDoc}.
If C{thingWithDoc} has no docstring, then the text just replaces the
docstring. If it has a single-line docstring then it appends a blank line
    and the message text. If it has a multi-line docstring, then it appends a
    blank line and the message text, and also does the indentation correctly.
"""
if thingWithDoc.__doc__:
docstringLines = thingWithDoc.__doc__.splitlines()
else:
docstringLines = []
if len(docstringLines) == 0:
docstringLines.append(textToAppend)
elif len(docstringLines) == 1:
docstringLines.extend(['', textToAppend, ''])
else:
spaces = docstringLines.pop()
docstringLines.extend(['',
spaces + textToAppend,
spaces])
thingWithDoc.__doc__ = '\n'.join(docstringLines)
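# --- Illustrative usage sketch (added example; not part of the original) ---
# Applies the deprecated() decorator defined above via the modern @-syntax;
# the "Twisted" version numbers below are arbitrary placeholders.
if __name__ == '__main__':
    from twisted.python.versions import Version
    @deprecated(Version("Twisted", 8, 0, 0))
    def badAPI(first, second):
        """Docstring for badAPI."""
        return first + second
    # Calling the decorated function routes a DeprecationWarning through warn().
    print(badAPI(1, 2))
    print(badAPI.deprecatedVersion)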
|
the-stack_0_22465 | '''
Given an integer array nums and an integer k,
return the k most frequent elements. You may return the answer in any order.
Example 1:
Input: nums = [1,1,1,2,2,3], k = 2
Output: [1,2]
Example 2:
Input: nums = [1], k = 1
Output: [1]
'''
from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
count = {}
freq = [[] for i in range(len(nums) + 1)]
for n in nums:
count[n] = count.get(n, 0) + 1
for n, c in count.items():
freq[c].append(n) # freq[c] is a list of numbers with count
res = []
for i in range(len(freq) - 1, 0, -1):
for n in freq[i]:
res.append(n)
if len(res) == k:
return res
s = Solution()
print(s.topKFrequent([1, 1, 1, 2, 2, 3], 2))
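# Added cross-check (not part of the original solution): the frequency-bucket
# approach above runs in O(n); collections.Counter yields the same top-k set.
from collections import Counter
assert sorted(s.topKFrequent([1, 1, 1, 2, 2, 3], 2)) == sorted(
    n for n, _ in Counter([1, 1, 1, 2, 2, 3]).most_common(2))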
|
the-stack_0_22467 | import json, datetime
from discord.ext import commands
from datetime import timedelta
from vulcan import Keystore, Account, Vulcan
from tabulate import tabulate
with open("config.json", "r") as config:
data = json.load(config)
prefix = data["prefix"]
dziennik_enabled = data["dziennik_enabled"]
class ZadaniaDomowe(commands.Cog, name='Zadania domowe'):
def __init__(self, bot):
self.bot = bot
bot = commands.Bot(command_prefix=prefix)
@bot.command(aliases=['zadania_domowe', 'zadane', 'zadaniadomowe', 'zaddom', 'hw'])
async def homework(self, ctx):
if not dziennik_enabled:
await ctx.reply("Moduł dziennika jest wyłączony!", mention_author=False)
return
await ctx.reply(f'Zadania domowe: \n```{await self.get_homework()}```', mention_author=False)
#Doesnt work?
# @plan.error
# async def plan_error(ctx, error):
# if isinstance(error, commands.BadArgument) or isinstance(error, commands.MissingRequiredArgument):
# await ctx.channel.send("Instrukcja: `!plan <dzień> <grupa>`. \nLista dni: \n```dzisiaj, jutro, pojutrze, wczoraj, poniedzialek, poniedziałek, wtorek, środa, sroda, czwartek, piątek, piatek```")
async def get_homework(self):
with open("key-config.json") as f:
# load from a JSON string
dziennikKeystore = Keystore.load(f.read())
with open("acc-config.json") as f:
# load from a JSON string
dziennikAccount = Account.load(f.read())
dziennikClient = Vulcan(dziennikKeystore, dziennikAccount)
await dziennikClient.select_student()
lessons = await dziennikClient.data.get_lessons()
tmp = []
async for lesson in lessons:
tmp.append(lesson)
lessons = tmp
homeworks = await dziennikClient.data.get_homework()
tmp = []
rows = []
headers = ["Data", "Przedmiot", "Treść"]
all_info = {}
today = datetime.date.today()
tmp = today.weekday()
first_day = today
while tmp != 0: #Get first day of the week
first_day = first_day - timedelta(days=1)
tmp = tmp - 1
last_day = first_day + timedelta(days=4)
homeworks = await dziennikClient.data.get_homework()
number = 0
async for hw in homeworks:
            if first_day <= hw.deadline.date <= last_day:  # homework due in the current week
                all_info[number] = [hw]
                number += 1
await dziennikClient.close()
for key in sorted(all_info):
homework = all_info[key][0]
if homework.content:
content = homework.content
if homework.subject:
name = homework.subject.name
if len(name) > 16:
name = homework.subject.code
else:
name = homework.subject.name
print(name)
else:
name = 'NO_INFO'
date = homework.deadline.date.strftime("%d.%m.%Y")
rows.append([date, name, content])
table = tabulate(rows, headers, tablefmt="orgtbl", stralign="center")
print(table)
return table
def setup(bot):
bot.add_cog(ZadaniaDomowe(bot))
|
the-stack_0_22469 | """
Pre-process the CNN/Daily Mail dataset. Before using this script, please download the following
files and put all of them under `data/cnndm`:
* cnn_stories_tokenized.zip, dm_stories_tokenized.zip -- These can be obtained from
https://github.com/JafferWilson/Process-Data-of-CNN-DailyMail
* all_test.txt, all_train.txt, all_val.txt -- These are the indices of documents in See et al's
training/validation/testing sets, used here to ensure the same data split. They can be found in
https://github.com/abisee/cnn-dailymail/tree/master/url_lists
This script will generate `cnndm.gz`, `cnndm.val.gz`, and `cnndm.test.gz`. Each file is a gzipped
text file containing one example per line.
"""
import re
import os
import gzip
from zipfile import ZipFile
from hashlib import sha1
splitter = re.compile(r'(-)')
word_recognizer = re.compile(r'^\w[\w\-]+\w$')
contractions = {"s", "d", "ve", "ll", "m", "re", "em"}
ptb_unescape = {'-LRB-': '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}
print_every = 1000 # print progress every 1000 documents
data_path = os.path.dirname(os.path.abspath(__file__))
corpus_path = os.path.join(data_path, 'cnndm')
def split_example(filename: str, data: str, eop: str='<P>') -> tuple:
text, summary = [], []
highlight_mode = False
for paragraph in data.split('\n\n'):
if paragraph == '@highlight':
highlight_mode = True
else:
original_tokens = paragraph.split()
tokens, next_prefix = [], None
for i, tok in enumerate(original_tokens):
if tok == '¿': # convert ¿ into '
if i + 1 < len(original_tokens):
if original_tokens[i+1] == 't' and len(tokens) > 0 and tokens[-1][-1] == 'n':
tokens[-1] = tokens[-1][:-1]
next_prefix = "n'"
elif original_tokens[i+1] in contractions:
next_prefix = "'"
elif len(tokens) > 0 and tokens[-1] == 'o': # o ' clock => o'clock
tokens.pop()
next_prefix = "o'"
elif len(tokens) > 0 and tokens[-1] == 'y': # y ' all => y' all
tokens[-1] = "y'"
else:
tokens.append("'")
else:
tokens.append("'")
elif tok in ptb_unescape:
assert next_prefix is None
tokens.append(ptb_unescape[tok])
elif tok == '|':
assert next_prefix is None
else:
tok = tok.lower()
if next_prefix is not None:
tok = next_prefix + tok
if tok == '-':
tokens.append('--')
elif '-' in tok and not '--' in tok and word_recognizer.match(tok):
tokens.extend(t for t in splitter.split(tok) if t)
else:
tokens.append(tok)
next_prefix = None
if not tokens:
continue # skip empty paragraphs
if eop: tokens.append(eop)
if highlight_mode is False:
text.extend(tokens)
else:
if highlight_mode is True:
summary.extend(tokens)
highlight_mode = None
else:
print("A paragraph in %s is dropped because it is not text or summary." % filename)
return text, summary
def get_story_set(filename: str) -> set:
story_names = set()
with open(os.path.join(corpus_path, filename), 'rb') as f:
for line in f:
story_names.add(sha1(line.strip()).hexdigest())
return story_names
train_set = get_story_set('all_train.txt')
valid_set = get_story_set('all_val.txt')
test_set = get_story_set('all_test.txt')
train_out = gzip.open(os.path.join(data_path, 'cnndm.gz'), 'wt')
valid_out = gzip.open(os.path.join(data_path, 'cnndm.val.gz'), 'wt')
test_out = gzip.open(os.path.join(data_path, 'cnndm.test.gz'), 'wt')
count = 0
for download_file in ['cnn_stories_tokenized.zip', 'dm_stories_tokenized.zip']:
with ZipFile(os.path.join(corpus_path, download_file), 'r') as archive:
for filename in archive.namelist():
if not filename.endswith('.story'): continue
story_name = filename[-46:-6]
if story_name in train_set:
fout = train_out
elif story_name in valid_set:
fout = valid_out
elif story_name in test_set:
fout = test_out
else:
print("Error: filename %s is not found in train, valid, or test set." % filename)
continue
with archive.open(filename, 'r') as f:
content = f.read().decode('utf-8')
text, summary = split_example(filename, content)
if not text:
print("Skipped: %s has no text." % filename)
continue
if not summary:
print("Skipped: %s has no summary." % filename)
continue
if len(text) < len(summary):
print("Skipped: the text of %s is shorter than its summary." % filename)
continue
fout.write(" ".join(text) + "\t" + " ".join(summary) + "\n")
count += 1
if count % print_every == 0:
print(count)
fout.flush()
train_out.close()
valid_out.close()
test_out.close()
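# --- Illustrative read-back sketch (added; not part of the original script) ---
# Each line of the generated files is "<text>\t<summary>", both sides holding
# space-separated tokens; the default path below is only an example.
def read_first_example(path=os.path.join(data_path, 'cnndm.val.gz')):
    with gzip.open(path, 'rt') as f:
        line = f.readline()
    text, summary = line.rstrip('\n').split('\t')
    return text.split(), summary.split()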
|
the-stack_0_22470 | # coding: utf-8
import math
import torch
from torch import nn
# from util.misc import NestedTensor
# class PositionEmbeddingSine(nn.Module):
# """
# This is a more standard version of the position embedding, very similar to the one
# used by the Attention is all you need paper, generalized to work on images.
# """
# def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
# super().__init__()
# self.num_pos_feats = num_pos_feats
# self.temperature = temperature
# self.normalize = normalize
# if scale is not None and normalize is False:
# raise ValueError("normalize should be True if scale is passed")
# if scale is None:
# scale = 2 * math.pi
# self.scale = scale
# # def forward(self, tensor_list: NestedTensor):
# # x = tensor_list.tensors
# # mask = tensor_list.mask
# # assert mask is not None
# def forward(self, mask):
# not_mask = ~mask
# y_embed = not_mask.cumsum(1, dtype=torch.float32)
# x_embed = not_mask.cumsum(2, dtype=torch.float32)
# import ipdb; ipdb.set_trace()
# if self.normalize:
# eps = 1e-6
# y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
# x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
# dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)
# dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
# pos_x = x_embed[:, :, :, None] / dim_t
# pos_y = y_embed[:, :, :, None] / dim_t
# pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
# pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
# pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
# return pos
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor):
x = tensor
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = torch.cat([
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
return pos
def test_embedding():
    hidden_dim = 256
    N_steps = hidden_dim // 2
    # position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    position_embedding = PositionEmbeddingLearned(N_steps)  # exercise the learned variant defined above
    x = torch.zeros(2, 3, 16, 16)
    pos = position_embedding(x)
    assert pos.shape == (2, hidden_dim, 16, 16)
|
the-stack_0_22471 | #!/usr/bin/env python3
import os
import re
from setuptools import setup, find_packages
version = None
def find(haystack, *needles):
regexes = [(index, re.compile(r'^{}\s*=\s*[\'"]([^\'"]*)[\'"]$'.format(needle))) for index, needle in enumerate(needles)]
values = ['' for needle in needles]
for line in haystack:
if len(regexes) == 0:
break
for rindex, (vindex, regex) in enumerate(regexes):
match = regex.match(line)
if match:
values[vindex] = match.groups()[0]
del regexes[rindex]
break
if len(needles) == 1:
return values[0]
else:
return values
with open(os.path.join(os.path.dirname(__file__), 'serve', '__init__.py'), 'r') as serve:
version = find(serve, '__version__')
setup(
name='serve',
version=version,
description='a simple autoindexing file server',
license='MIT',
author='Lily Foster',
author_email='[email protected]',
install_requires=['fooster-web'],
packages=find_packages(),
package_data={'': ['html/*.*', 'res/*.*']},
entry_points={'console_scripts': ['serve = serve.__main__:main']},
)
|
the-stack_0_22474 | r"""Function of unitary fourier transform (uft) and utilities
This module implements the unitary fourier transform, also known as
the ortho-normal transform. It is especially useful for convolution
[1], as it respects the Parseval equality. The value of the null
frequency is equal to
.. math:: \frac{1}{\sqrt{n}} \sum_i x_i
so the Fourier transform has the same energy as the original image
(see ``image_quad_norm`` function). The transform is applied from the
last axis for performance (assuming a C-order array input).
References
----------
.. [1] B. R. Hunt "A matrix theory proof of the discrete convolution
theorem", IEEE Trans. on Audio and Electroacoustics,
vol. au-19, no. 4, pp. 285-288, dec. 1971
"""
import math
import cupy as cp
import numpy as np
from .._shared.fft import fftmodule as fft
__keywords__ = "fft, Fourier Transform, orthonormal, unitary"
def ufftn(inarray, dim=None):
"""N-dimensional unitary Fourier transform.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
The last axis along which to compute the transform. All
axes by default.
Returns
-------
    outarray : ndarray (same shape as inarray)
The unitary N-D Fourier transform of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((3, 3, 3))
>>> output = ufftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
True
>>> output.shape
(3, 3, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.fftn(inarray, axes=range(-dim, 0), norm="ortho")
return outarray
def uifftn(inarray, dim=None):
"""N-dimensional unitary inverse Fourier transform.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
The last axis along which to compute the transform. All
axes by default.
Returns
-------
    outarray : ndarray (same shape as inarray)
The unitary inverse N-D Fourier transform of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((3, 3, 3))
>>> output = uifftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
True
>>> output.shape
(3, 3, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.ifftn(inarray, axes=range(-dim, 0), norm='ortho')
return outarray
def urfftn(inarray, dim=None):
"""N-dimensional real unitary Fourier transform.
This transform considers the Hermitian property of the transform on
real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
dim : int, optional
The last axis along which to compute the transform. All
axes by default.
Returns
-------
outarray : ndarray, shape (M, N, ..., P / 2 + 1)
The unitary N-D real Fourier transform of ``inarray``.
Notes
-----
The ``urfft`` functions assume an input array of real
values. Consequently, the output has a Hermitian property and
redundant values are not computed or returned.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5, 5))
>>> output = urfftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
True
>>> output.shape
(5, 5, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.rfftn(inarray, axes=range(-dim, 0), norm='ortho')
return outarray
def uirfftn(inarray, dim=None, shape=None):
"""N-dimensional inverse real unitary Fourier transform.
This transform considers the Hermitian property of the transform
from complex to real input.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
The last axis along which to compute the transform. All
axes by default.
shape : tuple of int, optional
The shape of the output. The shape of ``rfft`` is ambiguous in
case of odd-valued input shape. In this case, this parameter
should be provided. See ``cupy.fft.irfftn``.
Returns
-------
outarray : ndarray
The unitary N-D inverse real Fourier transform of ``inarray``.
Notes
-----
The ``uirfft`` function assumes that the output array is
real-valued. Consequently, the input is assumed to have a Hermitian
property and redundant values are implicit.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5, 5))
>>> output = uirfftn(urfftn(input), shape=input.shape)
>>> cp.allclose(input, output)
True
>>> output.shape
(5, 5, 5)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.irfftn(inarray, shape, axes=range(-dim, 0), norm='ortho')
return outarray
def ufft2(inarray):
"""2-dimensional unitary Fourier transform.
Compute the Fourier transform on the last 2 axes.
Parameters
----------
inarray : ndarray
The array to transform.
Returns
-------
outarray : ndarray (same shape as inarray)
The unitary 2-D Fourier transform of ``inarray``.
See Also
--------
uifft2, ufftn, urfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = ufft2(input)
>>> cp.allclose(cp.sum(input[1, ...]) / cp.sqrt(input[1, ...].size),
... output[1, 0, 0])
True
>>> output.shape
(10, 128, 128)
"""
return ufftn(inarray, 2)
def uifft2(inarray):
"""2-dimensional inverse unitary Fourier transform.
Compute the inverse Fourier transform on the last 2 axes.
Parameters
----------
inarray : ndarray
The array to transform.
Returns
-------
outarray : ndarray (same shape as inarray)
The unitary 2-D inverse Fourier transform of ``inarray``.
See Also
--------
uifft2, uifftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = uifft2(input)
>>> cp.allclose(cp.sum(input[1, ...]) / cp.sqrt(input[1, ...].size),
... output[0, 0, 0])
True
>>> output.shape
(10, 128, 128)
"""
return uifftn(inarray, 2)
def urfft2(inarray):
"""2-dimensional real unitary Fourier transform
Compute the real Fourier transform on the last 2 axes. This
transform considers the Hermitian property of the transform from
complex to real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
Returns
-------
outarray : ndarray, shape (M, N, ..., 2 * (P - 1))
The unitary 2-D real Fourier transform of ``inarray``.
See Also
--------
ufft2, ufftn, urfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = urfft2(input)
>>> cp.allclose(cp.sum(input[1,...]) / cp.sqrt(input[1,...].size),
... output[1, 0, 0])
True
>>> output.shape
(10, 128, 65)
"""
return urfftn(inarray, 2)
def uirfft2(inarray, shape=None):
"""2-dimensional inverse real unitary Fourier transform.
Compute the real inverse Fourier transform on the last 2 axes.
This transform considers the Hermitian property of the transform
from complex to real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
shape : tuple of int, optional
The shape of the output. The shape of ``rfft`` is ambiguous in
case of odd-valued input shape. In this case, this parameter
should be provided. See ``cupy.fft.irfftn``.
Returns
-------
outarray : ndarray, shape (M, N, ..., 2 * (P - 1))
The unitary 2-D inverse real Fourier transform of ``inarray``.
See Also
--------
urfft2, uifftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = uirfftn(urfftn(input), shape=input.shape)
>>> cp.allclose(input, output)
True
>>> output.shape
(10, 128, 128)
"""
return uirfftn(inarray, 2, shape=shape)
def image_quad_norm(inarray):
"""Return the quadratic norm of images in Fourier space.
This function detects whether the input image satisfies the
Hermitian property.
Parameters
----------
inarray : ndarray
Input image. The image data should reside in the final two
axes.
Returns
-------
norm : float
The quadratic norm of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5))
>>> image_quad_norm(ufft2(input)) == cp.sum(cp.abs(input)**2)
True
>>> image_quad_norm(ufft2(input)) == image_quad_norm(urfft2(input))
True
"""
# If there is a Hermitian symmetry
abs_sq = cp.abs(inarray)
abs_sq *= abs_sq
if inarray.shape[-1] != inarray.shape[-2]:
return 2 * cp.sum(cp.sum(abs_sq, axis=-1), axis=-1) - cp.sum(
cp.abs(inarray[..., 0]) ** 2, axis=-1
)
else:
return cp.sum(cp.sum(abs_sq, axis=-1), axis=-1)
def ir2tf(imp_resp, shape, dim=None, is_real=True):
"""Compute the transfer function of an impulse response (IR).
This function makes the necessary correct zero-padding, zero
convention, correct fft2, etc... to compute the transfer function
of IR. To use with unitary Fourier transform for the signal (ufftn
or equivalent).
Parameters
----------
imp_resp : ndarray
The impulse responses.
shape : tuple of int
A tuple of integer corresponding to the target shape of the
transfer function.
dim : int, optional
The last axis along which to compute the transform. All
axes by default.
is_real : boolean, optional
If True (default), imp_resp is supposed real and the Hermitian property
is used with rfftn Fourier transform.
Returns
-------
y : complex ndarray
The transfer function of shape ``shape``.
See Also
--------
ufftn, uifftn, urfftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> cp.all(cp.array([[4, 0], [0, 0]]) == ir2tf(cp.ones((2, 2)), (2, 2)))
True
>>> ir2tf(cp.ones((2, 2)), (512, 512)).shape == (512, 257)
True
>>> ir2tf(cp.ones((2, 2)), (512, 512), is_real=False).shape == (512, 512)
True
Notes
-----
The input array can be composed of multiple-dimensional IR with
an arbitrary number of IR. The individual IR must be accessed
through the first axes. The last ``dim`` axes contain the space
definition.
"""
if not dim:
dim = imp_resp.ndim
# Zero padding and fill
irpadded_dtype = imp_resp.dtype if imp_resp.dtype.kind == 'f' else float
irpadded = cp.zeros(shape, dtype=irpadded_dtype)
irpadded[tuple([slice(0, s) for s in imp_resp.shape])] = imp_resp
# Roll for zero convention of the fft to avoid the phase
# problem. Work with odd and even size.
for axis, axis_size in enumerate(imp_resp.shape):
if axis >= imp_resp.ndim - dim:
irpadded = cp.roll(irpadded,
shift=-math.floor(axis_size / 2),
axis=axis)
if is_real:
return fft.rfftn(irpadded, axes=range(-dim, 0))
else:
return fft.fftn(irpadded, axes=range(-dim, 0))
def laplacian(ndim, shape, is_real=True, *, dtype=None):
"""Return the transfer function of the Laplacian.
Laplacian is the second order difference, on row and column.
Parameters
----------
ndim : int
The dimension of the Laplacian.
shape : tuple
The support on which to compute the transfer function.
is_real : boolean, optional
If True (default), imp_resp is assumed to be real-valued and
the Hermitian property is used with rfftn Fourier transform
to return the transfer function.
Returns
-------
tf : array_like, complex
The transfer function.
impr : array_like, real
The Laplacian.
Examples
--------
>>> import cupy as cp
>>> tf, ir = laplacian(2, (32, 32))
>>> cp.all(ir == cp.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]))
True
>>> cp.all(tf == ir2tf(ir, (32, 32)))
True
"""
if dtype is None:
dtype = cp.float64 if is_real else cp.complex128
elif np.dtype(dtype).kind != 'f':
raise ValueError("dtype must be a floating point dtype")
# CuPy Backend: assemble the small kernel on the host and then transfer it
impr = np.zeros([3] * ndim)
for dim in range(ndim):
idx = tuple(
[slice(1, 2)] * dim
+ [slice(None)]
+ [slice(1, 2)] * (ndim - dim - 1)
)
impr[idx] = np.array([-1.0, 0.0, -1.0]).reshape(
[-1 if i == dim else 1 for i in range(ndim)]
)
impr[(slice(1, 2),) * ndim] = 2.0 * ndim
impr = cp.array(impr, dtype=dtype)
if shape is None: # filters.laplace only uses the spatial kernel
return impr
return ir2tf(impr, shape, is_real=is_real), impr
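# --- Illustrative sketch (added; not part of the original module) ---
# Shows how ir2tf() and laplacian() combine into the regularized inverse filter
# used by Wiener-type deconvolution; the 3x3 uniform PSF and the balance value
# are arbitrary demonstration choices.
def _example_regularized_inverse(shape=(64, 64), balance=0.1):
    psf = cp.ones((3, 3)) / 9.0
    trans_func = ir2tf(psf, shape, is_real=True)  # PSF -> transfer function
    reg, _ = laplacian(2, shape, is_real=True)    # Laplacian regularizer
    return cp.conj(trans_func) / (cp.abs(trans_func) ** 2
                                  + balance * cp.abs(reg) ** 2)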
|
the-stack_0_22478 | # Copyright 2021, laggardkernel and the ranger-fzf-marks contributors
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import os
from ranger.api.commands import Command
class FzfMarksBase(Command):
fzf_cmd = os.environ.get("FZF_MARKS_CMD", "fzf")
# https://github.com/urbainvaes/fzf-marks
bookmark_file = os.environ.get("FZF_MARKS_FILE") or os.path.join(
os.environ.get("HOME", os.path.expanduser("~")), ".fzf-marks"
)
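# Format of the bookmark file used by the commands below: one entry per line,
# written as "<keyword> : <absolute path>", for example:
#   projects : /home/user/projects
#   dotfiles : /home/user/.config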
class fmark(FzfMarksBase):
"""
:fmark <name>
Mark the current directory with provided keyword
"""
def execute(self):
if not self.arg(1):
self.fm.notify(
"A keyword must be given for the current bookmark!", bad=True
)
return
item = "{} : {}".format(self.arg(1), self.fm.thisdir.path)
if not os.path.exists(self.bookmark_file):
with open(self.bookmark_file, "a") as f:
pass
with open(self.bookmark_file, "r") as f:
for line in f.readlines():
if line.split(":")[1].strip() == self.fm.thisdir.path:
self.fm.notify(
"Fzf bookmark already exists: {}".format(line.strip()), bad=True
)
return
with open(self.bookmark_file, "a") as f:
f.write("{}{}".format(item, os.linesep))
self.fm.notify("Fzf bookmark has been added: {}".format(item))
class dmark(FzfMarksBase):
"""
dmark: delete current directory from fzf-marks file
"""
fzf_opts = os.environ.get(
"FZF_DMARK_OPTS",
"--cycle -m --ansi --bind=ctrl-o:accept,ctrl-t:toggle",
)
def execute(self):
import subprocess
items = None
query = ""
if self.arg(1):
query = self.arg(1)
if not os.path.exists(self.bookmark_file):
self.fm.notify("No fzf bookmark is created yet!", bad=True)
return
# TODO: batch deletion
command = '< "{2}" sort -f | {0} {1} --query="{3}"'.format(
self.fzf_cmd, self.fzf_opts, self.bookmark_file, query
)
process = self.fm.execute_command(
command, universal_newlines=True, stdout=subprocess.PIPE
)
stdout, stderr = process.communicate()
if process.returncode == 0:
items = stdout.rstrip().split("\n")
if not items:
return
with open(self.bookmark_file, "r") as f:
lines = f.readlines()
with open(self.bookmark_file, "w") as f:
for line in lines:
if line.strip() not in items:
f.write(line)
self.fm.notify("Fzf bookmark is deleted: {}".format(", ".join(items)))
class fzm(FzfMarksBase):
"""
fzm: select and jump to bookmark stored in fzf-marks
"""
fzf_opts = os.environ.get(
"FZF_FZM_OPTS",
"--cycle +m --ansi --bind=ctrl-o:accept,ctrl-t:toggle --select-1",
)
def execute(self):
import subprocess
target = None
query = ""
if self.arg(1):
query = self.arg(1)
if not os.path.exists(self.bookmark_file):
self.fm.notify("No fzf bookmark is created yet!", bad=True)
return
command = '< "{2}" sort -f | {0} {1} --query "{3}"'.format(
self.fzf_cmd, self.fzf_opts, self.bookmark_file, query
)
process = self.fm.execute_command(
command, universal_newlines=True, stdout=subprocess.PIPE
)
stdout, stderr = process.communicate()
if process.returncode == 0:
key, target = stdout.rstrip().split(" : ", 1)
target = os.path.expanduser(target)
if not target:
return
elif os.path.isdir(target):
self.fm.cd(target)
elif os.path.isfile(target):
self.fm.select_file(target)
else:
self.fm.notify(
"Invalid fzf bookmark location: {} : {}".format(key, target), True
)
|
the-stack_0_22481 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wifite.tools.airodump import Airodump
import unittest
class TestTarget(unittest.TestCase):
    ''' Test suite for Target parsing and generation '''
airodump_csv = 'airodump.csv'
def getTargets(self, filename):
''' Helper method to parse targets from filename '''
import os, inspect
this_file = os.path.abspath(inspect.getsourcefile(TestTarget.getTargets))
this_dir = os.path.dirname(this_file)
csv_file = os.path.join(this_dir, 'files', filename)
# Load targets from CSV file
return Airodump.get_targets_from_csv(csv_file)
def testTargetParsing(self):
''' Asserts target parsing finds targets '''
targets = self.getTargets(TestTarget.airodump_csv)
assert(len(targets) > 0)
def testTargetClients(self):
''' Asserts target parsing captures clients properly '''
targets = self.getTargets(TestTarget.airodump_csv)
for t in targets:
if t.bssid == '00:1D:D5:9B:11:00':
assert(len(t.clients) > 0)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_22484 | # All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from NSDManagement.models import NsdInfo
from NSDSubscription.serializers import *
from rest_framework import viewsets, status
from rest_framework.utils import json
from rest_framework.exceptions import APIException
class VNFPackageSubscriptionViewSet(viewsets.ModelViewSet):
queryset = NsdmSubscription.objects.all()
serializer_class = NsdmSubscriptionSerializer
def create(self, request, **kwargs):
"""
Subscribe to NSD change notifications.
The POST method creates a new subscription. \
This method shall support the URI query parameters, request and response data structures, \
and response codes, as specified in the Tables 5.4.8.3.1-1 and 5.4.8.3.1-2. \
Creation of two subscription resources with the same callbackURI \
and the same filter can result in performance degradation \
and will provide duplicates of notifications to the OSS, \
and might make sense only in very rare use cases. \
Consequently, the NFVO may either allow creating a subscription resource \
if another subscription resource with the same filter \
and callbackUri already exists (in which case it shall return the “201 Created” response code), \
or may decide to not create a duplicate subscription resource (in which case \
it shall return a “303 See Other” response code referencing the existing subscription resource \
with the same filter and callbackUri).
"""
if 'filter' not in request.data or 'callbackUri' not in request.data:
            raise APIException(detail='filter or callbackUri does not exist',
code=status.HTTP_409_CONFLICT)
ns_descriptor_subscription_filter = request.data['filter'].get('nsdInfoId', None)
if not ns_descriptor_subscription_filter:
            raise APIException(detail='nsdInfoId does not exist',
code=status.HTTP_409_CONFLICT)
if not isinstance(ns_descriptor_subscription_filter, list):
            raise APIException(detail='nsdInfoId must be a list',
code=status.HTTP_409_CONFLICT)
for ns_id in ns_descriptor_subscription_filter:
ns_instance = NsdInfo.objects.filter(id=uuid.UUID(ns_id)).last()
if not ns_instance:
raise APIException(detail='Network Service Descriptor not found',
code=status.HTTP_404_NOT_FOUND)
request.data['filter']['nsdInfoId'] = json.dumps(ns_descriptor_subscription_filter)
request.data['_links'] = {'self': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def update(self, request, *args, **kwargs):
raise APIException(detail='Method Not Allowed',
code=status.HTTP_405_METHOD_NOT_ALLOWED)
def retrieve(self, request, *args, **kwargs):
"""
Read an individual subscription resource.
This resource represents an individual subscription. \
It can be used by the client to read and to terminate a subscription \
to notifications related to NSD management. \
The GET method retrieves information about a subscription by reading an individual subscription resource. \
This resource represents an individual subscription. \
It can be used by the client to read \
and to terminate a subscription to notifications related to NSD management.
"""
return super().retrieve(request)
def list(self, request, *args, **kwargs):
"""
Query multiple subscriptions.
The GET method queries the list of active subscriptions of the functional block that invokes the method. \
It can be used e.g. for resynchronization after error situations.
"""
return super().list(request)
def destroy(self, request, *args, **kwargs):
"""
Terminate Subscription.
This resource represents an individual subscription. It can be used by the client to read \
and to terminate a subscription to notifications related to NSD management.
The DELETE method terminates an individual subscription. \
This method shall support the URI query parameters, request and response data structures, \
and response codes, as specified in the Table 5.4.9.3.3-2.
"""
return super().destroy(request)
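# Illustrative POST body for create() above (a sketch only; the UUID is a
# placeholder for an existing NsdInfo id):
# {
#     "filter": {"nsdInfoId": ["b1bb0ce7-ebca-4fa7-95ed-4840d70a1177"]},
#     "callbackUri": "https://client.example.com/nsd-notifications"
# }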
|
the-stack_0_22485 | """Tool to convert Waymo Open Dataset to pickle files.
Adapted from https://github.com/WangYueFt/pillar-od
# Copyright (c) Massachusetts Institute of Technology and its affiliates.
# Licensed under MIT License
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob, argparse, tqdm, pickle, os
import waymo_decoder
import tensorflow.compat.v2 as tf
from waymo_open_dataset import dataset_pb2
from multiprocessing import Pool
import numpy as np
tf.enable_v2_behavior()
fnames = None
LIDAR_PATH = None
ANNO_PATH = None
prefix = 1 # validation
def convert(idx):
global fnames, prefix
fname = fnames[idx]
dataset = tf.data.TFRecordDataset(fname, compression_type='')
for frame_id, data in enumerate(dataset):
frame = dataset_pb2.Frame()
frame.ParseFromString(bytearray(data.numpy()))
decoded_frame = waymo_decoder.decode_frame(frame, frame_id)
bin_filepath = f'/mnt/xrhuang/datasets/waymo/kitti_format/training/velodyne/{prefix}{idx:03d}{frame_id:03d}.bin'
#points = np.fromfile(bin_filepath, dtype=np.float32).reshape(-1, 6)
decoded_frame.pop('lidars')
decoded_frame['lidars'] = bin_filepath
decoded_annos = waymo_decoder.decode_annos(frame, frame_id)
with open(os.path.join(LIDAR_PATH, 'seq_{}_frame_{}.pkl'.format(idx, frame_id)), 'wb') as f:
pickle.dump(decoded_frame, f)
with open(os.path.join(ANNO_PATH, 'seq_{}_frame_{}.pkl'.format(idx, frame_id)), 'wb') as f:
pickle.dump(decoded_annos, f)
def main(args):
global fnames, prefix
fnames = sorted(list(glob.glob(args.record_path)))
print("Number of files {}".format(len(fnames)))
with Pool(128) as p: # change according to your cpu
r = list(tqdm.tqdm(p.imap(convert, range(len(fnames))), total=len(fnames)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Waymo Data Converter')
parser.add_argument('--root_path', type=str, required=True)
parser.add_argument('--record_path', type=str, required=True)
args = parser.parse_args()
if not os.path.isdir(args.root_path):
os.mkdir(args.root_path)
LIDAR_PATH = os.path.join(args.root_path, 'lidar')
ANNO_PATH = os.path.join(args.root_path, 'annos')
if not os.path.isdir(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.isdir(ANNO_PATH):
os.mkdir(ANNO_PATH)
main(args)
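    # Output layout produced by convert() above, for reference:
    #   <root_path>/lidar/seq_<i>_frame_<j>.pkl - frame dict; 'lidars' holds the
    #                                             path to the matching .bin file
    #   <root_path>/annos/seq_<i>_frame_<j>.pkl - decoded annotations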
|