filename | text
---|---|
the-stack_0_333 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SsoUri(Model):
"""The URI required to login to the supplemental portal from the Azure portal.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar sso_uri_value: The URI used to login to the supplemental portal.
:vartype sso_uri_value: str
"""
_validation = {
'sso_uri_value': {'readonly': True},
}
_attribute_map = {
'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SsoUri, self).__init__(**kwargs)
self.sso_uri_value = None
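# Illustrative usage sketch (not part of the generated client): the read-only
# attribute stays None on a locally constructed instance and is only populated
# when a service response is deserialized into this model.
if __name__ == "__main__":
    example = SsoUri()
    print(example.sso_uri_value)  # prints None until set from a service response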
|
the-stack_0_334 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 12/02/2004
# Description: Defines a wxPython ImageControl widget that is used by various
# trait editors to display trait values iconically.
#
# Symbols defined: ImageControl
#
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import wx
#-------------------------------------------------------------------------------
# 'ImageControl' class:
#-------------------------------------------------------------------------------
class ImageControl ( wx.Window ):
# Pens used to draw the 'selection' marker:
_selectedPenDark = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DSHADOW ), 1,
wx.SOLID )
_selectedPenLight = wx.Pen(
wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DHIGHLIGHT ), 1,
wx.SOLID )
#---------------------------------------------------------------------------
# Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, parent, bitmap, selected = None, handler = None ):
""" Initializes the object.
"""
wx.Window.__init__( self, parent, -1,
size = wx.Size( bitmap.GetWidth() + 10,
bitmap.GetHeight() + 10 ) )
self._bitmap = bitmap
self._selected = selected
self._handler = handler
self._mouse_over = False
self._button_down = False
# Set up the 'paint' event handler:
wx.EVT_PAINT( self, self._on_paint )
# Set up mouse event handlers:
wx.EVT_LEFT_DOWN( self, self._on_left_down )
wx.EVT_LEFT_UP( self, self._on_left_up )
wx.EVT_ENTER_WINDOW( self, self._on_enter )
wx.EVT_LEAVE_WINDOW( self, self._on_leave )
#---------------------------------------------------------------------------
# Gets/Sets the current selection state of the image:
#---------------------------------------------------------------------------
def Selected ( self, selected = None ):
""" Gets/Sets the current selection state of the image.
"""
if selected is not None:
selected = (selected != 0)
if selected != self._selected:
if selected:
for control in self.GetParent().GetChildren():
if (isinstance( control, ImageControl ) and
control.Selected()):
control.Selected( False )
break
self._selected = selected
self.Refresh()
return self._selected
#---------------------------------------------------------------------------
# Gets/Sets the current bitmap image:
#---------------------------------------------------------------------------
def Bitmap ( self, bitmap = None ):
if bitmap is not None:
if bitmap != self._bitmap:
self._bitmap = bitmap
self.Refresh()
return self._bitmap
#---------------------------------------------------------------------------
# Gets/Sets the current click handler:
#---------------------------------------------------------------------------
def Handler ( self, handler = None ):
""" Gets/Sets the current click handler.
"""
if handler is not None:
if handler != self._handler:
self._handler = handler
self.Refresh()
return self._handler
#---------------------------------------------------------------------------
# Handles the mouse entering the control:
#---------------------------------------------------------------------------
def _on_enter ( self, event = None ):
""" Handles the mouse entering the control.
"""
if self._selected is not None:
self._mouse_over = True
self.Refresh()
#---------------------------------------------------------------------------
# Handles the mouse leaving the control:
#---------------------------------------------------------------------------
def _on_leave ( self, event = None ):
""" Handles the mouse leaving the control.
"""
if self._mouse_over:
self._mouse_over = False
self.Refresh()
#---------------------------------------------------------------------------
# Handles the user pressing the mouse button:
#---------------------------------------------------------------------------
def _on_left_down ( self, event = None ):
""" Handles the user pressing the mouse button.
"""
if self._selected is not None:
self.CaptureMouse()
self._button_down = True
self.Refresh()
#---------------------------------------------------------------------------
# Handles the user clicking the control:
#---------------------------------------------------------------------------
def _on_left_up ( self, event = None ):
""" Handles the user clicking the control.
"""
need_refresh = self._button_down
if need_refresh:
self.ReleaseMouse()
self._button_down = False
if self._selected is not None:
wdx, wdy = self.GetClientSizeTuple()
x = event.GetX()
y = event.GetY()
if (0 <= x < wdx) and (0 <= y < wdy):
if self._selected != -1:
self.Selected( True )
elif need_refresh:
self.Refresh()
if self._handler is not None:
self._handler( self )
return
if need_refresh:
self.Refresh()
#---------------------------------------------------------------------------
# Handles the control being re-painted:
#---------------------------------------------------------------------------
def _on_paint ( self, event = None ):
""" Handles the control being re-painted.
"""
wdc = wx.PaintDC( self )
wdx, wdy = self.GetClientSizeTuple()
bitmap = self._bitmap
bdx = bitmap.GetWidth()
bdy = bitmap.GetHeight()
wdc.DrawBitmap( bitmap, (wdx - bdx) / 2, (wdy - bdy) / 2, True )
pens = [ self._selectedPenLight, self._selectedPenDark ]
bd = self._button_down
if self._mouse_over:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 0, 0, wdx, 0 )
wdc.DrawLine( 0, 1, 0, wdy )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )
wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )
if self._selected == True:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 1, 1, wdx - 1, 1 )
wdc.DrawLine( 1, 1, 1, wdy - 1 )
wdc.DrawLine( 2, 2, wdx - 2, 2 )
wdc.DrawLine( 2, 2, 2, wdy - 2 )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )
wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )
wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )
wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 )
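#-------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; assumes classic
# wxPython and an existing 'icon.png'). An ImageControl is parented to any wx
# window and reports clicks through the optional handler:
#
#     app = wx.PySimpleApp()
#     frame = wx.Frame(None, -1, 'ImageControl demo')
#     bmp = wx.Bitmap('icon.png', wx.BITMAP_TYPE_PNG)
#     ctrl = ImageControl(frame, bmp, selected=False,
#                         handler=lambda control: control.Selected(True))
#     frame.Show(True)
#     app.MainLoop()
#-------------------------------------------------------------------------------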
|
the-stack_0_335 | # stdlib
# stdlib
import dataclasses
from uuid import UUID
# third party
import sympc
from sympc.config import Config
from sympc.tensor import ShareTensor
# syft absolute
import syft
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...proto.lib.sympc.share_tensor_pb2 import ShareTensor as ShareTensor_PB
from ..python.primitive_factory import PrimitiveFactory
def object2proto(obj: object) -> ShareTensor_PB:
share: ShareTensor = obj
session_uuid = ""
config = {}
if share.session_uuid is not None:
session_uuid = str(share.session_uuid)
config = dataclasses.asdict(share.config)
session_uuid_syft = session_uuid
conf_syft = syft.serialize(
PrimitiveFactory.generate_primitive(value=config), to_proto=True
)
proto = ShareTensor_PB(session_uuid=session_uuid_syft, config=conf_syft)
tensor_data = getattr(share.tensor, "data", None)
if tensor_data is not None:
proto.tensor.CopyFrom(syft.serialize(share.tensor, to_proto=True))
return proto
def proto2object(proto: ShareTensor_PB) -> ShareTensor:
if proto.session_uuid:
session = sympc.session.get_session(proto.session_uuid)
if session is None:
raise ValueError(f"The session {proto.session_uuid} could not be found")
config = dataclasses.asdict(session.config)
else:
config = syft.deserialize(proto.config, from_proto=True)
tensor = syft.deserialize(proto.tensor, from_proto=True)
share = ShareTensor(data=None, config=Config(**config))
if proto.session_uuid:
share.session_uuid = UUID(proto.session_uuid)
# Manually put the tensor since we do not want to re-encode it
share.tensor = tensor
return share
GenerateWrapper(
wrapped_type=ShareTensor,
import_path="sympc.tensor.ShareTensor",
protobuf_scheme=ShareTensor_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
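# Illustrative round-trip sketch (not part of the original module; assumes sympc
# and syft are installed, that the share is serialized outside an active session,
# and that Config() defaults are acceptable):
#
#     share = ShareTensor(data=None, config=Config())
#     pb = object2proto(share)
#     restored = proto2object(pb)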
|
the-stack_0_336 | # -= ml_breakdown.py =-
# __ by Morgan Loomis
# ____ ___ / / http://morganloomis.com
# / __ `__ \/ / Revision 4
# / / / / / / / 2018-05-13
# /_/ /_/ /_/_/ _________
# /_________/
#
# ______________
# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright 2018 Morgan Loomis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ___________________
# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy this file into your maya scripts directory, for example:
# C:/Documents and Settings/user/My Documents/maya/scripts/ml_breakdown.py
#
# Run the tool in a python shell or shelf button by importing the module,
# and then calling the primary function:
#
# import ml_breakdown
# ml_breakdown.ui()
#
#
# __________________
# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Blend a keyframe or pose with the next or previous keys, essentially creating a
# breakdown pose that is weighted one way or the other.
#
# ____________
# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Press the "Breakdown Dragger" button to enter the dragger, and the cursor will
# turn into a hand. Left-click and hold in the viewport, and then drag either left
# or right to weight the key to the next or previous key. Press and hold the
# middle mouse button to weight the key toward or away from the average of the
# surrounding keys. Alternately, set the slider to the desired weight, and press
# the Next, Previous or Average buttons to increment the breakdown. Right click
# the buttons to assign to hotkeys. If you have no keys selected, the tool will
# act only on curves that are visible in the graph editor. If there are no keys
# at the current frame, keys will be set.
#
# ____________
# - -/__ Video __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# http://www.youtube.com/watch?v=D8yD4zbHTP8
#
# _________
# - -/__ Ui __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# [Breakdown Dragger] : Drag in the viewport to weight a breakdown toward the next or previous frame.
# [<<] : Weight toward the previous frame.
# [Average] : Weight toward the average of the next and previous frame.
# [>>] : Weight toward the next frame.
#
# ___________________
# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# This script requires the ml_utilities module, which can be downloaded here:
# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py
#
# __________
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! _/- - -
__author__ = 'Morgan Loomis'
__license__ = 'MIT'
__revision__ = 4
__category__ = 'animation'
shelfButton = {'annotation': 'Click to weight keys by dragging, double click to open UI.',
'command': 'import ml_breakdown;ml_breakdown.drag()',
'doubleClickCommand': 'import ml_breakdown;ml_breakdown.ui()',
'imageOverlayLabel': 'BD',
'menuItem': [['Breakdown UI', 'import ml_breakdown;ml_breakdown.ui()'],
['<< Previous', 'import ml_breakdown;ml_breakdown.weightPrevious()'],
['>> Next', 'import ml_breakdown;ml_breakdown.weightNext()'],
['Average', 'import ml_breakdown;ml_breakdown.weightAverage()']],
'order': 12}
import maya.cmds as mc
from maya import OpenMaya
from functools import partial
try:
import ml_utilities as utl
utl.upToDateCheck(32)
except ImportError:
result = mc.confirmDialog( title='Module Not Found',
message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.',
button=['Download Module','Cancel'],
defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )
if result == 'Download Module':
mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
def ui():
'''
User interface for breakdown
'''
with utl.MlUi('ml_breakdown', 'Breakdown Tools', width=400, height=180, info='''Select objects.
Press Breakdown Dragger to create a new key and weight it by dragging in the viewport.
Otherwise use the increment buttons to nudge a key's value toward the next or previous key.''') as win:
win.buttonWithPopup(label='Breakdown Dragger', command=drag, annotation='Drag in the viewport to weight a breakdown toward the next or previous frame.',
shelfLabel='BDD')
mc.separator(height=20)
mc.floatSliderGrp('ml_breakdown_value_floatSlider', value=0.2, field=True, minValue=0, maxValue=2)
mc.paneLayout(configuration='vertical3',separatorThickness=1)
win.ButtonWithPopup(label='<<', command=weightPrevious, annotation='Weight toward the previous frame.', shelfLabel='<', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
win.ButtonWithPopup(label='Average', command=weightAverage, annotation='Weight toward the average of the next and previous frame.', shelfLabel='><', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
win.ButtonWithPopup(label='>>', command=weightNext, annotation='Weight toward the next frame.', shelfLabel='>', shelfIcon='defaultTwoStackedLayout',
readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
def quickBreakDownUI():
winName = 'ml_quickBreakdownWin'
if mc.window(winName, exists=True):
mc.deleteUI(winName)
mc.window(winName, title='ml :: QBD', iconName='Quick Breakdown', width=100, height=500)
mc.columnLayout(adj=True)
mc.paneLayout(configuration='vertical2', separatorThickness=1)
mc.text('<<')
mc.text('>>')
mc.setParent('..')
for v in (10,20,50,80,90,100,110,120,150):
mc.paneLayout(configuration='vertical2',separatorThickness=1)
mc.button(label=str(v)+' %', command=partial(weightPrevious,v/100.0))
mc.button(label=str(v)+' %', command=partial(weightNext,v/100.0))
mc.setParent('..')
mc.showWindow(winName)
mc.window(winName, edit=True, width=100, height=250)
def drag(*args):
'''The primary command to run the tool'''
BreakdownDragger()
def weightPrevious(weight=0.2, *args):
weightBreakdownStep(direction='previous', weight=weight)
def weightAverage(weight=0.2, *args):
weightBreakdownStep(direction='average', weight=weight)
def weightNext(weight=0.2, *args):
weightBreakdownStep(direction='next', weight=weight)
def weightBreakdownStep(direction='next', weight=0.2):
keySel = utl.KeySelection()
if keySel.selectedKeys():
pass
elif keySel.visibleInGraphEditor():
keySel.setKeyframe()
elif keySel.keyedChannels():
keySel.setKeyframe()
if not keySel.curves:
return
times = list()
values = list()
data = list()
for curve in keySel.curves:
if keySel.selected:
times = mc.keyframe(curve, query=True, timeChange=True, sl=True)
values = mc.keyframe(curve, query=True, valueChange=True, sl=True)
else:
times = [keySel.time]
values = mc.keyframe(curve, time=keySel.time, query=True, valueChange=True)
for i,v in zip(times,values):
nextTime = mc.findKeyframe(curve, time=(i,), which='next')
n = mc.keyframe(curve, time=(nextTime,), query=True, valueChange=True)[0]
prevTime = mc.findKeyframe(curve, time=(i,), which='previous')
p = mc.keyframe(curve, time=(prevTime,), query=True, valueChange=True)[0]
data.append([curve,i,v,n,p])
for d in data:
value = None
if direction == 'next':
value = d[2]+((d[3]-d[2])*weight)
elif direction == 'previous':
value = d[2]+((d[4]-d[2])*weight)
elif direction == 'average':
value = d[2]+(((d[3]+d[4])/2-d[2])*weight)
else: break
mc.keyframe(d[0], time=(d[1],), valueChange=value)
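# Worked example of the weighting above: with a key value of 0, a next-key value
# of 10 and weight=0.2, the 'next' direction moves the key to 0 + (10 - 0) * 0.2 = 2,
# while 'previous' and 'average' blend toward the previous key or the midpoint of
# the surrounding keys in the same way.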
class BreakdownDragger(utl.Dragger):
'''Creates the tool and manages the data'''
def __init__(self,
name='mlBreakdownDraggerContext',
minValue=None,
maxValue=None,
defaultValue=0,
title = 'Breakdown'):
self.keySel = utl.KeySelection()
if self.keySel.selectedKeys():
pass
elif self.keySel.visibleInGraphEditor():
self.keySel.setKeyframe()
elif self.keySel.keyedChannels():
self.keySel.setKeyframe()
if not self.keySel.curves:
return
utl.Dragger.__init__(self, defaultValue=defaultValue, minValue=minValue, maxValue=maxValue, name=name, title=title)
#setup tangent type
itt,ott = utl.getHoldTangentType()
self.time = dict()
self.value = dict()
self.next = dict()
self.prev = dict()
self.average = dict()
for curve in self.keySel.curves:
if self.keySel.selected:
self.time[curve] = mc.keyframe(curve, query=True, timeChange=True, sl=True)
self.value[curve] = mc.keyframe(curve, query=True, valueChange=True, sl=True)
else:
self.time[curve] = self.keySel.time
self.value[curve] = mc.keyframe(curve, time=self.keySel.time, query=True, valueChange=True)
self.next[curve] = list()
self.prev[curve] = list()
self.average[curve] = list()
for i in self.time[curve]:
next = mc.findKeyframe(curve, time=(i,), which='next')
prev = mc.findKeyframe(curve, time=(i,), which='previous')
n = mc.keyframe(curve, time=(next,), query=True, valueChange=True)[0]
p = mc.keyframe(curve, time=(prev,), query=True, valueChange=True)[0]
self.next[curve].append(n)
self.prev[curve].append(p)
self.average[curve].append((n+p)/2)
#set the tangents on this key, and the next and previous, so they flatten properly
mc.keyTangent(curve, time=(i,), itt=itt, ott=ott)
mc.keyTangent(curve, time=(next,), itt=itt)
mc.keyTangent(curve, time=(prev,), ott=ott)
self.setTool()
self.drawString('Left: Weight Prev/Next, Middle: Weight Average')
OpenMaya.MGlobal.displayWarning('Left: Weight Prev/Next, Middle: Weight Average')
def dragLeft(self):
'''This is activated by the left mouse button, and weights to the next or previous keys.'''
#clamp it
if self.x < -1:
self.x = -1
if self.x > 1:
self.x = 1
if self.x > 0:
self.drawString('>> '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
elif self.x <0:
self.drawString('<< '+str(int(self.x*-100))+' %')
for curve in self.keySel.curves:
for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))
def dragMiddle(self):
'''This is activated by the middle mouse button, and weights to the average of the surrounding keys.'''
#clamp it
if self.x < -1:
self.x = -1
if self.x > 1:
self.x = 1
self.drawString('Average '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.average[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
def dragShiftLeft(self):
'''This is activated by Shift and the left mouse button, and weights to the next or previous keys, without clamping.'''
if self.x > 0:
self.drawString('>> '+str(int(self.x*100))+' %')
for curve in self.keySel.curves:
for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))
elif self.x <0:
self.drawString('<< '+str(int(self.x*-100))+' %')
for curve in self.keySel.curves:
for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):
mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))
if __name__ == '__main__':
quickBreakDownUI()
# ______________________
# - -/__ Revision History __/- - - - - - - - - - - - - - - - - - - - - - - -
#
# Revision 1: 2015-05-13 : First publish.
#
# Revision 2: 2015-05-13 : Documentation updates.
#
# Revision 3: 2018-02-17 : Updating license to MIT.
#
# Revision 4: 2018-05-13 : shelf support |
the-stack_0_337 | import asyncio
import io
import userbot.plugins.sql_helper.no_log_pms_sql as no_log_pms_sql
from telethon import events, errors, functions, types
from userbot.utils import admin_cmd
from userbot.uniborgConfig import Config
@borg.on(admin_cmd(pattern="nccreatedch"))
async def create_dump_channel(event):
if Config.PM_LOGGR_BOT_API_ID is None:
result = await borg(functions.channels.CreateChannelRequest( # pylint:disable=E0602
title=f"UniBorg-{borg.uid}-PM_LOGGR_BOT_API_ID-data",
about="PM_LOGGR_BOT_API_ID // Do Not Touch",
megagroup=False
))
logger.info(result)
created_chat_id = result.chats[0].id
result = await borg.edit_admin( # pylint:disable=E0602
entity=created_chat_id,
user=Config.TG_BOT_USER_NAME_BF_HER,
is_admin=True,
title="Editor"
)
logger.info(result)
with io.BytesIO(str.encode(str(created_chat_id))) as out_file:
out_file.name = "PLEASE.IGNORE.dummy.file"
await borg.send_file(
created_chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=f"Please set `PM_LOGGR_BOT_API_ID` to `{created_chat_id}`",
reply_to=1
)
await event.delete()
else:
await event.edit(f"**is configured**. [please do not touch](https://t.me/c/{Config.PM_LOGGR_BOT_API_ID}/2)")
@borg.on(admin_cmd(pattern="nolog ?(.*)"))
async def set_no_log_p_m(event):
if Config.PM_LOGGR_BOT_API_ID is not None:
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if not no_log_pms_sql.is_approved(chat.id):
no_log_pms_sql.approve(chat.id)
await event.edit("Won't Log Messages from this chat")
await asyncio.sleep(3)
await event.delete()
@borg.on(admin_cmd(pattern="enlog ?(.*)"))
async def set_en_log_p_m(event):
if Config.PM_LOGGR_BOT_API_ID is not None:
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if no_log_pms_sql.is_approved(chat.id):
no_log_pms_sql.disapprove(chat.id)
await event.edit("Will Log Messages from this chat")
await asyncio.sleep(3)
await event.delete()
@borg.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
message_media = event.message.media
message_id = event.message.id
message_to_id = event.message.to_id
chat_id = event.chat_id
# logger.info(chat_id)
sender = await borg.get_entity(chat_id)
if chat_id == borg.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if not no_log_pms_sql.is_approved(chat_id):
# log pms
await do_log_pm_action(chat_id, message_text, message_media)
@borg.on(events.ChatAction(blacklist_chats=Config.UB_BLACK_LIST_CHAT))
async def on_new_chat_action_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
# logger.info(event.stringify())
chat_id = event.chat_id
message_id = event.action_message.id
if event.created or event.user_added:
added_by_users = event.action_message.action.users
if borg.uid in added_by_users:
added_by_user = event.action_message.from_id
# someone added me to chat
the_message = ""
the_message += "#MessageActionChatAddUser\n\n"
the_message += f"[User](tg://user?id={added_by_user}): `{added_by_user}`\n"
the_message += f"[Private Link](https://t.me/c/{chat_id}/{message_id})\n"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
@borg.on(events.Raw())
async def on_new_channel_message(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if tgbot is None:
return
# logger.info(event.stringify())
if isinstance(event, types.UpdateChannel):
channel_id = event.channel_id
message_id = 2
# someone added me to channel
# TODO: https://t.me/TelethonChat/153947
the_message = ""
the_message += "#MessageActionChatAddUser\n\n"
# the_message += f"[User](tg://user?id={added_by_user}): `{added_by_user}`\n"
the_message += f"[Private Link](https://t.me/c/{channel_id}/{message_id})\n"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
"""@borg.on(events.Raw())
async def _(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
if tgbot is None:
return
logger.info(event.stringify())"""
"""if tgbot is not None:
@tgbot.on(events.Raw())
async def _(event):
if Config.PM_LOGGR_BOT_API_ID is None:
return
logger.info(event.stringify())"""
async def do_log_pm_action(chat_id, message_text, message_media):
the_message = ""
the_message += "#LOG_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message: {message_text}\n"
# the_message += f"Media: {message_media}"
await borg.send_message(
entity=Config.PM_LOGGR_BOT_API_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
file=message_media,
silent=True
)
|
the-stack_0_338 | # -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.events import SlotSet
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
logger = logging.getLogger(__name__)
support_search = ["话费", "流量"]
def extract_item(item):
"""
check if item supported, this func just for lack of train data.
:param item: item in track, eg: "流量"、"查流量"
:return:
"""
if item is None:
return None
for name in support_search:
if name in item:
return name
return None
class ActionSearchConsume(Action):
def name(self):
return 'action_search_consume'
def run(self, dispatcher, tracker, domain):
item = tracker.get_slot("item")
item = extract_item(item)
if item is None:
dispatcher.utter_message("您好,我现在只会查话费和流量")
dispatcher.utter_message("你可以这样问我:“帮我查话费”")
return []
time = tracker.get_slot("time")
if time is None:
dispatcher.utter_message("您想查询哪个月的话费?")
return []
# query database here using item and time as key. but you may normalize time format first.
dispatcher.utter_message("好,请稍等")
if item == "流量":
dispatcher.utter_message("您好,您{}共使用{}二百八十兆,剩余三十兆。".format(time, item))
else:
dispatcher.utter_message("您好,您{}共消费二十八元。".format(time))
return []
class MobilePolicy(KerasPolicy):
def model_architecture(self, num_features, num_actions, max_history_len):
"""Build a Keras model and return a compiled model."""
from keras.layers import LSTM, Activation, Masking, Dense
from keras.models import Sequential
n_hidden = 32 # size of hidden layer in LSTM
# Build Model
batch_shape = (None, max_history_len, num_features)
model = Sequential()
model.add(Masking(-1, batch_input_shape=batch_shape))
model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
logger.debug(model.summary())
return model
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.model import Trainer
from rasa_nlu import config
training_data = load_data("data/nlu.json")
trainer = Trainer(config.load("data/nlu_model_config.json"))
trainer.train(training_data)
model_directory = trainer.persist("models/", project_name="ivr", fixed_model_name="demo")
return model_directory
def train_dialogue(domain_file="data/domain.yml",
model_path="models/dialogue",
training_data_file="data/stories.md"):
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)])
agent.train(
training_data_file,
epochs=200,
batch_size=16,
augmentation_factor=50,
validation_split=0.2
)
agent.persist(model_path)
return agent
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
interpreter=RasaNLUInterpreter("models/ivr/demo"),
domain_file="data/domain.yml",
training_data_file="data/stories.md"):
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)],
interpreter=interpreter)
agent.train_online(training_data_file,
input_channel=input_channel,
batch_size=50,
epochs=200,
max_training_samples=300)
return agent
def run(serve_forever=True):
agent = Agent.load("models/dialogue",
interpreter=RasaNLUInterpreter("models/ivr/demo"))
if serve_forever:
agent.handle_channel(ConsoleInputChannel())
return agent
if __name__ == "__main__":
logging.basicConfig(level="INFO")
parser = argparse.ArgumentParser(
description="starts the bot")
parser.add_argument(
"task",
choices=["train-nlu", "train-dialogue", "run", "online-train"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
elif task == "run":
run()
elif task == "online-train":
run_ivrbot_online()
else:
warnings.warn("Need to pass either 'train-nlu', 'train-dialogue', 'run' or 'online-train' to use the script.")
exit(1)
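# Hypothetical invocation sketch (the script filename is an assumption):
#
#     python bot.py train-nlu        # train the NLU model first
#     python bot.py train-dialogue   # then train the dialogue policies
#     python bot.py run              # finally chat with the bot in the console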
|
the-stack_0_339 | """
.. module: cloudaux.aws.decorators
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
.. moduleauthor:: Mike Grima <[email protected]>
"""
import functools
import time
import boto
import botocore
RATE_LIMITING_ERRORS = ['Throttling', 'RequestLimitExceeded', 'SlowDown', 'RequestThrottled']
def rate_limited(max_attempts=None, max_delay=4):
def decorator(f):
metadata = {
'count': 0,
'delay': 0
}
@functools.wraps(f)
def decorated_function(*args, **kwargs):
def increase_delay(e):
if metadata['delay'] == 0:
metadata['delay'] = 1
elif metadata['delay'] < max_delay:
metadata['delay'] *= 2
if max_attempts and metadata['count'] > max_attempts:
raise e
metadata['count'] = 0
while True:
metadata['count'] += 1
if metadata['delay'] > 0:
time.sleep(metadata['delay'])
try:
retval = f(*args, **kwargs)
metadata['delay'] = 0
return retval
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
except boto.exception.BotoServerError as e:
if e.error_code not in RATE_LIMITING_ERRORS:
raise e
increase_delay(e)
return decorated_function
return decorator
def paginated(response_key, request_pagination_marker="Marker", response_pagination_marker="Marker"):
def decorator(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
results = []
while True:
response = func(*args, **kwargs)
results.extend(response[response_key])
if ('NextMarker' in response) or ('IsTruncated' in response and response['IsTruncated']):
kwargs.update({request_pagination_marker: response[response_pagination_marker]})
else:
break
return results
return decorated_function
return decorator
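# Illustrative usage sketch (the wrapped function and response key are
# hypothetical, not part of this module):
#
#     @rate_limited()
#     @paginated('Policies',
#                request_pagination_marker='Marker',
#                response_pagination_marker='Marker')
#     def list_policies(client=None, **kwargs):
#         return client.list_policies(**kwargs)
#
# Each call is retried with exponential backoff on throttling errors, and pages
# are followed through 'NextMarker'/'IsTruncated' until every 'Policies' entry
# has been collected.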
|
the-stack_0_340 | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class ArrayConnectionResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[ArrayConnection]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.ArrayConnection]
):
"""
Keyword args:
items (list[ArrayConnection])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayConnectionResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
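# Illustrative sketch (not part of the generated client): a locally constructed
# response echoes its items through to_dict().
#
#     resp = ArrayConnectionResponse(items=[])
#     resp.to_dict()   # -> {'items': []}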
|
the-stack_0_341 | "this program runs on ngc and syncs data with a local master machine"
import time
import os
import ray
@ray.remote
def sync(agentparams):
master_datadir = agentparams['master_datadir']
master = agentparams.get('master', 'deepthought')
local_datadir = '/result'
while True:
print('transfer tfrecords to master')
cmd = 'rsync -a --update {} {}:{}'.format(local_datadir + '/', master, master_datadir)
print('executing: {}'.format(cmd))
os.system(cmd)
time.sleep(10)
if __name__ == '__main__':
conf = {}
conf['master_datadir'] = '/raid/ngc2/pushing_data/cartgripper/mj_multi_obj_push3_75step'
    ray.init()
    ray.get(sync.remote(conf)) |
the-stack_0_342 | import tensorflow as tf
from build_model import embedded_neural_net, compile_model
def train_model(train_dataset: tf.data.Dataset, validation_dataset: tf.data.Dataset, max_features, patience=4,
epochs=10):
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)
model_structure = embedded_neural_net(max_features)
model = compile_model(model_structure)
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[callback])
return model, history
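# Illustrative usage sketch (dataset variables and max_features are assumptions):
#
#     model, history = train_model(train_ds, val_ds, max_features=10000, patience=3)
#     print(history.history['val_loss'])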
|
the-stack_0_343 | import redis
r = redis.Redis()
from datetime import date
today = str(date.today())
import datetime
import pickle
existed = False
stand = [460, 1.3, .7]
def gas_lvl(gas):
status = 'normal'
gas = gas.replace(' ','')
gases = gas.split('|')
ox = float(gases[0])
red = float(gases[1])
nh = float(gases[2])
ox_diff = (abs(ox-stand[0]) / stand[0] ) * 100
red_diff = (abs(red-stand[1]) / stand[1] ) * 100
nh_diff = (abs(nh-stand[2]) / stand[2] ) * 100
if (ox_diff > 30 or red_diff > 30 or nh_diff > 30):
status = 'abnormal'
return status
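# Worked example: with stand = [460, 1.3, .7], a reading of "600 | 1.3 | 0.7"
# deviates about 30.4% on the oxidising channel ((600 - 460) / 460 * 100), so
# gas_lvl returns 'abnormal', while "460 | 1.3 | 0.7" returns 'normal'.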
class RedisHelper:
def __init__(self):
self.r = redis.Redis()
self.existed = False
self.dev_key = 'devices'
def read(self, span=1800):
current = {
"temp": -1,
"humidity" : -1,
"gas" : "abs",
"alerts" : -2,
"messages" : -3
}
msg, msgC = self.messages()
currentTime = datetime.datetime.now()
day = currentTime.strftime("%d/%m/%Y")
key = day
#print(key)
if (self.r.exists(key)):
persisted = pickle.loads(self.r.get(key))
self.existed = True
self.dev_key = 'devices'
#print(persisted)
else:
persisted = {}
timeHM = datetime.datetime.now()
temp = 0
humidity = 0
pressure = 0
count = 0
for keys in persisted:
date_time_obj = datetime.datetime.strptime(keys, '%d/%m/%Y@%H:%M:%S')
diff = timeHM - date_time_obj
#print(diff.seconds, span)
if (diff.seconds <= span) :
count = count + 1
temp = temp + persisted[keys]['temp']
humidity = humidity + persisted[keys]['humidity']
pressure = pressure + persisted[keys]['pressure']
#print(keys, persisted[keys], diff)
if (count > 0):
#print(f"averages are {temp/count} {humidity/count} {pressure/count} {count} ")
last = list(persisted.keys())
last_one = len(last) - 1
gases = persisted[last[last_one]]['gas']
if (gas_lvl(gases) != 'normal'):
alert_message = 'Alert!'
else:
alert_message = 'Normal'
current = {
"temp": round(temp/count,2),
"humidity" : round(humidity/count,2),
"pressure" : round(pressure/count,2),
"gas" : gas_lvl(gases),
"alerts" : alert_message,
"messages" : msgC,
"count" : count
}
return current
def devices_read(self):
if (r.exists(self.dev_key)):
devices = pickle.loads(self.r.get(self.dev_key))
else:
devices = {}
docs = []
for dev in devices:
docs.append(devices[dev])
return docs
def devices_update(self, dev):
devices = self.devices_read()
devices.pop(dev, None)
r.set(self.dev_key, pickle.dumps(devices))
return devices
def messages(self):
if (r.exists('messages')):
messages = pickle.loads(self.r.get('messages'))
else:
messages = {}
return messages, len(messages)
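# Illustrative usage sketch (assumes a local Redis instance already holds
# readings pickled under the current date):
#
#     helper = RedisHelper()
#     print(helper.read(span=3600))      # averages over the last hour
#     print(helper.devices_read())       # currently known devices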
|
the-stack_0_344 | # importation de pygame
import pygame
# importation de la bibliothèque system
import sys
# importation de nos classes
from Model.class_Hero import Hero
from Model.class_Platform import Platform
from Model.class_Atk import Atk
from Model.class_SacDeSable import SacDeSable
from utils import load_imgs
def exit_game(key):
from Model.class_Menu import run
if key == pygame.K_RETURN:
run()
# pygame initialization
def main(self):
pygame.init()
WIDTH = 1280
HEIGHT = 720
fenetre = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE)
fond_e = pygame.transform.scale(
pygame.image.load("Images/Background/niveauRecurciforce.png").convert(), (1280, 720)
)
blanchonAa1 = pygame.image.load("Images/Spell/aa1.png").convert()
blanchonAa2 = pygame.image.load("Images/Spell/aa2.png").convert()
blanchonAa3 = pygame.image.load("Images/Spell/aa3.png").convert()
blanchonAaMidAir = pygame.image.load("Images/Spell/aaMidAir.png").convert()
blanchonVector = pygame.image.load("Images/Spell/vector.png").convert()
imagesBlanchonList = {
"Ridle": ["b_idle_1", "b_idle_2"],
"Rmove": ["b_move_0", "b_move_1", "b_move_2", "b_move_1"],
"Ffall": ["b_jumpdown_1", "b_jumpdown_2"],
"Fcrouch": ["b_crouch_1", "b_crouch_2"],
"Rslide": ["b_slide"],
"Fjump": ["b_jumpup_1", "b_jumpup_2", "b_jumpup_3"],
"Oaa1": ["b_aa1_1", "b_aa1_2", "b_aa1_3", "b_aa1_3"],
"Oaa2": ["b_aa2_1", "b_aa2_2", "b_aa2_3", "b_aa2_4", "b_aa2_5", "b_aa2_5"],
"Oaa3": ["b_aa3_1", "b_aa3_2", "b_aa3_3", "b_aa3_4", "b_aa3_5", "b_aa3_6", "b_aa3_6", "b_aa3_6"],
"Oaaa": ["b_aa2_2", "b_atkjumpdown", "b_atkjumpdown"],
"Odmg": ["b_dmg_2", "b_dmg_2"],
"D": ["b_gameover", "b_gameover"],
}
path = "Images/Blanchon"
imagesBlanchon = load_imgs(imagesBlanchonList, path)
blanchon_atkList = [
Atk("autoHit1", 0.5, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 10, 5, -1, 0, 0, 0, 225),
Atk("autoHit2", 0.7, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 15, 5, -2, 0, 0, 0, 300),
Atk("autoHit3", 0.7, 32, 32, load_imgs({"idle": ["particlehit"]}, path), 15, 6, -16, 0, 0, 0, 500),
Atk("EOF", 4, 32, 17, load_imgs({"idle": ["vector"]}, path), 15, 4, -1, 0, 4, 0, 2000),
Atk("airAutoHit", 1, 64, 32, load_imgs({"idle": ["particlehit"]}, path), 10, 5, 5, 0, 0, 0, 300)
]
blanchon = Hero(200, 200, 64, 64, imagesBlanchon, 0.3, 0.7, 8, 6, WIDTH, 100.0, blanchon_atkList)
sol = Platform(0, HEIGHT-70, WIDTH, 10, pygame.image.load("Images/plateformtest.png").convert_alpha(), 0.4)
    # INIT PLATFORMS
platforms = [
Platform(100, HEIGHT - 180, 100, 10, pygame.image.load("Images/plateform.png").convert_alpha(), 1),
Platform(350, HEIGHT - 280, 100, 10, pygame.image.load("Images/plateform.png").convert_alpha(), 1)
]
    # INIT ENEMIES
foes = [SacDeSable(600, 500, WIDTH, 1)]
# INIT SYSTEM CLOCK
clock = pygame.time.Clock()
fps = 60
Mult = pygame.font.Font("Polices/Lady Radical.ttf", 25)
Mult.set_bold(False)
MultB = pygame.font.Font("Polices/Lady Radical.ttf", 40)
MultB.set_bold(True)
damageFont = pygame.font.Font("Polices/Lady Radical.ttf", 30)
# damageFont.set_bold(True)
damageArray = []
timerDamage = 300
    # TUTORIAL TEXT------------------------------------------------------------------
self.myfontMini = pygame.font.Font("Polices/Lady Radical.ttf", 15)
self.myfont = pygame.font.Font("Polices/Lady Radical.ttf", 25)
fleches = self.myfont.render("Les fleches directionnelles servent a se deplacer", 1, (200, 200, 0))
atkDeBase = self.myfont.render("'A' (Q sous Windows) permet de donner des coups au corps a corps", 1, (200, 200, 0))
atkDistance = self.myfont.render("'Z' (W sous Windows) permet de lancer des projectiles", 1, (200, 200, 0))
combol = self.myfont.render("Un combo est possible en realisant 3 attaques basiques successives", 1, (200, 200, 0))
dbSaut = self.myfont.render("Le double saut est possible", 1, (200, 200, 0))
quit1 = self.myfontMini.render("Appuyer sur 'Entree' pour ", 1, (200, 200, 0))
quit2 = self.myfontMini.render("retourner au menu principal ", 1, (200, 200, 0))
while 1:
clock.tick(fps)
        # EVENT HANDLING------------------------------------------------------------------
for event in pygame.event.get():
            if event.type == pygame.QUIT:  # if the user clicks the close button
                sys.exit()  # close the window
if event.type == pygame.KEYDOWN:
exit_game(event.key)
blanchon.key_down(event)
if event.type == pygame.KEYUP:
blanchon.key_up(event)
        # SCENERY HANDLING--------------------------------------------------------------
        # Background
fenetre.blit(fond_e, (0, 0))
self.screen.blit(fleches, (600, 50))
self.screen.blit(atkDeBase, (600, 80))
self.screen.blit(atkDistance, (600, 110))
self.screen.blit(combol, (600, 140))
self.screen.blit(dbSaut, (600, 170))
self.screen.blit(quit1, (1100, 600))
self.screen.blit(quit2, (1100, 620))
        # Platforms
nbPlatf = len(platforms)
for i in range(0, nbPlatf):
fenetre.blit(platforms[i].get_img(), platforms[i].get_rect())
        # HERO HANDLING----------------------------------------------------------------
        # Display the damage multiplier
Multipl = Mult.render("Mult : ", 1, (255, 255, 0))
combo = blanchon.get_combo()
if combo < 2:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 255, 0))
elif combo < 3:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (0, 0, 255))
elif combo < 4:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 0, 255))
else:
MultiplCombo = MultB.render(f"{combo:.2f}", 1, (255, 0, 0))
fenetre.blit(Multipl, (700, 680))
fenetre.blit(MultiplCombo, (800, 670))
        # Cooldown of Blanchon's attacks
colorRect = (125, 125, 125, 128)
if not blanchon.get_onGround():
cd = blanchon_atkList[4].get_cd()
if cd > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 60))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
tailleRect1 = 60 * cd / blanchon_atkList[4].get_maxCd()
posRect1 = 715 - tailleRect1
fenetre.blit(blanchonAaMidAir, (100, 660))
CdAH = damageFont.render(f"{cd:.1f}", 1, (255, 0, 0))
elif blanchon.get_autoHitTimer3() > 0:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa3, (100, 660))
tailleRect1 = 60 * blanchon.get_autoHitTimer3() / 3000
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{blanchon.get_autoHitTimer3()/1000:.1f}", 1, (255, 0, 0))
elif blanchon.get_autoHitTimer2() > 0:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa2, (100, 660))
tailleRect1 = 60 * blanchon.get_autoHitTimer2() / 3000
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{blanchon.get_autoHitTimer2()/1000:.1f}", 1, (255, 0, 0))
else:
cd = blanchon_atkList[0].get_cd()
if cd > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 60))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))
fenetre.blit(blanchonAa1, (100, 660))
tailleRect1 = 60 * cd / blanchon_atkList[0].get_maxCd()
posRect1 = 715 - tailleRect1
CdAH = damageFont.render(f"{cd:.1f}", 1, (255, 0, 0))
CaseAa = pygame.Surface((60, tailleRect1), pygame.SRCALPHA)
CaseAa.fill(colorRect)
fenetre.blit(CaseAa, (95, posRect1))
if cd > 0:
fenetre.blit(CdAH, (110, 670))
if blanchon_atkList[3].get_cd() > 0:
pygame.draw.rect(fenetre, (0, 0, 0), (175, 655, 60, 60))
pygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))
else:
pygame.draw.rect(fenetre, (200, 200, 50), (175, 655, 60, 60))
pygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))
fenetre.blit(blanchonVector, (189, 677))
tailleRect2 = 60 * blanchon_atkList[3].get_cd() / blanchon_atkList[3].get_maxCd()
posRect2 = 715 - tailleRect2
CaseAa = pygame.Surface((60, tailleRect2), pygame.SRCALPHA)
CaseAa.fill((125, 125, 125, 128))
fenetre.blit(CaseAa, (175, posRect2))
CdProj = damageFont.render(f"{blanchon_atkList[3].get_cd():.1f}", 1, (255, 0, 0))
if blanchon_atkList[3].get_cd() > 0:
fenetre.blit(CdProj, (190, 670))
        # Test Hero => Platform
heroOnGround = blanchon.isOnGround()
blanchon.setOnAir()
blanchon.testPlatform(sol)
for i in range(0, nbPlatf):
blanchon.testPlatform(platforms[i])
        # The hero has stepped down from a platform
if heroOnGround and not blanchon.isOnGround():
            blanchon.giveDoubleJump()  # give him back a jump
blanchon.update(blanchon, fps)
        # DAMAGE DISPLAY----------------------------------------------------------------
i = 0
while i < len(damageArray):
if damageArray[i][2] > 0:
fenetre.blit(damageArray[i][0], damageArray[i][1])
damageArray[i][2] = damageArray[i][2] - (1000/fps)
i += 1
else:
damageArray.pop(i)
        # MOB HANDLING---------------------------------------------------------------
# Teste Mob => Plateforme && Atk Hero => Mob
nbAtkHero = len(blanchon.get_AtkEffectList())
i = 0
while i < len(foes):
foes[i].nextImg(fps)
fenetre.blit(foes[i].get_img(), foes[i].get_rect())
pygame.draw.rect(
fenetre, (0, 0, 0), (foes[i].get_rect().x, foes[i].get_rect().y - 10, 60, 6)
)
pygame.draw.rect(
fenetre, (255, 0, 0), (
foes[i].get_rect().x, foes[i].get_rect().y - 10,
int(max(min(foes[i].get_hp()/float(foes[i].get_hpMax())*60, 60), 0)), 6
)
)
foes[i].setOnAir()
foes[i].testPlatform(sol)
for j in range(0, nbPlatf):
foes[i].testPlatform(platforms[j])
            # Check whether mob i is hit by hero attack k
for k in range(0, nbAtkHero):
hpBefore = foes[i].get_hp()
foes[i].testAtkEffect(blanchon.get_AtkEffectList()[k])
degats = foes[i].get_hp() - hpBefore
foes[i].set_hp(degats)
if degats < 0.0:
damageArray.append([
damageFont.render(f"{degats:.1f}", 1, (50, 150, 255)),
(foes[i].get_x(), foes[i].get_y()-40), timerDamage
])
nbAtkFoe = len(foes[i].get_AtkEffectList())
for l in range(0, nbAtkFoe):
hpBefore = blanchon.get_hp()
blanchon.testAtkEffect(foes[i].get_AtkEffectList()[l])
degats = blanchon.get_hp() - hpBefore
if degats < 0:
damageArray.append([
damageFont.render(f"{degats:.1f}", 1, (255, 0, 0)),
(blanchon.get_x(), blanchon.get_y()-40), timerDamage
])
fenetre.blit(
foes[i].get_AtkEffectList()[l].get_img(),
foes[i].get_AtkEffectList()[l].get_rect()
)
foes[i].update(blanchon, fps)
if foes[i].get_hp() <= 0:
foes.pop(i)
else:
i += 1
for i in range(0, nbAtkHero):
fenetre.blit(blanchon.get_AtkEffectList()[k].get_img(), blanchon.get_AtkEffectList()[k].get_rect())
        # Display the hero
blanchon.nextImg(fps)
fenetre.blit(blanchon.get_img(), blanchon.get_rect())
pygame.draw.rect(fenetre, (0, 0, 0), (blanchon.get_rect().x, blanchon.get_rect().y - 10, 60, 6))
pygame.draw.rect(
fenetre, (0, 255, 0), (
blanchon.get_rect().x, blanchon.get_rect().y - 10,
int(max(min(blanchon.get_hp()/float(blanchon.get_hpMax()) * 60, 60), 0)), 6
)
)
pygame.display.flip()
|
the-stack_0_347 | import os
import mlflow
import random
import hashlib
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from src.git_autocommit import autocommit
SEED = 0
TRACKING_URI = 'http://localhost:5000'
EXPERIMENT_NAME = 'mnist'
random.seed(SEED)
np.random.seed(SEED)
def train(cfg):
os.system("conda env export > environment.yaml")
autocommit(file_paths=['./'], message='Another version of random forest')
mlflow.set_tracking_uri(TRACKING_URI)
mlflow.set_experiment(EXPERIMENT_NAME)
digits = datasets.load_digits()
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.8, random_state=SEED)
# Track hash of data & split
data_hash = hashlib.md5()
for df in [X_train, X_test, y_train, y_test]:
data_hash.update(df)
data_hash = data_hash.hexdigest()
clf = RandomForestClassifier(**cfg, random_state=SEED)
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
scores = classification_report(y_test, preds, output_dict=True)
df = pd.json_normalize(scores, sep='_')
df = df.to_dict(orient='records')[0]
with mlflow.start_run():
mlflow.log_params(cfg)
mlflow.log_param('data_hash', data_hash)
mlflow.log_metrics(df)
print(df['macro avg_f1-score'])
if __name__ == '__main__':
cfg = {'n_estimators': 500,
'max_depth': 25,
'min_samples_split': 2,
'min_samples_leaf': 1,
}
train(cfg)
|
the-stack_0_350 | from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from mock import patch
from nose.tools import assert_raises, eq_, ok_
from waffle import Flag
from flicks.base.regions import NORTH_AMERICA
from flicks.base.tests import TestCase
from flicks.videos.forms import VideoSearchForm
from flicks.videos.search import AUTOCOMPLETE_FIELDS
class VideoSearchFormTests(TestCase):
def setUp(self):
super(VideoSearchFormTests, self).setUp()
self.factory = RequestFactory()
self.request = self.factory.get('/')
def test_popular_sort_include(self):
"""If the voting-end waffle flag is not set, include the popular option for sorting."""
Flag.objects.create(name='voting-end', everyone=False)
form = VideoSearchForm(self.request)
ok_('popular' in [c[0] for c in form.fields['sort'].choices])
def test_popular_sort_exclude(self):
"""If the voting-end waffle flag is set, do not include the popular option for sorting."""
Flag.objects.create(name='voting-end', everyone=True)
form = VideoSearchForm(self.request)
ok_('popular' not in [c[0] for c in form.fields['sort'].choices])
@patch('flicks.videos.forms.search_videos')
def test_valid_search(self, search_videos):
form = VideoSearchForm(self.request, {
'query': 'asdf',
'field': 'title',
'region': NORTH_AMERICA,
'sort': 'popular'
})
eq_(form.perform_search(), search_videos.return_value)
search_videos.assert_called_with(
query='asdf',
fields=AUTOCOMPLETE_FIELDS['title'],
region=NORTH_AMERICA,
sort='popular'
)
@patch('flicks.videos.forms.search_videos')
def test_empty_field_passes_none(self, search_videos):
"""If the field isn't specified, pass None to the fields parameter."""
form = VideoSearchForm(self.request, {
'query': 'asdf',
'region': NORTH_AMERICA,
'sort': 'popular'
})
eq_(form.perform_search(), search_videos.return_value)
search_videos.assert_called_with(query='asdf', fields=None,
region=NORTH_AMERICA, sort='popular')
def test_invalid_form(self):
"""If the form fails validation, throw a ValidationError."""
form = VideoSearchForm(self.request, {
'region': -5,
'sort': 'invalid'
})
with assert_raises(ValidationError):
form.perform_search()
def test_clean_no_query(self):
"""
If no search query is specified, do not alter the sort value or
choices.
"""
form = VideoSearchForm(self.request, {'region': NORTH_AMERICA, 'sort': 'title'})
form.full_clean()
eq_(form.cleaned_data['sort'], 'title')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' in choice_values)
def test_clean_query(self):
"""
If a search query is specified, remove the random option from the sort
choices and, if the sort is currently set to random, switch to title
sort.
"""
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': ''})
form.full_clean()
eq_(form.cleaned_data['sort'], 'title')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' not in choice_values)
# Check that sort is preserved if it is not random.
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'popular'})
form.full_clean()
eq_(form.cleaned_data['sort'], 'popular')
choice_values = zip(*form.fields['sort'].choices)[0]
ok_('' not in choice_values)
def test_invalid_sort(self):
"""
An invalid value for sort should not break clean.
Regression test for an issue where a user was attempting to break Flicks by submitting a
bunch of invalid values for sort.
"""
form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'invalid'})
form.full_clean()
eq_(form.is_valid(), False)
|
the-stack_0_354 | from ..Qt import QtGui, QtCore, QtWidgets
__all__ = ['BusyCursor']
class BusyCursor(object):
"""Class for displaying a busy mouse cursor during long operations.
Usage::
with pyqtgraph.BusyCursor():
doLongOperation()
May be nested.
"""
active = []
def __enter__(self):
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
BusyCursor.active.append(self)
def __exit__(self, *args):
BusyCursor.active.pop(-1)
if len(BusyCursor.active) == 0:
QtWidgets.QApplication.restoreOverrideCursor()
|
the-stack_0_355 | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Any, Mapping, Tuple
from base_python import BaseClient
from .api import (
API,
AgentsAPI,
CompaniesAPI,
ContactsAPI,
ConversationsAPI,
FreshdeskError,
FreshdeskNotFound,
FreshdeskUnauthorized,
GroupsAPI,
RolesAPI,
SatisfactionRatingsAPI,
SkillsAPI,
SurveysAPI,
TicketsAPI,
TimeEntriesAPI,
)
class Client(BaseClient):
def __init__(self, domain, api_key, requests_per_minute: int = None):
self._api = API(domain=domain, api_key=api_key, requests_per_minute=requests_per_minute)
self._apis = {
"agents": AgentsAPI(self._api),
"companies": CompaniesAPI(self._api),
"contacts": ContactsAPI(self._api),
"conversations": ConversationsAPI(self._api),
"groups": GroupsAPI(self._api),
"roles": RolesAPI(self._api),
"skills": SkillsAPI(self._api),
"surveys": SurveysAPI(self._api),
"tickets": TicketsAPI(self._api),
"time_entries": TimeEntriesAPI(self._api),
"satisfaction_ratings": SatisfactionRatingsAPI(self._api),
}
super().__init__()
def settings(self):
url = "settings/helpdesk"
return self._api.get(url)
def stream_has_state(self, name: str) -> bool:
"""Tell if stream supports incremental sync"""
return hasattr(self._apis[name], "state")
def get_stream_state(self, name: str) -> Any:
"""Get state of stream with corresponding name"""
return self._apis[name].state
def set_stream_state(self, name: str, state: Any):
"""Set state of stream with corresponding name"""
self._apis[name].state = state
def _enumerate_methods(self) -> Mapping[str, callable]:
return {name: api.list for name, api in self._apis.items()}
def health_check(self) -> Tuple[bool, str]:
alive = True
error_msg = None
try:
self.settings()
except (FreshdeskUnauthorized, FreshdeskNotFound):
alive = False
error_msg = "Invalid credentials"
except FreshdeskError as error:
alive = False
error_msg = repr(error)
return alive, error_msg
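# Minimal usage sketch: the domain and API key below are placeholders rather than
# working credentials, so substitute real values before running.
if __name__ == "__main__":
    example_client = Client(domain="example.freshdesk.com", api_key="<api-key>")
    alive, error_msg = example_client.health_check()
    print("alive=%s, error=%s" % (alive, error_msg))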
|
the-stack_0_356 | from adder.full_adder import FullAdder
from comparator.comparator import Comparator
from decoder.decoder_mxn import Decoder_nxm
from flipflop.d import D_FlipFlop
from gate.and_gate import And
from gate.input_gate import Input
from gate.one_gate import One
from gate.or_gate import Or
from gate.xor_gate import Xor
from gate.zero_gate import Zero
from latch.d import D_Latch
from multiplexer.mux2x1 import Mux2x1
from multiplexer.mux_mxn import Mux_mxn
from multiplexer.mux4x2 import Mux4x2
from runner.circuit_runner import CircuitRunner
from signals.signal import Signal
from gate.not_gate import Not
import sys
sys.setrecursionlimit(1000) # default is 1000
def turn_off_debug(every_thing=False):
And.DEBUGMODE = every_thing
Or.DEBUGMODE = every_thing
Xor.DEBUGMODE = every_thing
D_FlipFlop.DEBUGMODE = every_thing
D_Latch.DEBUGMODE = every_thing
Not.DEBUGMODE = every_thing
Mux2x1.DEBUGMODE = every_thing
Mux4x2.DEBUGMODE = every_thing
Signal.DEBUGMODE = every_thing
def test1():
clock = Signal()
l1 = D_Latch(clock, None, "l1")
l1.set_input(l1)
l1.set()
CircuitRunner.run([l1], clock, 4, [[l1]])
def test2():
clock = Signal()
d1 = D_FlipFlop(clock, None, "d1")
not1 = Not(d1, "not")
d1.set_input(not1)
d1.set()
for _ in range(20):
clock.pulse()
d1.logic()
print(d1)
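# Johnson (twisted-ring) counter: an n-bit shift register whose inverted last bit
# feeds the first bit, so the register cycles through 2n distinct states. The loop
# below clocks it for 4n pulses (two full cycles) and prints the state each time.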
def johnson_counter(n=100):
clock = Signal()
bits = [D_FlipFlop(clock, None, f"d{i}") for i in range(n)]
for i in range(1, n):
bits[i].set_input(bits[i - 1])
bits[i].reset()
bits[0].set_input(Not(bits[-1], "not"))
bits[0].reset()
for _ in range(4 * n):
clock.pulse()
bits[0].logic()
print("".join([str(b.q()) for b in bits]))
def multiplexer_test():
mux = Mux4x2((One(), Zero(), One(), Zero()), (One(), Zero()), "my_mux")
CircuitRunner.run([mux], None, None, [[mux]])
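# Ripple-carry adder: the operand bits are held in two banks of D flip-flops, each
# FullAdder's carry-out feeds the next stage's carry-in, and the sums are captured
# in a third bank of flip-flops.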
def n_bit_adder():
clock = Signal()
n = 200
a, b = "01001" * 40, "01110" * 40
d1 = [D_FlipFlop(clock, None, f"a{i}") for i in range(n)]
d2 = [D_FlipFlop(clock, None, f"b{i}") for i in range(n)]
adder = [FullAdder(None, None, f"adder{i}") for i in range(n)]
res = [D_FlipFlop(clock, None, f"r{i}") for i in range(n)]
for i in range(n):
d1[i].set_input(d1[i])
d2[i].set_input(d2[i])
adder[i].set_input((d1[i], d2[i]))
adder[i].set_cin(Zero() if i == 0 else adder[i - 1].cout)
res[i].set_input(adder[i].sum)
res[i].reset()
if a[n - i - 1] == '0':
d1[i].reset()
else:
d1[i].set()
if b[n - 1 - i] == '0':
d2[i].reset()
else:
d2[i].set()
CircuitRunner.run(res, clock, 3, [res])
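# Helper: drive a list of Input gates from a bit string; character i of the string
# sets inputs[i].output to 0 or 1.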
def bitsToGates(bitString, inputs):
for i in range(len(bitString)):
inputs[i].output = 0 if bitString[i] == "0" else 1
def n_multiplexer_test():
inputs = [Input() for _ in range(32)]
selectors = [Input() for _ in range(5)]
mux = Mux_mxn(inputs, selectors, 5)
bitsToGates("11001110011100111001110011100101", inputs)
for i in range(32):
i_bin = bin(i)[2:].zfill(5)
bitsToGates(i_bin, selectors)
CircuitRunner.run([mux], display=[[mux]])
def decoder_test():
inputs = [Input() for _ in range(5)]
dec = Decoder_nxm(inputs, 5)
bitsToGates("11101", inputs)
CircuitRunner.run([dec], display=[dec.outputs])
def comparator_test():
i1 = [Input() for _ in range(5)]
i2 = [Input() for _ in range(5)]
comp = Comparator((i1, i2), 5)
bitsToGates("11101", i1)
bitsToGates("11101", i2)
CircuitRunner.run([comp], display=[[comp]])
turn_off_debug(False)
johnson_counter(800)
|
the-stack_0_357 | import re
import gevent
from gevent.pywsgi import WSGIHandler
from socketio import transports
from geventwebsocket.handler import WebSocketHandler
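# Handles URLs of the form /<resource>/<transport>/<session_id>/..., dispatching each
# supported transport name to its transport class and falling back to plain WSGI
# handling for anything else.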
class SocketIOHandler(WSGIHandler):
path_re = re.compile(r"^/(?P<resource>[^/]+)/(?P<transport>[^/]+)(/(?P<session_id>[^/]*)/?(?P<rest>.*))?$")
handler_types = {
'websocket': transports.WebsocketTransport,
'flashsocket': transports.FlashSocketTransport,
'htmlfile': transports.HTMLFileTransport,
'xhr-multipart': transports.XHRMultipartTransport,
'xhr-polling': transports.XHRPollingTransport,
'jsonp-polling': transports.JSONPolling,
}
def __init__(self, *args, **kwargs):
self.socketio_connection = False
self.allowed_paths = None
super(SocketIOHandler, self).__init__(*args, **kwargs)
def handle_one_response(self):
self.status = None
self.headers_sent = False
self.result = None
self.response_length = 0
self.response_use_chunked = False
path = self.environ.get('PATH_INFO')
parts = SocketIOHandler.path_re.match(path)
# Is this a valid SocketIO path?
if parts:
parts = parts.groupdict()
else:
return super(SocketIOHandler, self).handle_one_response()
resource = parts['resource']
if resource != self.server.resource:
return super(SocketIOHandler, self).handle_one_response()
transport_name = parts['transport']
transport = SocketIOHandler.handler_types.get(transport_name)
if transport is None:
return super(SocketIOHandler, self).handle_one_response()
session_id = parts.get('session_id')
request_method = self.environ.get("REQUEST_METHOD")
# In case this is WebSocket request, switch to the WebSocketHandler
if transport in (transports.WebsocketTransport, \
transports.FlashSocketTransport):
self.__class__ = WebSocketHandler
self.handle_one_response(call_wsgi_app=False)
session = self.server.get_session()
else:
session = self.server.get_session(session_id)
# Make the session object available for WSGI apps
self.environ['socketio'].session = session
# Create a transport and handle the request likewise
self.transport = transport(self)
jobs = self.transport.connect(session, request_method)
if not session.wsgi_app_greenlet or not bool(session.wsgi_app_greenlet):
# Call the WSGI application, and let it run until the Socket.IO
# is *disconnected*, even though many POST/polling requests
# come through.
session.wsgi_app_greenlet = gevent.getcurrent()
session.connected = True
self.application(self.environ,
lambda status, headers, exc=None: None)
session.connected = False
gevent.joinall(jobs)
|
the-stack_0_359 | def get_set():
return set(map(int, input().split()))
def is_super_set(main, sets):
    for subset in sets:
        if not main.issuperset(subset):
return False
return True
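# Read the main set, then `queries` candidate sets, and print True only if the main
# set is a superset of every one of them.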
A = get_set()
queries = int(input())
sets = []
for _ in range(queries):
sets.append(get_set())
print(is_super_set(A, sets))
|
the-stack_0_360 | # SPDX-License-Identifier: Apache-2.0
import os
from distutils.version import StrictVersion
import numpy as np
import onnx
from onnxruntime import __version__ as ort_version
from skl2onnx import __max_supported_opset__ as max_opset
from skl2onnx.common._topology import OPSET_TO_IR_VERSION
from .tests_helper import dump_data_and_model # noqa
from .tests_helper import ( # noqa
dump_one_class_classification,
dump_binary_classification,
dump_multilabel_classification,
dump_multiple_classification)
from .tests_helper import ( # noqa
dump_multiple_regression,
dump_single_regression,
convert_model,
fit_classification_model,
fit_multilabel_classification_model,
fit_clustering_model,
fit_regression_model,
binary_array_to_string,
path_to_leaf
)
def create_tensor(N, C, H=None, W=None):
if H is None and W is None:
return np.random.rand(N, C).astype(np.float32, copy=False)
elif H is not None and W is not None:
return np.random.rand(N, C, H, W).astype(np.float32, copy=False)
else:
raise ValueError('This function only produce 2-D or 4-D tensor.')
def _get_ir_version(opv):
if opv >= 15:
return 8
if opv >= 12:
return 7
if opv >= 11:
return 6
if opv >= 10:
return 5
if opv >= 9:
return 4
if opv >= 8:
return 4
return 3
def max_onnxruntime_opset():
"""
See `Versioning.md
<https://github.com/microsoft/onnxruntime/blob/
master/docs/Versioning.md>`_.
"""
vi = StrictVersion(ort_version.split('+')[0])
if vi >= StrictVersion("1.9.0"):
return 15
if vi >= StrictVersion("1.8.0"):
return 14
if vi >= StrictVersion("1.6.0"):
return 13
if vi >= StrictVersion("1.3.0"):
return 12
if vi >= StrictVersion("1.0.0"):
return 11
if vi >= StrictVersion("0.4.0"):
return 10
if vi >= StrictVersion("0.3.0"):
return 9
return 8
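# Opset/IR targets used by the tests: the TEST_TARGET_OPSET / TEST_TARGET_IR
# environment variables win; otherwise take the smallest of onnxruntime's, skl2onnx's
# and onnx's maximum supported opsets, and derive a compatible IR version from it.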
TARGET_OPSET = int(
os.environ.get(
'TEST_TARGET_OPSET',
min(max_onnxruntime_opset(),
min(max_opset,
onnx.defs.onnx_opset_version()))))
TARGET_IR = int(
os.environ.get(
'TEST_TARGET_IR',
min(OPSET_TO_IR_VERSION[TARGET_OPSET],
_get_ir_version(TARGET_OPSET))))
|
the-stack_0_361 | """
Base and utility classes for pandas objects.
"""
import builtins
from collections import OrderedDict
import textwrap
from typing import Dict, FrozenSet, Optional
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetimelike,
is_extension_array_dtype,
is_extension_type,
is_list_like,
is_object_dtype,
is_scalar,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import duplicated, unique1d, value_counts
from pandas.core.arrays import ExtensionArray
import pandas.core.nanops as nanops
_shared_docs = dict() # type: Dict[str, str]
_indexops_doc_kwargs = dict(
klass="IndexOpsMixin",
inplace="",
unique="IndexOpsMixin",
duplicated="IndexOpsMixin",
)
class PandasObject(DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __repr__(self):
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, "_cache", None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, "memory_usage"):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super().__sizeof__()
class NoNewAttributesMixin:
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on a accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(
"You cannot add any new attribute '{key}'".format(key=key)
)
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin:
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
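    # Lookup tables used by .agg/.aggregate: _builtin_table rewrites Python builtins
    # (sum/max/min) to their NumPy equivalents, and _cython_table maps NumPy reductions
    # to the names of pandas' own Cython-backed implementations.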
_builtin_table = OrderedDict(
((builtins.sum, np.sum), (builtins.max, np.max), (builtins.min, np.min))
)
_cython_table = OrderedDict(
(
(builtins.sum, "sum"),
(builtins.max, "max"),
(builtins.min, "min"),
(np.all, "all"),
(np.any, "any"),
(np.sum, "sum"),
(np.nansum, "sum"),
(np.mean, "mean"),
(np.nanmean, "mean"),
(np.prod, "prod"),
(np.nanprod, "prod"),
(np.std, "std"),
(np.nanstd, "std"),
(np.var, "var"),
(np.nanvar, "var"),
(np.median, "median"),
(np.nanmedian, "median"),
(np.max, "max"),
(np.nanmax, "max"),
(np.min, "min"),
(np.nanmin, "min"),
(np.cumprod, "cumprod"),
(np.nancumprod, "cumprod"),
(np.cumsum, "cumsum"),
(np.nancumsum, "cumsum"),
)
)
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(
"Column(s) {selection} already selected".format(
selection=self._selection
)
)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(
"Columns not found: {missing}".format(missing=str(bad_keys)[1:-1])
)
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, str)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, arg, None)
if f is not None:
if hasattr(self, "__array__"):
# in particular exclude Window
return f(self, *args, **kwargs)
raise AttributeError(
"'{arg}' is not a valid function for "
"'{cls}' object".format(arg=arg, cls=type(self).__name__)
)
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describe the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
_level = kwargs.pop("_level", None)
if isinstance(arg, str):
return self._try_aggregate_string_function(arg, *args, **kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError("Can only pass dict with axis=0")
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
msg = textwrap.dedent(
"""\
using a dict with renaming is deprecated and will be removed
in a future version.
For column-specific groupby renaming, use named aggregation
>>> df.groupby(...).agg(name=('column', aggfunc))
"""
)
warnings.warn(msg, FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in arg.values()):
new_arg = OrderedDict()
for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = (
"cannot perform renaming for {key} with a "
"nested dictionary"
).format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif isinstance(obj, ABCDataFrame) and k not in obj.columns:
raise KeyError("Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(arg.keys())
if isinstance(obj, ABCDataFrame) and len(
obj.columns.intersection(keys)
) != len(keys):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError(
"nested dictionary is ambiguous in aggregation"
)
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2, subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = OrderedDict()
for fname, agg_how in arg.items():
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(arg.keys())
result = OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = OrderedDict(), result
for r in results:
result.update(r)
keys = list(result.keys())
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(
arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how)
)
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries) for r in result.values())
def is_any_frame():
                # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame) for r in result.values())
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys], keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result, name=getattr(self, "name", None))
return result, True
elif is_list_like(arg):
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg, _level=_level, _axis=_axis), None
else:
result = None
f = self._get_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
try:
new_res = colg.aggregate(a)
except (TypeError, DataError):
pass
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
for index, col in enumerate(obj):
colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index])
try:
new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
pass
elif "no results" in str(err):
                        # raised directly in _aggregate_multiple_funcs
pass
else:
raise
else:
results.append(new_res)
keys.append(col)
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacement attributes
"""
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _get_cython_func(self, arg: str) -> Optional[str]:
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class IndexOpsMixin:
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_deprecations = frozenset(
[
"tolist", # tolist is not deprecated, just suppressed in the __dir__
"base",
"data",
"item",
"itemsize",
"flags",
"strides",
]
) # type: FrozenSet[str]
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Series and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
DataFrame._is_homogeneous_type : Whether all the columns in a
DataFrame have the same dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
.. deprecated:: 0.25.0
Returns
-------
scalar
The first element of %(klass)s.
"""
warnings.warn(
"`item` has been deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self.values.item()
@property
def data(self):
"""
Return the data pointer of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return len(self._values)
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
.. deprecated:: 0.23.0
"""
warnings.warn(
"{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning,
stacklevel=2,
)
return self.values.base
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
.. versionadded:: 0.24.0
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
        ``.array`` differs from ``.values``, which may require converting
        the data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
# As a mixin, we depend on the mixing class having _values.
# Special mixin syntax may be developed in the future:
# https://github.com/python/typing/issues/246
result = self._values # type: ignore
if is_datetime64_ns_dtype(result.dtype):
from pandas.arrays import DatetimeArray
result = DatetimeArray(result)
elif is_timedelta64_ns_dtype(result.dtype):
from pandas.arrays import TimedeltaArray
result = TimedeltaArray(result)
elif not is_extension_array_dtype(result.dtype):
from pandas.core.arrays.numpy_ import PandasArray
result = PandasArray(result)
return result
def to_numpy(self, dtype=None, copy=False):
"""
A NumPy ndarray representing the values in this Series or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_datetime64tz_dtype(self.dtype) and dtype is None:
# note: this is going to change very soon.
# I have a WIP PR making this unnecessary, but it's
# a bit out of scope for the DatetimeArray PR.
dtype = "object"
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy:
result = result.copy()
return result
@property
def _ndarray_values(self) -> np.ndarray:
"""
The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
# As a mixin, we depend on the mixing class having values.
# Special mixin syntax may be developed in the future:
# https://github.com/python/typing/issues/246
return self.values # type: ignore
@property
def empty(self):
return not self.size
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Return an ndarray of the maximum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
numpy.ndarray
Indices of the maximum values.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmax(self._values, skipna=skipna)
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
        Return an ndarray of the minimum argument indexer.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Returns
-------
numpy.ndarray
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_minmax_axis(axis)
nv.validate_argmax_with_skipna(skipna, args, kwargs)
return nanops.nanargmin(self._values, skipna=skipna)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if is_datetimelike(self._values):
return map(com.maybe_box_datetimelike, self._values)
elif is_extension_array_dtype(self._values):
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError(
"{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name
)
)
return func(skipna=skipna, **kwds)
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, "__missing__"):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self._values):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
return self._values.map(mapper)
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, "values", values)
if na_action == "ignore":
def map_f(values, f):
return lib.map_infer_mask(values, f, isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
result = value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
return result
def unique(self):
values = self._values
if hasattr(values, "unique"):
result = values.unique()
else:
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values.
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, "memory_usage"):
return self.array.memory_usage(deep=deep)
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.array)
return v
@Substitution(
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""
),
)
@Appender(algorithms._shared_docs["factorize"])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The %(klass)s *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
.. versionchanged:: 0.24.0
If `value` is a scalar, an int is now always returned.
Previously, scalar inputs returned an 1-item array for
:class:`Series` and :class:`Categorical`.
See Also
--------
sort_values
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
3
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = pd.Categorical(['apple', 'bread', 'bread',
'cheese', 'milk'], ordered=True)
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
1
>>> x.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> x = pd.Series([2, 1, 3])
>>> x.searchsorted(1)
0 # wrong result, correct would be 1
"""
@Substitution(klass="Index")
@Appender(_shared_docs["searchsorted"])
def searchsorted(self, value, side="left", sorter=None):
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first", inplace=False):
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep="first"):
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(
duplicated(self, keep=keep), index=self.index
).__finalize__(self)
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, verify_is_copy=True, **kwargs):
raise AbstractMethodError(self)
|
the-stack_0_362 | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learning.federated_averaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.learning import federated_averaging
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model_examples
from tensorflow_federated.python.learning import model_utils
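# ClientFedAvg implements the client half of Federated Averaging: starting from the
# broadcast model weights, run local training over the client's dataset and report
# the resulting weight delta together with an example-count weight for aggregation.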
class FederatedAveragingClientTest(test.TestCase, parameterized.TestCase):
"""Tests of ClientFedAvg that use a common model and data."""
def dataset(self):
# Create a dataset with 4 examples:
dataset = tf.data.Dataset.from_tensor_slices(
model_examples.TrainableLinearRegression.make_batch(
x=[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 0.0]],
y=[[0.0], [0.0], [1.0], [1.0]]))
# Repeat the dataset 2 times with batches of 3 examples,
# producing 3 minibatches (the last one with only 2 examples).
# Note that `batch` is required for this dataset to be useable,
# as it adds the batch dimension which is expected by the model.
return dataset.repeat(2).batch(3)
def model(self):
return model_examples.TrainableLinearRegression(feature_dim=2)
def initial_weights(self):
return model_utils.ModelWeights(
trainable={
'a': tf.constant([[0.0], [0.0]]),
'b': tf.constant(0.0)
},
non_trainable={'c': 0.0})
@test.graph_mode_test
def test_client_tf(self):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(model)
init_op = tf.group(
model_utils.model_initializer(model),
tf.compat.v1.initializers.variables(client_tf.variables),
name='fedavg_initializer')
client_outputs = client_tf(dataset, self.initial_weights())
tf.compat.v1.get_default_graph().finalize()
with self.session() as sess:
sess.run(init_op)
out = sess.run(client_outputs)
# Both trainable parameters should have been updated,
# and we don't return the non-trainable 'c'.
self.assertCountEqual(['a', 'b'], list(out.weights_delta.keys()))
self.assertGreater(np.linalg.norm(out.weights_delta['a']), 0.1)
self.assertGreater(np.linalg.norm(out.weights_delta['b']), 0.1)
self.assertEqual(out.weights_delta_weight, 8.0)
self.assertEqual(out.optimizer_output['num_examples'], 8)
self.assertEqual(out.optimizer_output['has_non_finite_delta'], 0)
self.assertEqual(out.model_output['num_examples'], 8)
self.assertEqual(out.model_output['num_batches'], 3)
self.assertBetween(out.model_output['loss'],
np.finfo(np.float32).eps, 10.0)
def test_client_tf_custom_delta_weight(self):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(
model, client_weight_fn=lambda _: tf.constant(1.5))
out = client_tf(dataset, self.initial_weights())
self.assertEqual(self.evaluate(out.weights_delta_weight), 1.5)
@parameterized.named_parameters(('_inf', np.inf), ('_nan', np.nan))
def test_non_finite_aggregation(self, bad_value):
model = self.model()
dataset = self.dataset()
client_tf = federated_averaging.ClientFedAvg(model)
init_weights = self.initial_weights()
init_weights.trainable['b'] = bad_value
out = client_tf(dataset, init_weights)
self.assertEqual(self.evaluate(out.weights_delta_weight), 0.0)
self.assertAllClose(
self.evaluate(out.weights_delta['a']), np.array([[0.0], [0.0]]))
self.assertAllClose(self.evaluate(out.weights_delta['b']), 0.0)
self.assertEqual(
self.evaluate(out.optimizer_output['has_non_finite_delta']), 1)
class FederatedAveragingTffTest(test.TestCase, parameterized.TestCase):
def test_orchestration_execute(self):
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_examples.TrainableLinearRegression)
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.], [3., 4.]],
'y': [[5.], [6.]]
}).batch(2)
federated_ds = [ds] * 3
server_state = iterative_process.initialize()
prev_loss = np.inf
for _ in range(3):
server_state, metric_outputs = iterative_process.next(
server_state, federated_ds)
self.assertEqual(metric_outputs.num_examples, 2 * len(federated_ds))
self.assertLess(metric_outputs.loss, prev_loss)
prev_loss = metric_outputs.loss
@parameterized.named_parameters([
('functional_model',
model_examples.build_linear_regresion_keras_functional_model),
('sequential_model',
model_examples.build_linear_regresion_keras_sequential_model),
('subclass_model',
model_examples.build_linear_regresion_keras_subclass_model),
])
def test_orchestration_execute_from_keras(self, build_keras_model_fn):
dummy_batch = collections.OrderedDict([
('x', np.zeros([1, 2], np.float32)),
('y', np.zeros([1, 1], np.float32)),
])
def model_fn():
keras_model = build_keras_model_fn(feature_dims=2)
keras_model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[])
return keras_utils.from_compiled_keras_model(keras_model, dummy_batch)
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_fn)
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.], [3., 4.]],
'y': [[5.], [6.]]
}).batch(2)
federated_ds = [ds] * 3
server_state = iterative_process.initialize()
prev_loss = np.inf
for _ in range(3):
server_state, metrics = iterative_process.next(server_state, federated_ds)
self.assertLess(metrics.loss, prev_loss)
prev_loss = metrics.loss
def test_execute_empty_data(self):
iterative_process = federated_averaging.build_federated_averaging_process(
model_fn=model_examples.TrainableLinearRegression)
# Results in empty dataset with correct types and shapes.
ds = tf.data.Dataset.from_tensor_slices({
'x': [[1., 2.]],
'y': [[5.]]
}).batch(
5, drop_remainder=True)
federated_ds = [ds] * 2
server_state = iterative_process.initialize()
first_state, metric_outputs = iterative_process.next(
server_state, federated_ds)
self.assertEqual(
self.evaluate(tf.reduce_sum(first_state.model.trainable.a)) +
self.evaluate(tf.reduce_sum(first_state.model.trainable.b)), 0)
self.assertEqual(metric_outputs.num_examples, 0)
self.assertTrue(tf.is_nan(metric_outputs.loss))
if __name__ == '__main__':
test.main()
|
the-stack_0_363 | import argparse
import torch
torch.cuda.current_device()
import torch.optim as optim
from painter import *
# settings
parser = argparse.ArgumentParser(description='STYLIZED NEURAL PAINTING')
parser.add_argument('--img_path', type=str, default='./test_images/sunflowers.jpg', metavar='str',
help='path to test image (default: ./test_images/sunflowers.jpg)')
parser.add_argument('--renderer', type=str, default='rectangle', metavar='str',
                    help='renderer: [watercolor, markerpen, oilpaintbrush, rectangle] (default: rectangle)')
parser.add_argument('--canvas_color', type=str, default='black', metavar='str',
help='canvas_color: [black, white] (default black)')
parser.add_argument('--canvas_size', type=int, default=512, metavar='str',
help='size ( max(w, h) ) of the canvas for stroke rendering')
parser.add_argument('--max_m_strokes', type=int, default=500, metavar='str',
help='max number of strokes (default 500)')
parser.add_argument('--max_divide', type=int, default=5, metavar='N',
help='divide an image up-to max_divide x max_divide patches (default 5)')
parser.add_argument('--beta_L1', type=float, default=1.0,
help='weight for L1 loss (default: 1.0)')
parser.add_argument('--with_ot_loss', action='store_true', default=False,
help='imporve the convergence by using optimal transportation loss')
parser.add_argument('--beta_ot', type=float, default=0.1,
help='weight for optimal transportation loss (default: 0.1)')
parser.add_argument('--net_G', type=str, default='zou-fusion-net', metavar='str',
help='net_G: plain-dcgan, plain-unet, huang-net, or zou-fusion-net (default: zou-fusion-net)')
parser.add_argument('--renderer_checkpoint_dir', type=str, default=r'./checkpoints_G_rectangle', metavar='str',
help='dir to load neu-renderer (default: ./checkpoints_G_rectangle)')
parser.add_argument('--lr', type=float, default=0.005,
help='learning rate for stroke searching (default: 0.005)')
parser.add_argument('--output_dir', type=str, default=r'./output', metavar='str',
help='dir to save painting results (default: ./output)')
parser.add_argument('--disable_preview', action='store_true', default=False,
help='disable cv2.imshow, for running remotely without x-display')
args = parser.parse_args()
# Decide which device we want to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
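# Progressive painting loop: at each m_grid level the image is split into m x m
# patches, stroke parameters (geometry, color, alpha) are refined with RMSprop
# through the frozen neural renderer, and the rendered strokes are composited onto
# the running canvas before moving on to the next, finer level.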
def optimize_x(pt):
pt._load_checkpoint()
pt.net_G.eval()
print('begin drawing...')
PARAMS = np.zeros([1, 0, pt.rderr.d], np.float32)
if pt.rderr.canvas_color == 'white':
CANVAS_tmp = torch.ones([1, 3, 128, 128]).to(device)
else:
CANVAS_tmp = torch.zeros([1, 3, 128, 128]).to(device)
for pt.m_grid in range(1, pt.max_divide + 1):
pt.img_batch = utils.img2patches(pt.img_, pt.m_grid).to(device)
pt.G_final_pred_canvas = CANVAS_tmp
pt.initialize_params()
pt.x_ctt.requires_grad = True
pt.x_color.requires_grad = True
pt.x_alpha.requires_grad = True
utils.set_requires_grad(pt.net_G, False)
pt.optimizer_x = optim.RMSprop([pt.x_ctt, pt.x_color, pt.x_alpha], lr=pt.lr, centered=True)
pt.step_id = 0
for pt.anchor_id in range(0, pt.m_strokes_per_block):
pt.stroke_sampler(pt.anchor_id)
iters_per_stroke = 20
for i in range(iters_per_stroke):
pt.G_pred_canvas = CANVAS_tmp
# update x
pt.optimizer_x.zero_grad()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)
pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)
pt._forward_pass()
pt._backward_x()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)
pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)
pt._drawing_step_states()
pt.optimizer_x.step()
pt.step_id += 1
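        # After finishing this grid level, the strokes found so far are converted to canvas
        # coordinates and appended to PARAMS, the whole canvas is re-rendered from all stroke
        # parameters, and the result is split into (m_grid + 1) x (m_grid + 1) patches for the
        # next, finer level.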
v = pt._normalize_strokes(pt.x)
PARAMS = np.concatenate([PARAMS, np.reshape(v, [1, -1, pt.rderr.d])], axis=1)
CANVAS_tmp = pt._render(PARAMS)[-1]
CANVAS_tmp = utils.img2patches(CANVAS_tmp, pt.m_grid + 1, to_tensor=True).to(device)
pt._save_stroke_params(PARAMS)
pt.final_rendered_images = pt._render(PARAMS)
pt._save_rendered_images()
if __name__ == '__main__':
pt = ProgressivePainter(args=args)
optimize_x(pt)
|
the-stack_0_364 | import base64
import json
import os
import sys
import re
from logging import getLogger, StreamHandler, INFO
from google.cloud import storage
age = os.environ.get('LIFECYCLE_EXPIRE')
ignorePatterns = os.environ.get('IGNORE_PATTERNS')
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(INFO)
logger.setLevel(INFO)
logger.addHandler(handler)
logger.propagate = False
def get_gcs_bucket_name(pubsub_message):
proto_payload = pubsub_message.get(u'protoPayload')
if proto_payload is None or len(proto_payload) == 0:
return None
resource_name = proto_payload.get(u'resourceName')
if resource_name is None or len(resource_name) == 0:
return None
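    # resourceName is expected to look like
    # "projects/_/buckets/<bucket_name>/objects/<object_name>", so the element at
    # index 3 is the bucket name (assumed format of the Cloud Audit Log entry).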
return resource_name.split('/')[3]
def get_project_id(pubsub_message):
resource = pubsub_message.get(u'resource')
if resource is None or len(resource) == 0:
return None
labels = resource.get(u'labels')
if labels is None or len(labels) == 0:
return None
project_id = labels.get(u'project_id')
if project_id is None or len(project_id) == 0:
return None
return project_id
# Add a lifecycle rule which deletes objects after `age` days (value of LIFECYCLE_EXPIRE)
def enable_bucket_lifecycle(bucket_name):
client = storage.Client()
bucket = client.get_bucket(bucket_name)
bucket.add_lifecycle_delete_rule(age=age)
bucket.patch()
logger.info("Lifecycle addition is complete.")
def main_handler(event, context):
pubsub_message = json.loads(base64.b64decode(event['data']).decode('utf-8'))
bucket_name = get_gcs_bucket_name(pubsub_message)
if bucket_name is None:
logger.error("Could not get the bucket name from the event data.")
return
logger.info("Bucket: %s" % bucket_name)
project_id = get_project_id(pubsub_message)
if project_id is None:
logger.warning("Could not get the project id from the event data.")
logger.info("Project id: %s" % project_id)
for ignorePattern in ignorePatterns.split('###'):
try:
if re.match(ignorePattern, bucket_name):
logger.info("Since it is included in ignorePattern '%s', it does not set the life cycle." % ignorePattern)
return
except re.error as regex_error:
logger.warning("The grammar expression '%s' has an error : %s" % (ignorePattern, regex_error))
enable_bucket_lifecycle(bucket_name)
# debug
if __name__ == '__main__':
f = open("event_sample.json", "r", encoding="utf-8")
event = json.load(f)
f.close()
context = ''
age = '365'
ignorePatterns = '.*.appspot.com###gcf-sources*'
main_handler(event, context)
|
the-stack_0_365 | # Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from model import ssd_net_resnet34_large
from dataset import dataset_common
from utils import ssd_preprocessing
from utils import anchor_manipulator
from utils import scaffolds
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 24,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'num_cpu_threads', 0,
'The number of cpu cores used to train.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
tf.app.flags.DEFINE_string(
'data_dir', './tfrecords/',
'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_integer(
'num_classes', 81, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'model_dir', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block_3*3_map/',
'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
'save_summary_steps', 500,
    'The frequency with which summaries are saved, in steps.')
tf.app.flags.DEFINE_integer(
'save_checkpoints_secs', 3600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'train_image_size', 1200,
'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
'train_epochs', None,
'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
'max_number_of_steps', 840000,
'The max number of steps to use for training.')
tf.app.flags.DEFINE_integer(
'batch_size', 48,
'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
'data_format', 'channels_first',
'A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
'match_threshold', 0.5, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.5, 'Matching threshold for the negative examples in the loss function.')
tf.app.flags.DEFINE_integer(
'tf_random_seed', 20180503, 'Random seed for TensorFlow initializers.')
tf.app.flags.DEFINE_float(
'weight_decay', 5e-4, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 4e-3, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.000001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_string(
'decay_boundaries', '6000, 26000, 40000, 60000, 79000, 795000, 815000',
'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
'lr_decay_factors', '0.001, 0.01, 0.04, 0.001, 0.001, 0.001, 0.01, 0.001',
'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
tf.app.flags.DEFINE_string(
'checkpoint_path', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block.21.1/model.ckpt-99590',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_model_scope', 'ssd1200',
'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
'model_scope', 'ssd1200',
'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', '',
'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', True,
    'Whether to ignore missing variables when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
'multi_gpu', True,
    'Whether to use GPUs for training.')
FLAGS = tf.app.flags.FLAGS
def validate_batch_size_for_multi_gpu(batch_size):
"""For multi-gpu, batch-size must be a multiple of the number of
available GPUs.
Note that this should eventually be handled by replicate_model_fn
directly. Multi-GPU support is currently experimental, however,
so doing the work here until that feature is in place.
"""
if FLAGS.multi_gpu:
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])
if not num_gpus:
raise ValueError('Multi-GPU mode was specified, but no GPUs '
'were found. To use CPU, run --multi_gpu=False.')
remainder = batch_size % num_gpus
if remainder:
err = ('When running with multiple GPUs, batch size '
'must be a multiple of the number of available GPUs. '
'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
).format(num_gpus, batch_size, batch_size - remainder)
raise ValueError(err)
return num_gpus
return 0
def get_init_fn():
return scaffolds.get_init_fn_for_scaffold(FLAGS.model_dir, FLAGS.checkpoint_path,
FLAGS.model_scope, FLAGS.checkpoint_model_scope,
FLAGS.checkpoint_exclude_scopes, FLAGS.ignore_missing_vars,
name_remap=None)#{'/kernel': '/weights', '/bias': '/biases'})
global_anchor_info = dict()
def input_pipeline(dataset_pattern='pascalvoc_0712_train_*', is_training=True, batch_size=FLAGS.batch_size):
def input_fn():
out_shape = [FLAGS.train_image_size] * 2
anchor_creator = anchor_manipulator.AnchorCreator(out_shape,
layers_shapes = [(50, 50), (25, 25), (13, 13), (7, 7), (3, 3), (3, 3)],
anchor_scales = [(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), (0.9,)],
extra_anchor_scales = [(0.1414,), (0.2739,), (0.4541,), (0.6315,), (0.8078,), (0.9836,)],
anchor_ratios = [(1., 2., .5), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., .5), (1., 2., .5)],
layer_steps = [24, 48, 92, 171, 400, 400])
all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors()
num_anchors_per_layer = []
for ind in range(len(all_anchors)):
num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind])
anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders = [1.0] * 6,
positive_threshold = FLAGS.match_threshold,
ignore_threshold = FLAGS.neg_threshold,
prior_scaling=[0.1, 0.1, 0.2, 0.2])
image_preprocessing_fn = lambda image_, labels_, bboxes_ : ssd_preprocessing.preprocess_image(image_, labels_, bboxes_, out_shape, is_training=is_training, data_format=FLAGS.data_format, output_rgb=False)
anchor_encoder_fn = lambda glabels_, gbboxes_: anchor_encoder_decoder.encode_all_anchors(glabels_, gbboxes_, all_anchors, all_num_anchors_depth, all_num_anchors_spatial)
image, _, shape, loc_targets, cls_targets, match_scores = dataset_common.slim_get_batch(FLAGS.num_classes,
batch_size,
('train' if is_training else 'val'),
os.path.join(FLAGS.data_dir, dataset_pattern),
FLAGS.num_readers,
FLAGS.num_preprocessing_threads,
image_preprocessing_fn,
anchor_encoder_fn,
num_epochs=FLAGS.train_epochs,
is_training=is_training)
global global_anchor_info
global_anchor_info = {'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors(pred, num_anchors_per_layer),
'num_anchors_per_layer': num_anchors_per_layer,
'all_num_anchors_depth': all_num_anchors_depth }
return image, {'shape': shape, 'loc_targets': loc_targets, 'cls_targets': cls_targets, 'match_scores': match_scores}
return input_fn
def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights=1., bbox_outside_weights=1., sigma=1.):
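    # Smooth L1 (Huber-style) loss used for box regression. With d = bbox_pred - bbox_targets:
    #   loss(d) = 0.5 * (sigma * d)^2    if |d| < 1 / sigma^2
    #           = |d| - 0.5 / sigma^2    otherwise
    # bbox_inside_weights / bbox_outside_weights let the caller mask or re-weight coordinates.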
with tf.name_scope('smooth_l1', [bbox_pred, bbox_targets]):
sigma2 = sigma * sigma
inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))
smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))
outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)
return outside_mul
def ssd_model_fn(features, labels, mode, params):
shape = labels['shape']
loc_targets = labels['loc_targets']
cls_targets = labels['cls_targets']
match_scores = labels['match_scores']
print('loc_targets:', loc_targets)
print('cls_targets:', cls_targets)
global global_anchor_info
decode_fn = global_anchor_info['decode_fn']
num_anchors_per_layer = global_anchor_info['num_anchors_per_layer']
all_num_anchors_depth = global_anchor_info['all_num_anchors_depth']
with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):
backbone = ssd_net_resnet34_large.Resnet34Backbone(params['data_format'])
feature_layers = backbone.forward(features, training=(mode == tf.estimator.ModeKeys.TRAIN))
location_pred, cls_pred = ssd_net_resnet34_large.multibox_head(feature_layers, params['num_classes'], all_num_anchors_depth, data_format=params['data_format'], strides=(3, 3))
print(location_pred, cls_pred)
if params['data_format'] == 'channels_first':
cls_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in cls_pred]
location_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in location_pred]
cls_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, params['num_classes']]) for pred in cls_pred]
location_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, 4]) for pred in location_pred]
cls_pred = tf.concat(cls_pred, axis=1)
location_pred = tf.concat(location_pred, axis=1)
cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
location_pred = tf.reshape(location_pred, [-1, 4])
with tf.device('/cpu:0'):
with tf.control_dependencies([cls_pred, location_pred]):
with tf.name_scope('post_forward'):
#bboxes_pred = decode_fn(location_pred)
bboxes_pred = tf.map_fn(lambda _preds : decode_fn(_preds),
tf.reshape(location_pred, [tf.shape(features)[0], -1, 4]),
dtype=[tf.float32] * len(num_anchors_per_layer), back_prop=False)
#cls_targets = tf.Print(cls_targets, [tf.shape(bboxes_pred[0]),tf.shape(bboxes_pred[1]),tf.shape(bboxes_pred[2]),tf.shape(bboxes_pred[3])])
bboxes_pred = [tf.reshape(preds, [-1, 4]) for preds in bboxes_pred]
bboxes_pred = tf.concat(bboxes_pred, axis=0)
flaten_cls_targets = tf.reshape(cls_targets, [-1])
flaten_match_scores = tf.reshape(match_scores, [-1])
flaten_loc_targets = tf.reshape(loc_targets, [-1, 4])
                # each positive example has one label
positive_mask = flaten_cls_targets > 0
n_positives = tf.count_nonzero(positive_mask)
batch_n_positives = tf.count_nonzero(cls_targets, -1)
batch_negtive_mask = tf.equal(cls_targets, 0)#tf.logical_and(tf.equal(cls_targets, 0), match_scores > 0.)
batch_n_negtives = tf.count_nonzero(batch_negtive_mask, -1)
batch_n_neg_select = tf.cast(params['negative_ratio'] * tf.cast(batch_n_positives, tf.float32), tf.int32)
batch_n_neg_select = tf.minimum(batch_n_neg_select, tf.cast(batch_n_negtives, tf.int32))
# hard negative mining for classification
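                # For every image, candidate negatives are ranked by their predicted background
                # probability; the batch_n_neg_select hardest ones (lowest background score) are
                # kept, so roughly negative_ratio negatives per positive enter the loss.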
predictions_for_bg = tf.nn.softmax(tf.reshape(cls_pred, [tf.shape(features)[0], -1, params['num_classes']]))[:, :, 0]
prob_for_negtives = tf.where(batch_negtive_mask,
0. - predictions_for_bg,
# ignore all the positives
0. - tf.ones_like(predictions_for_bg))
topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=tf.shape(prob_for_negtives)[1])
score_at_k = tf.gather_nd(topk_prob_for_bg, tf.stack([tf.range(tf.shape(features)[0]), batch_n_neg_select - 1], axis=-1))
selected_neg_mask = prob_for_negtives >= tf.expand_dims(score_at_k, axis=-1)
                # include both the selected negatives and all positive examples
final_mask = tf.stop_gradient(tf.logical_or(tf.reshape(tf.logical_and(batch_negtive_mask, selected_neg_mask), [-1]), positive_mask))
total_examples = tf.count_nonzero(final_mask)
cls_pred = tf.boolean_mask(cls_pred, final_mask)
location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
flaten_cls_targets = tf.boolean_mask(tf.clip_by_value(flaten_cls_targets, 0, params['num_classes']), final_mask)
flaten_loc_targets = tf.stop_gradient(tf.boolean_mask(flaten_loc_targets, positive_mask))
predictions = {
'classes': tf.argmax(cls_pred, axis=-1),
'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
'loc_predict': bboxes_pred }
cls_accuracy = tf.metrics.accuracy(flaten_cls_targets, predictions['classes'])
metrics = {'cls_accuracy': cls_accuracy}
# Create a tensor named train_accuracy for logging purposes.
tf.identity(cls_accuracy[1], name='cls_accuracy')
tf.summary.scalar('cls_accuracy', cls_accuracy[1])
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#flaten_cls_targets=tf.Print(flaten_cls_targets, [flaten_loc_targets],summarize=50000)
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=flaten_cls_targets, logits=cls_pred) * (params['negative_ratio'] + 1.)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy_loss')
tf.summary.scalar('cross_entropy_loss', cross_entropy)
#loc_loss = tf.cond(n_positives > 0, lambda: modified_smooth_l1(location_pred, tf.stop_gradient(flaten_loc_targets), sigma=1.), lambda: tf.zeros_like(location_pred))
loc_loss = modified_smooth_l1(location_pred, flaten_loc_targets, sigma=1.)
#loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1), name='location_loss')
tf.summary.scalar('location_loss', loc_loss)
tf.losses.add_loss(loc_loss)
l2_loss_vars = []
for trainable_var in tf.trainable_variables():
if '_bn' not in trainable_var.name:
if 'conv4_3_scale' not in trainable_var.name:
l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.1)
else:
l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.1)
# Add weight decay to the loss. We exclude the batch norm variables because
# doing so leads to a small improvement in accuracy.
total_loss = tf.add(cross_entropy + loc_loss, tf.multiply(params['weight_decay'], tf.add_n(l2_loss_vars), name='l2_loss'), name='total_loss')
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
[int(_) for _ in params['decay_boundaries']],
lr_values)
truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')
# Create a tensor named learning_rate for logging purposes.
tf.summary.scalar('learning_rate', truncated_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,
momentum=params['momentum'])
optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(total_loss, global_step)
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
eval_metric_ops=metrics,
#scaffold=None)
scaffold=tf.train.Scaffold(init_fn=get_init_fn()))
def parse_comma_list(args):
return [float(s.strip()) for s in args.split(',')]
def main(_):
os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'
#tf.set_pruning_mode()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=FLAGS.num_cpu_threads, inter_op_parallelism_threads=FLAGS.num_cpu_threads, gpu_options=gpu_options)
num_gpus = validate_batch_size_for_multi_gpu(FLAGS.batch_size)
run_config = tf.estimator.RunConfig().replace(
save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(
save_checkpoints_steps=None).replace(
save_summary_steps=FLAGS.save_summary_steps).replace(
keep_checkpoint_max=5).replace(
tf_random_seed=FLAGS.tf_random_seed).replace(
log_step_count_steps=FLAGS.log_every_n_steps).replace(
session_config=config)
replicate_ssd_model_fn = tf.contrib.estimator.replicate_model_fn(ssd_model_fn, loss_reduction=tf.losses.Reduction.MEAN)
ssd_detector = tf.estimator.Estimator(
model_fn=replicate_ssd_model_fn, model_dir=FLAGS.model_dir, config=run_config,
params={
'num_gpus': num_gpus,
'data_format': FLAGS.data_format,
'batch_size': FLAGS.batch_size,
'model_scope': FLAGS.model_scope,
'num_classes': FLAGS.num_classes,
'negative_ratio': FLAGS.negative_ratio,
'match_threshold': FLAGS.match_threshold,
'neg_threshold': FLAGS.neg_threshold,
'weight_decay': FLAGS.weight_decay,
'momentum': FLAGS.momentum,
'learning_rate': FLAGS.learning_rate,
'end_learning_rate': FLAGS.end_learning_rate,
'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),
'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),
})
tensors_to_log = {
'lr': 'learning_rate',
'ce': 'cross_entropy_loss',
'loc': 'location_loss',
'loss': 'total_loss',
'l2': 'l2_loss',
'acc': 'post_forward/cls_accuracy',
}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps,
formatter=lambda dicts: (', '.join(['%s=%.6f' % (k, v) for k, v in dicts.items()])))
print('Starting a training cycle.')
ssd_detector.train(input_fn=input_pipeline(dataset_pattern='coco_2017_train-*', is_training=True, batch_size=FLAGS.batch_size),
hooks=[logging_hook], max_steps=FLAGS.max_number_of_steps)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
the-stack_0_370 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoinold Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
assumevalid.py
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoinold/bitcoinold/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
'''
from test_framework.mininode import *
from test_framework.test_framework import BitcoinoldTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
from test_framework.key import CECKey
from test_framework.script import *
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def on_close(self, conn):
self.disconnected = True
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
class SendHeadersTest(BitcoinoldTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
def run_test(self):
# Connect to node0
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
node0.wait_for_verack()
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node1 = BaseNode() # connects to node1
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
node1.add_connection(connections[1])
node1.wait_for_verack()
self.nodes.append(start_node(2, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node2 = BaseNode() # connects to node2
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[2])
node2.wait_for_verack()
# send header lists to all three nodes
node0.send_header_for_blocks(self.blocks[0:2000])
node0.send_header_for_blocks(self.blocks[2000:])
node1.send_header_for_blocks(self.blocks[0:2000])
node1.send_header_for_blocks(self.blocks[2000:])
node2.send_header_for_blocks(self.blocks[0:200])
# Send 102 blocks to node0. Block 102 will be rejected.
for i in range(101):
node0.send_message(msg_block(self.blocks[i]))
node0.sync_with_ping() # make sure the most recent block is synced
node0.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)
        # Send 2202 blocks to node1. All blocks will be accepted.
for i in range(2202):
node1.send_message(msg_block(self.blocks[i]))
node1.sync_with_ping() # make sure the most recent block is synced
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send 102 blocks to node2. Block 102 will be rejected.
for i in range(101):
node2.send_message(msg_block(self.blocks[i]))
node2.sync_with_ping() # make sure the most recent block is synced
node2.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
if __name__ == '__main__':
SendHeadersTest().main()
|
the-stack_0_371 |
from setuptools import find_packages, setup
from pathlib import Path
this_directory = Path(__file__).parent
readme = (this_directory / "README.md").read_text()
setup(
name='sentencesimilarity',
packages=find_packages(),
version='0.1.1',
description='Calculates semantic similarity between given sentences.',
long_description= readme,
long_description_content_type='text/markdown',
author='osahin',
author_email = "[email protected]",
license='MIT',
install_requires=['transformers==4.9.2','scikit_learn==0.24.2','torch==1.9.0'],
setup_requires=['pytest-runner'],
tests_require=['pytest==4.4.1'],
test_suite='tests',
)
|
the-stack_0_372 | # -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class PublicTransit:
def distRaw(self, R, C, i1, j1, i2, j2):
# q = [(i1, j1, 0)]
# deltas = [(1, 0), (-1, 0), (0, -1), (0, 1)]
# while q:
# i, j, d = q.pop(0)
# if i == i2 and j == j2:
# return d
# for delta in deltas:
# ni = i + delta[0]
# nj = j + delta[1]
# if 0 <= ni < R and 0 <= nj < C:
# q.append((ni, nj, d+1))
# return 1000
return abs(i1-i2)+abs(j1-j2)
def distAfterConnect(self, R, C, connect, i1, j1, i2, j2):
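        # Shortest walk when the two cells in `connect` are linked: either go directly, or walk
        # to one endpoint of the connection and continue from the other endpoint (the link
        # itself is treated as free by this implementation).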
if i1 == i2 and j1 == j2:
return 0
return min(self.distRaw(R, C, i1, j1, i2, j2), \
self.distRaw(R, C, i1, j1, connect[0], connect[1]) + self.distRaw(R, C, connect[2], connect[3], i2, j2), \
self.distRaw(R, C, i1, j1, connect[2], connect[3]) + self.distRaw(R, C, connect[0], connect[1], i2, j2))
def maxDist(self, R, C, connect):
res = 1
for i1 in range(R):
for j1 in range(C):
for i2 in range(R-1, -1, -1):
for j2 in range(C-1, -1, -1):
if abs(i1-i2) + abs(j1-j2) <= res:
continue
res = max(res, self.distAfterConnect(R, C, connect, i1, j1, i2, j2))
return res
def minimumLongestDistance(self, R, C):
if R <= 0 or C <= 0:
return 0
if R*C <= 2:
return 1
res = 1000
for i1 in range(R):
for j1 in range(C):
for i2 in range(R):
for j2 in range(C):
if i1 == i2 and j1 == j2:
continue
# connect (i, j) and (i2, j2)
res = min(res, self.maxDist(R, C, (i1, j1, i2, j2)))
return res
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
try:
_t = type(expected)
received = _t(received)
if _t == list or _t == tuple:
if len(expected) != len(received): return False
return all(tc_equal(e, r) for (e, r) in zip(expected, received))
elif _t == float:
eps = 1e-9
d = abs(received - expected)
return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
else:
return expected == received
except:
return False
def pretty_str(x):
if type(x) == str:
return '"%s"' % x
elif type(x) == tuple:
return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )
else:
return str(x)
def do_test(R, C, __expected):
startTime = time.time()
instance = PublicTransit()
exception = None
try:
__result = instance.minimumLongestDistance(R, C);
except:
import traceback
exception = traceback.format_exc()
elapsed = time.time() - startTime # in sec
if exception is not None:
sys.stdout.write("RUNTIME ERROR: \n")
sys.stdout.write(exception + "\n")
return 0
if tc_equal(__expected, __result):
sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
return 1
else:
sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
return 0
def run_tests():
sys.stdout.write("PublicTransit (500 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("PublicTransit.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
R = int(f.readline().rstrip())
C = int(f.readline().rstrip())
f.readline()
__answer = int(f.readline().rstrip())
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(R, C, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1431783977
PT, TT = (T / 60.0, 75.0)
points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
|
the-stack_0_373 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import json
import re
import sys
import yaml
environment_file = '.ci_support/environment.yml'
name_mapping_file = '.ci_support/pypi_vs_conda_names.json'
class EnvironmentUpdater:
def __init__(self, package_name, from_version, to_version):
"""
Updates the version of a package in the conda environment file.
Parameters:
package_name: Name of the package to update as available on PyPI
from_version: Version the package is before the update
to_version: Version to which the package should be updated
"""
self.from_version = from_version
self.to_version = to_version
with open(name_mapping_file, 'r') as f:
self._name_conversion_dict = json.load(f)
with open(environment_file, 'r') as f:
self.environment = yaml.safe_load(f)
self.package_name = self._convert_package_name(package_name)
def _convert_package_name(self, name):
if name in self._name_conversion_dict.keys():
result = self._name_conversion_dict[name]
else:
result = name
return result
def _update_dependencies(self):
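        # Example (hypothetical dependency string): "numpy =1.21.2" becomes "numpy =1.22.0";
        # the capture group keeps everything before the old version untouched.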
updated_dependencies = []
for dep in self.environment['dependencies']:
updated_dependencies.append(re.sub(
r'(' + self.package_name + '.*)' + self.from_version,
r'\g<1>' + self.to_version,
dep
))
self.environment['dependencies'] = updated_dependencies
def _write(self):
with open(environment_file, 'w') as f:
yaml.safe_dump(self.environment, f)
def update_dependencies(self):
"""Update the version of the requested dependency in the environment file"""
self._update_dependencies()
self._write()
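# Example invocation, driven by a dependabot PR title (hypothetical values):
#   python <this_script>.py Bump numpy from 1.21.2 to 1.22.0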
if len(sys.argv) != 7 or not (sys.argv[1] == 'Bump' and sys.argv[3] == 'from' and sys.argv[5] == 'to'):
raise ValueError(f"Title of a dependabot PR 'Bump <package> from <version> to <version>' expected, "
f"but got {' '.join(sys.argv[1:])}")
package_to_update = sys.argv[2]
from_version = sys.argv[4]
to_version = sys.argv[6]
updater = EnvironmentUpdater(package_to_update, from_version, to_version)
updater.update_dependencies()
|
the-stack_0_374 | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ViLT model. """
import unittest
from datasets import load_dataset
from packaging import version
from transformers import ViltConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltModel,
)
from transformers.models.vilt.modeling_vilt import VILT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import ViltProcessor
class ViltModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
scope=None,
modality_type_vocab_size=2,
add_multiple_images=False,
num_images=-1,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.modality_type_vocab_size = modality_type_vocab_size
self.add_multiple_images = add_multiple_images
self.num_images = num_images
# we set the expected sequence length (which is used in several tests)
# this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token
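        # e.g. with the defaults above: 7 + (30 // 2) ** 2 + 1 = 7 + 225 + 1 = 233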
self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
if self.add_multiple_images:
pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
else:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels)
def get_config(self):
return ViltConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
num_labels=self.num_labels,
modality_type_vocab_size=self.modality_type_vocab_size,
num_images=self.num_images,
)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
):
model = ViltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)
result = model(input_ids, pixel_values=pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
pixel_values,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
def prepare_pixel_values(self):
return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
@require_torch
class ViltModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
ViltModel,
ViltForQuestionAnswering,
ViltForImageAndTextRetrieval,
ViltForMaskedLM,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
# ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
# if model_class.__name__ == "ViltForNaturalLanguageVisualReasonining":
# inputs_dict["pixel_values"] = floats_tensor([self.model_tester.batch_size, self.model_tester.num_images, self.model_tester.num_channels, self.model_tester.image_size, self.model_tester.image_size])
if return_labels:
if model_class.__name__ == "ViltForQuestionAnswering":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device
)
elif model_class.__name__ == "ViltForMaskedLM":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
elif model_class.__name__ == "ViltForImagesAndTextClassification":
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ViltModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ == "ViltForImagesAndTextClassification":
config.modality_type_vocab_size = 3
# ViltForImageAndTextRetrieval doesn't support training for now
if model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]:
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
for k, v in inputs.items():
print(k, v.shape)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
# ViltForImageAndTextRetrieval doesn't support training for now
if (
model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_save_load(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_determinism(self):
pass
@unittest.skip(
reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic
hidden states"""
)
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "expected_seq_len", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
# attentions are a list of length num_images
# each element contains the attentions of a particular image index
self.assertEqual(len(attentions), self.model_tester.num_images)
self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)
else:
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertEqual(len(self_attentions), self.model_tester.num_images)
self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0][0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
else:
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertEqual(len(hidden_states), self.model_tester.num_images)
self.assertEqual(len(hidden_states[0]), expected_num_layers)
else:
self.assertEqual(len(hidden_states), expected_num_layers)
seq_length = self.model_tester.expected_seq_len
if model_class.__name__ == "ViltForImagesAndTextClassification":
self.assertListEqual(
list(hidden_states[0][0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
else:
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
print("Model class:", model_class)
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
hidden_states[0].retain_grad()
attentions[0].retain_grad()
else:
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
if model_class.__name__ == "ViltForImagesAndTextClassification":
# hidden_states are a list of length num_images
# each element contains the hidden states of a particular image index
self.assertIsNotNone(hidden_states[0].grad)
self.assertIsNotNone(attentions[0].grad)
else:
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
@slow
def test_model_from_pretrained(self):
for model_name in VILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViltModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase):
all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()
def setUp(self):
self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
@unittest.skip("We only test the model that takes in multiple images")
def test_model(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class ViltModelIntegrationTest(unittest.TestCase):
@cached_property
def default_processor(self):
return ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") if is_vision_available() else None
@slow
def test_inference_masked_lm(self):
model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "a bunch of [MASK] laying on a [MASK]."
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size([1, 11, 30522])
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4))
# verify masked token prediction equals "cats"
predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
assert processor.decode([predicted_id]) == "cats"
@slow
def test_inference_visual_question_answering(self):
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(torch_device)
processor = self.default_processor
image = prepare_img()
text = "How many cats are there?"
inputs = processor(image, text, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 3129))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# compute loss
vqa_labels = [[2, 3, 155, 800]]
vqa_scores = [[1.0, 0.3, 0.3, 0.3]]
labels = torch.zeros(1, model.config.num_labels).to(torch_device)
for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(labels_example, scores_example):
labels[i, l] = s
# forward pass
outputs = model(**inputs, labels=labels)
# verify we have a positive loss
self.assertTrue(outputs.loss > 0)
@slow
def test_inference_natural_language_visual_reasoning(self):
model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2").to(
torch_device
)
processor = self.default_processor
dataset = load_dataset("hf-internal-testing/fixtures_nlvr2", split="test")
image1 = Image.open(dataset[0]["file"]).convert("RGB")
image2 = Image.open(dataset[1]["file"]).convert("RGB")
text = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
encoding_1 = processor(image1, text, return_tensors="pt")
encoding_2 = processor(image2, text, return_tensors="pt")
pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1)
# forward pass
outputs = model(
input_ids=encoding_1.input_ids.to(torch_device),
pixel_values=pixel_values.to(torch_device),
)
# verify the logits
expected_shape = torch.Size([1, 2])
self.assertEqual(outputs.logits.shape, expected_shape)
is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
if is_pillow_less_than_9:
expected_slice = torch.tensor(
[-2.4013, 2.9342],
device=torch_device,
)
else:
expected_slice = torch.tensor(
[-2.3713, 2.9168],
device=torch_device,
)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
|
the-stack_0_375 | import os
import sys
module_path = os.path.abspath(os.path.join('../models/'))
print(module_path)
if module_path not in sys.path:
sys.path.append(module_path)
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
import time
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
size = 320
# from refinedet import build_refinedet
# from models.multitrident_refinedet_v2 import build_multitridentrefinedet
from models.multitrident_refinedet import build_multitridentrefinedet
net = build_multitridentrefinedet('test', size, 21) # initialize SSD
# net = build_refinedet('test', 512, 21)
# net.load_weights('../weights/RefineDet512_VOC_final.pth')
# net.load_weights('../weights/experiment/320*320/exp_4_[256relufpn][0.3_0.6][mAP_0.77][dilate:11111-12333-12555]/RefineDet320_VOC_275000.pth')
net.load_weights('../weights/experiment/320*320/RefineDet320_VOC_315000.pth')
"""000210 000111 000144 009539 009589 000069 009539 001275 002333 002338 002341
002695 002713 003681 003874 003673 003740"""
im_names = "002695.jpg"
image_file = '/home/yiling/data/VOCdevkit/VOC2007/JPEGImages/' + im_names
image = cv2.imread(image_file, cv2.IMREAD_COLOR)  # read the image directly from disk (use this if the dataset loader is not available)
#%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train')
testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())
img_id = 62
# image = testset.pull_image(img_id)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
# plt.imshow(rgb_image)
# plt.show()
x = cv2.resize(image, (size, size)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
# plt.imshow(x)
x = torch.from_numpy(x).permute(2, 0, 1)
xx = Variable(x.unsqueeze(0)) # wrap tensor in Variable
if torch.cuda.is_available():
xx = xx.cuda()
start = time.time()
y = net(xx)
end = time.time()
print(end-start)
from data import VOC_CLASSES as labels
top_k=100
plt.figure(figsize=(10,10))
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(rgb_image) # plot the image for matplotlib
currentAxis = plt.gca()
detections = y.data
# scale each detection back up to the image
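# detections hold normalized (xmin, ymin, xmax, ymax); scale = (W, H, W, H) maps them back to pixel coordinates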
scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
for i in range(detections.size(1)):
for j in range(detections.size(2)):
if detections[0,i,j,0] > 0.05:
score = detections[0, i, j, 0]
label_name = labels[i - 1]
display_txt = '%s: %.2f' % (label_name, score)
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
color = colors[i]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})
else:
continue
# j = 0
# while detections[0,i,j,0] >= -1:
# score = detections[0,i,j,0]
# label_name = labels[i-1]
# display_txt = '%s: %.2f'%(label_name, score)
# pt = (detections[0,i,j,1:]*scale).cpu().numpy()
# coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
# color = colors[i]
# currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
# currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})
# j+=1
plt.show()
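# Illustrative sketch (not part of the original demo): the plotting loop above
# mixes detection filtering with drawing. A small helper like the one below,
# whose name and 0.05 threshold are assumptions mirroring that loop, separates
# the two concerns and returns plain Python tuples instead.
def collect_detections(dets, box_scale, class_names, conf_thresh=0.05):
    """Return a list of (label, score, (x1, y1, x2, y2)) above conf_thresh."""
    results = []
    for cls_idx in range(dets.size(1)):
        for det_idx in range(dets.size(2)):
            score = float(dets[0, cls_idx, det_idx, 0])
            if score <= conf_thresh:
                continue
            box = (dets[0, cls_idx, det_idx, 1:] * box_scale).cpu().numpy()
            results.append((class_names[cls_idx - 1], score, tuple(box)))
    return results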
|
the-stack_0_376 | # -*- coding: utf-8 -*-
"""
Python Slack Bot class for use with the pythOnBoarding app
"""
import os
from slackclient import SlackClient
# To remember which teams have authorized your app and what tokens are
# associated with each team, we can store this information in memory
# as a global object. When your bot is out of development, it's best to
# save this in a more persistent memory store.
authed_teams = {}
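# Hedged sketch (not from the original tutorial): the comment above suggests a
# more persistent store for production. A minimal stdlib option is `shelve`,
# which keeps the same dict-like access pattern; the filename and helper names
# here are illustrative assumptions.
import shelve

def save_team_token(team_id, bot_token, path="authed_teams.db"):
    """Persist a team's bot token so it survives process restarts."""
    with shelve.open(path) as store:
        store[team_id] = {"bot_token": bot_token}

def load_team_token(team_id, path="authed_teams.db"):
    """Return the stored token dict for a team, or None if unknown."""
    with shelve.open(path) as store:
        return store.get(team_id)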
class Bot(object):
""" Instanciates a Bot object to handle Slack onboarding interactions."""
def __init__(self):
super(Bot, self).__init__()
self.name = "come_back_here"
self.emoji = ":robot_face:"
# When we instantiate a new bot object, we can access the app
# credentials we set earlier in our local development environment.
self.oauth = {"client_id": os.environ.get("CLIENT_ID"),
"client_secret": os.environ.get("CLIENT_SECRET"),
# Scopes provide and limit permissions to what our app
# can access. It's important to use the most restricted
# scope that your app will need.
"scope": "users.profile:read"}
self.oauth
self.verification = os.environ.get("VERIFICATION_TOKEN")
# NOTE: Python-slack requires a client connection to generate
# an oauth token. We can connect to the client without authenticating
# by passing an empty string as a token and then reinstantiating the
# client with a valid OAuth token once we have one.
self.client = SlackClient("")
# We'll use this dictionary to store the state of each message object.
# In a production environment you'll likely want to store this more
# persistently in a database.
self.messages = {}
def auth(self, code):
"""
Authenticate with OAuth and assign correct scopes.
Save a dictionary of authed team information in memory on the bot
object.
Parameters
----------
code : str
temporary authorization code sent by Slack to be exchanged for an
OAuth token
"""
# After the user has authorized this app for use in their Slack team,
# Slack returns a temporary authorization code that we'll exchange for
# an OAuth token using the oauth.access endpoint
auth_response = self.client.api_call(
"oauth.access",
client_id=self.oauth["client_id"],
client_secret=self.oauth["client_secret"],
code=code
)
# To keep track of authorized teams and their associated OAuth tokens,
# we will save the team ID and bot tokens to the global
# authed_teams object
team_id = auth_response["team_id"]
authed_teams[team_id] = {"bot_token":
auth_response["access_token"]}
# Then we'll reconnect to the Slack Client with the correct team's
# bot token
self.client = SlackClient(authed_teams[team_id]["bot_token"])
def bring_back_user(self, user_id, channel, token):
"""
Send a message asking the user to come back, and re-invite them to the
channel they just left.
Parameters
----------
user_id : str
    id of the Slack user to call back into the channel
channel : str
    id of the channel to post the message in and re-invite the user to
token : str
    OAuth token used for the Slack API calls
"""
# Build the "come back here" message, post it to the channel the user
# left, and then re-invite the user to that channel.
text = "Hey... get back here <@" + str(user_id) + ">"
self.client.api_call(
"chat.postMessage",
channel=channel,
token=token,
username=self.name,
icon_emoji=self.emoji,
text=text
)
self.client.api_call("channels.invite", token=token, channel=channel, user=user_id)
|
the-stack_0_377 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import errno
import logging
from geoserver.layer import Layer as GsLayer
from django.conf import settings
from django.dispatch import receiver, Signal
from django.forms.models import model_to_dict
from django.contrib.staticfiles.templatetags import staticfiles
# use different name to avoid module clash
from geonode.utils import (
is_monochromatic_image,
json_serializer_producer)
from geonode.decorators import on_ogc_backend
from geonode.geoserver.helpers import (
gs_catalog,
ogc_server_settings)
from geonode.geoserver.tasks import geoserver_create_thumbnail
from geonode.layers.models import Layer
from geonode.services.enumerations import CASCADED
from . import BACKEND_PACKAGE
from .tasks import geoserver_cascading_delete, geoserver_post_save_layers
logger = logging.getLogger("geonode.geoserver.signals")
geoserver_post_save_complete = Signal(providing_args=['instance'])
def geoserver_delete(typename):
# cascading_delete should only be called if
# ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings, "BACKEND_WRITE_ENABLED", True):
geoserver_cascading_delete.apply_async((typename,))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_delete(instance, sender, **kwargs):
"""Removes the layer from GeoServer
"""
# cascading_delete should only be called if
# ogc_server_settings.BACKEND_WRITE_ENABLED == True
if getattr(ogc_server_settings, "BACKEND_WRITE_ENABLED", True):
if instance.remote_service is None or instance.remote_service.method == CASCADED:
if instance.alternate:
geoserver_cascading_delete.apply_async((instance.alternate,))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_save(*args, **kwargs):
# nothing to do here, processing is pushed to post-save
pass
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save(instance, sender, created, **kwargs):
from geonode.messaging import producer
# this is attached to various models, (ResourceBase, Document)
# so we should select what will be handled here
if isinstance(instance, Layer):
instance_dict = model_to_dict(instance)
payload = json_serializer_producer(instance_dict)
try:
producer.geoserver_upload_layer(payload)
except Exception as e:
logger.error(e)
if getattr(settings, 'DELAYED_SECURITY_SIGNALS', False):
instance.set_dirty_state()
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save_local(instance, *args, **kwargs):
"""Send information to geoserver.
The attributes sent include:
* Title
* Abstract
* Name
* Keywords
* Metadata Links,
* Point of Contact name and url
"""
geoserver_post_save_layers.apply_async(
(instance.id, args, kwargs))
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_pre_save_maplayer(instance, sender, **kwargs):
# If this object was saved via fixtures,
# do not do post processing.
if kwargs.get('raw', False):
return
try:
instance.local = isinstance(
gs_catalog.get_layer(
instance.name),
GsLayer)
except EnvironmentError as e:
if e.errno == errno.ECONNREFUSED:
msg = f'Could not connect to catalog to verify if layer {instance.name} was local'
logger.warn(msg)
else:
raise e
@on_ogc_backend(BACKEND_PACKAGE)
def geoserver_post_save_map(instance, sender, created, **kwargs):
instance.set_missing_info()
if not created:
if not instance.thumbnail_url or \
instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL):
logger.debug(f"... Creating Thumbnail for Map [{instance.title}]")
# create_gs_thumbnail(instance, overwrite=False, check_bbox=True)
geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))
@receiver(geoserver_post_save_complete)
def geoserver_post_save_thumbnail(sender, instance, **kwargs):
# Creating Layer Thumbnail
# some thumbnail generators will update thumbnail_url. If so, don't
# immediately re-generate the thumbnail here. use layer#save(update_fields=['thumbnail_url'])
try:
instance.refresh_from_db()
logger.debug(f"... Creating Thumbnail for Layer {instance.title}")
_recreate_thumbnail = False
if 'update_fields' in kwargs and kwargs['update_fields'] is not None and \
'thumbnail_url' in kwargs['update_fields']:
_recreate_thumbnail = True
if not instance.thumbnail_url or \
instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL) or \
is_monochromatic_image(instance.thumbnail_url):
_recreate_thumbnail = True
if _recreate_thumbnail:
geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))
else:
logger.debug(f"... Thumbnail for Layer {instance.title} already exists: {instance.thumbnail_url}")
except Exception as e:
logger.exception(e)
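# Hedged sketch (not part of this module): in GeoNode these receivers are wired
# to Django's model signals elsewhere in the project; a minimal manual wiring
# would look like the function below. The sender choices are illustrative
# assumptions, not the project's actual registration code.
def connect_geoserver_signals():
    from django.db.models import signals
    signals.pre_delete.connect(geoserver_pre_delete, sender=Layer)
    signals.pre_save.connect(geoserver_pre_save, sender=Layer)
    signals.post_save.connect(geoserver_post_save_local, sender=Layer)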
|
the-stack_0_380 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for low-level eager execution primitives.
Packaged as a test to ensure that this code is exercised by continuous
integration tests. To get numbers:
bazel build -c opt :benchmarks_test &&
./bazel-bin/tensorflow/python/eager/benchmarks_test --iters=0
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
FLAGS = None
@contextlib.contextmanager
def timer(label, iters=30000):
start = time.time()
yield xrange(iters)
end = time.time()
t = (end - start) * 1e6 / iters
print("%-40s took %.2fus (%d iterations)" % (label, t, iters))
def benchmark_create_tensor(n):
"""Benchmark overheads of creating a Tensor object."""
def label(s):
return "{:20s}".format(s)
with timer(label("np.array([[3.0]])"), iters=n) as iters:
for _ in iters:
np.array([[3.0]])
ctx = context.context()
handle = ctx._handle
device = ctx.device_name
# Maybe warm up the GPU.
ops.EagerTensor([[3.0]], context=handle, device=device)
# float32
dtype = dtypes.float32.as_datatype_enum
three = [[3.0]]
with timer(label("EagerTensor([[3.0]])"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
np_3 = np.array([[3.0]], dtype=np.float32)
with timer(label("EagerTensor(np.array([[3.0]]))"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
# int32.
# This is interesting since int32 will be kept on host memory for the GPU
# case.
dtype = dtypes.int32.as_datatype_enum
three = [[3]]
with timer(label("EagerTensor([[3]])"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(three, context=handle, device=device, dtype=dtype)
np_3 = np.array([[3]], dtype=np.int32)
with timer(label("EagerTensor(np.array([[3]]))"), iters=n) as iters:
for _ in iters:
ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)
def benchmark_matmul(shape, n, use_gpu=False):
"""Benchmark for matrix multiplication using tf.matmul."""
transpose_b = (shape[0] != shape[1])
m = random_ops.random_uniform(shape)
if use_gpu:
m = m.gpu()
# Warm up the GPU - the very first kernel invocation
# seems to require a bunch of setup.
math_ops.matmul(m, m, transpose_b=transpose_b)
def label(s):
return "MatMul {}: {:30s}".format(shape, s)
if not use_gpu:
a = m.cpu().numpy()
b = a.T if transpose_b else a
with timer(label("np.dot"), iters=n) as iters:
for _ in iters:
np.dot(a, b)
with timer(label("tf.matmul"), iters=n) as iters:
for _ in iters:
math_ops.matmul(m, m, transpose_b=transpose_b)
with timer(label("gen_math_ops.mat_mul"), iters=n) as iters:
for _ in iters:
gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
with timer(label("TFE_Py_Execute"), iters=n) as iters:
for _ in iters:
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "MatMul",
inputs, attrs, 1)
f = function.defun(math_ops.matmul)
with timer(label("defun(tf.matmul)"), iters=n) as iters:
for _ in iters:
f(m, m, transpose_b=transpose_b)
def benchmark_multiply(shape, n, use_gpu=False):
m = random_ops.random_uniform(shape)
if use_gpu:
m = m.gpu()
# Warm up the GPU - the very first kernel invocation
# seems to require a bunch of setup.
_ = m * m
def label(s):
return "Multiply {}: {:30s}".format(shape, s)
if not use_gpu:
a = m.cpu().numpy()
with timer(label("np.multiply"), iters=n) as iters:
for _ in iters:
_ = a * a
with timer(label("tf.multiply"), iters=n) as iters:
for _ in iters:
_ = m * m
class BenchmarksTest(test_util.TensorFlowTestCase):
def testBenchmarks(self):
# This isn't actually a test, but benchmarks packaged as a test
# so that continuous integration runs catch any breakages.
print(context.context())
benchmark_create_tensor(FLAGS.iters or 30000)
benchmark_matmul([2, 2], FLAGS.iters or 30000)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000)
benchmark_multiply([2], FLAGS.iters or 30000)
if context.context().num_gpus() > 0:
print("---- RUNNING ON GPU NOW ----")
with context.device("/device:GPU:0"):
benchmark_create_tensor(FLAGS.iters or 30000)
benchmark_matmul([2, 2], FLAGS.iters or 30000, use_gpu=True)
benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000, use_gpu=True)
benchmark_multiply([2], FLAGS.iters or 30000, use_gpu=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Default iterations to 1 to keep continuous integration test times low.
parser.add_argument(
"--iters",
type=int,
default=1,
help="Number of iterators for each test. None or 0 for auto-selection")
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
the-stack_0_385 | # -*- encoding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import itertools
import h2o
from h2o.job import H2OJob
from h2o.frame import H2OFrame
from h2o.exceptions import H2OValueError
from h2o.estimators.estimator_base import H2OEstimator
from h2o.two_dim_table import H2OTwoDimTable
from h2o.display import H2ODisplay
from h2o.grid.metrics import * # NOQA
from h2o.utils.backward_compatibility import backwards_compatible
from h2o.utils.shared_utils import deprecated, quoted
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, is_type
class H2OGridSearch(backwards_compatible()):
"""
Grid Search of a Hyper-Parameter Space for a Model
:param model: The type of model to be explored initialized with optional parameters that will be
unchanged across explored models.
:param hyper_params: A dictionary of string parameters (keys) and a list of values to be explored by grid
search (values).
:param str grid_id: The unique id assigned to the resulting grid object. If none is given, an id will
automatically be generated.
:param search_criteria: A dictionary of directives which control the search of the hyperparameter space.
The default strategy "Cartesian" covers the entire space of hyperparameter combinations. Specify the
"RandomDiscrete" strategy to get random search of all the combinations of your hyperparameters.
RandomDiscrete should usually be combined with at least one early stopping criterion: max_models
and/or max_runtime_secs, e.g::
>>> criteria = {"strategy": "RandomDiscrete", "max_models": 42,
... "max_runtime_secs": 28800, "seed": 1234}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_metric": "AUTO",
... "stopping_tolerance": 0.001, "stopping_rounds": 10}
>>> criteria = {"strategy": "RandomDiscrete", "stopping_rounds": 5,
... "stopping_metric": "misclassification",
... "stopping_tolerance": 0.00001}
:returns: a new H2OGridSearch instance
Examples
--------
>>> from h2o.grid.grid_search import H2OGridSearch
>>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator
>>> hyper_parameters = {'alpha': [0.01,0.5], 'lambda': [1e-5,1e-6]}
>>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters)
>>> training_data = h2o.import_file("smalldata/logreg/benign.csv")
>>> gs.train(x=range(3) + range(4,11),y=3, training_frame=training_data)
>>> gs.show()
"""
def __init__(self, model, hyper_params, grid_id=None, search_criteria=None, export_checkpoints_dir=None):
super(H2OGridSearch, self).__init__()
assert_is_type(model, None, H2OEstimator, lambda mdl: issubclass(mdl, H2OEstimator))
assert_is_type(hyper_params, dict)
assert_is_type(grid_id, None, str)
assert_is_type(search_criteria, None, dict)
if not (model is None or is_type(model, H2OEstimator)): model = model()
self._id = grid_id
self.model = model
self.hyper_params = dict(hyper_params)
self.search_criteria = None if search_criteria is None else dict(search_criteria)
self.export_checkpoints_dir = export_checkpoints_dir
self._grid_json = None
self.models = None # list of H2O Estimator instances
self._parms = {} # internal, for object recycle #
self.parms = {} # external#
self._future = False # used by __repr__/show to query job state#
self._job = None # used when _future is True#
@property
def grid_id(self):
"""A key that identifies this grid search object in H2O."""
return self._id
@grid_id.setter
def grid_id(self, value):
oldname = self.grid_id
self._id = value
h2o.rapids('(rename "{}" "{}")'.format(oldname, value))
@property
def model_ids(self):
return [i['name'] for i in self._grid_json["model_ids"]]
@property
def hyper_names(self):
return self._grid_json["hyper_names"]
@property
def failed_params(self):
return self._grid_json.get("failed_params", None)
@property
def failure_details(self):
return self._grid_json.get("failure_details", None)
@property
def failure_stack_traces(self):
return self._grid_json.get("failure_stack_traces", None)
@property
def failed_raw_params(self):
return self._grid_json.get("failed_raw_params", None)
def start(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Asynchronous model build by specifying the predictor columns, response column, and any
additional frame-specific values.
To block for results, call :meth:`join`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
self._future = True
self.train(x=x,
y=y,
training_frame=training_frame,
offset_column=offset_column,
fold_column=fold_column,
weights_column=weights_column,
validation_frame=validation_frame,
**params)
def join(self):
"""Wait until grid finishes computing."""
self._future = False
self._job.poll()
self._job = None
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,
validation_frame=None, **params):
"""
Train the model synchronously (i.e. do not return until the model finishes training).
To train asynchronously call :meth:`start`.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
"""
algo_params = locals()
parms = self._parms.copy()
parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
# dictionaries have special handling in grid search, avoid the implicit conversion
parms["search_criteria"] = None if self.search_criteria is None else str(self.search_criteria)
parms["export_checkpoints_dir"] = self.export_checkpoints_dir
parms["hyper_parameters"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search
parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search
parms.update(params)
if '__class__' in parms:  # FIXME: hack for PY3
del parms['__class__']
y = algo_params["y"]
tframe = algo_params["training_frame"]
if tframe is None: raise ValueError("Missing training_frame")
if y is not None:
if is_type(y, list, tuple):
if len(y) == 1:
parms["y"] = y[0]
else:
raise ValueError('y must be a single column reference')
if x is None:
if(isinstance(y, int)):
xset = set(range(training_frame.ncols)) - {y}
else:
xset = set(training_frame.names) - {y}
else:
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-training_frame.ncols <= xi < training_frame.ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(training_frame.names[xi])
else:
if xi not in training_frame.names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
x = list(xset)
parms["x"] = x
self.build_model(parms)
def build_model(self, algo_params):
"""(internal)"""
if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
x = algo_params.pop("x")
y = algo_params.pop("y", None)
training_frame = algo_params.pop("training_frame")
validation_frame = algo_params.pop("validation_frame", None)
is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
algo = self.model._compute_algo() # unique to grid search
is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
if not is_unsupervised and y is None: raise ValueError("Missing response")
if not is_unsupervised:
y = y if y in training_frame.names else training_frame.names[y]
self.model._estimator_type = "classifier" if training_frame.types[y] == "enum" else "regressor"
self._model_build(x, y, training_frame, validation_frame, algo_params)
def _model_build(self, x, y, tframe, vframe, kwargs):
kwargs['training_frame'] = tframe
if vframe is not None: kwargs["validation_frame"] = vframe
if is_type(y, int): y = tframe.names[y]
if y is not None: kwargs['response_column'] = y
if not is_type(x, list, tuple): x = [x]
if is_type(x[0], int):
x = [tframe.names[i] for i in x]
offset = kwargs["offset_column"]
folds = kwargs["fold_column"]
weights = kwargs["weights_column"]
ignored_columns = list(set(tframe.names) - set(x + [y, offset, folds, weights]))
kwargs["ignored_columns"] = None if not ignored_columns else [quoted(col) for col in ignored_columns]
kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if
kwargs[k] is not None]) # gruesome one-liner
algo = self.model._compute_algo() # unique to grid search
if self.grid_id is not None: kwargs["grid_id"] = self.grid_id
rest_ver = kwargs.pop("_rest_version") if "_rest_version" in kwargs else None
grid = H2OJob(h2o.api("POST /99/Grid/%s" % algo, data=kwargs), job_type=(algo + " Grid Build"))
if self._future:
self._job = grid
return
grid.poll()
grid_json = h2o.api("GET /99/Grids/%s" % (grid.dest_key))
failure_messages_stacks = ""
error_index = 0
if len(grid_json["failure_details"]) > 0:
print("Errors/Warnings building gridsearch model\n")
# will raise error if no grid model is returned, store error messages here
for error_message in grid_json["failure_details"]:
if isinstance(grid_json["failed_params"][error_index], dict):
for h_name in grid_json['hyper_names']:
print("Hyper-parameter: {0}, {1}".format(h_name,
grid_json['failed_params'][error_index][h_name]))
if len(grid_json["failure_stack_traces"]) > error_index:
print("failure_details: {0}\nfailure_stack_traces: "
"{1}\n".format(error_message, grid_json['failure_stack_traces'][error_index]))
failure_messages_stacks += error_message+'\n'
error_index += 1
self.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']]
for model in self.models:
model._estimator_type = self.model._estimator_type
# get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)
# sometimes no model is returned due to bad parameter values provided by the user.
if len(grid_json['model_ids']) > 0:
first_model_json = h2o.api("GET /%d/Models/%s" %
(rest_ver or 3, grid_json['model_ids'][0]['name']))['models'][0]
self._resolve_grid(grid.dest_key, grid_json, first_model_json)
else:
if len(failure_messages_stacks)>0:
raise ValueError(failure_messages_stacks)
else:
raise ValueError("Gridsearch returns no model due to bad parameter values or other reasons....")
def _resolve_grid(self, grid_id, grid_json, first_model_json):
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = grid_id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = self._parms
self.export_checkpoints_dir = m._grid_json["export_checkpoints_dir"]
H2OEstimator.mixin(self, model_class)
self.__dict__.update(m.__dict__.copy())
def __getitem__(self, item):
return self.models[item]
def __iter__(self):
nmodels = len(self.models)
return (self[i] for i in range(nmodels))
def __len__(self):
return len(self.models)
def __repr__(self):
self.show()
return ""
def predict(self, test_data):
"""
Predict on a dataset.
:param H2OFrame test_data: Data to be predicted on.
:returns: H2OFrame filled with predictions.
"""
return {model.model_id: model.predict(test_data) for model in self.models}
def is_cross_validated(self):
"""Return True if the model was cross-validated."""
return {model.model_id: model.is_cross_validated() for model in self.models}
def xval_keys(self):
"""Model keys for the cross-validated model."""
return {model.model_id: model.xval_keys() for model in self.models}
def get_xval_models(self, key=None):
"""
Return a Model object.
:param str key: If None, return all cross-validated models; otherwise return the model
specified by the key.
:returns: A model or a list of models.
"""
return {model.model_id: model.get_xval_models(key) for model in self.models}
def xvals(self):
"""Return the list of cross-validated models."""
return {model.model_id: model.xvals for model in self.models}
def deepfeatures(self, test_data, layer):
"""
Obtain a hidden layer's details on a dataset.
:param test_data: Data to create a feature space on.
:param int layer: Index of the hidden layer.
:returns: A dictionary of hidden layer details for each model.
"""
return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}
def weights(self, matrix_id=0):
"""
Return the frame for the respective weight matrix.
:param matrix_id: an integer, ranging from 0 to the number of layers, that specifies the weight matrix to return.
:returns: an H2OFrame which represents the weight matrix identified by matrix_id
"""
return {model.model_id: model.weights(matrix_id) for model in self.models}
def biases(self, vector_id=0):
"""
Return the frame for the respective bias vector.
:param vector_id: an integer, ranging from 0 to the number of layers, that specifies the bias vector to return.
:returns: an H2OFrame which represents the bias vector identified by vector_id
"""
return {model.model_id: model.biases(vector_id) for model in self.models}
def normmul(self):
"""Normalization/Standardization multipliers for numeric predictors."""
return {model.model_id: model.normmul() for model in self.models}
def normsub(self):
"""Normalization/Standardization offsets for numeric predictors."""
return {model.model_id: model.normsub() for model in self.models}
def respmul(self):
"""Normalization/Standardization multipliers for numeric response."""
return {model.model_id: model.respmul() for model in self.models}
def respsub(self):
"""Normalization/Standardization offsets for numeric response."""
return {model.model_id: model.respsub() for model in self.models}
def catoffsets(self):
"""
Categorical offsets for one-hot encoding
"""
return {model.model_id: model.catoffsets() for model in self.models}
def model_performance(self, test_data=None, train=False, valid=False, xval=False):
"""
Generate model metrics for this model on test_data.
:param test_data: Data set against which model metrics shall be computed. All three of train, valid
and xval arguments are ignored if test_data is not None.
:param train: Report the training metrics for the model.
:param valid: Report the validation metrics for the model.
:param xval: Report the cross-validation metrics for the model.
:return: An object of class H2OModelMetrics.
"""
return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}
def scoring_history(self):
"""
Retrieve model scoring history.
:returns: Score history (H2OTwoDimTable)
"""
return {model.model_id: model.scoring_history() for model in self.models}
def summary(self, header=True):
"""Print a detailed summary of the explored models."""
table = []
for model in self.models:
model_summary = model._model_json["output"]["model_summary"]
r_values = list(model_summary.cell_values[0])
r_values[0] = model.model_id
table.append(r_values)
# if h2o.can_use_pandas():
# import pandas
# pandas.options.display.max_rows = 20
# print pandas.DataFrame(table,columns=self.col_header)
# return
print()
if header:
print('Grid Summary:')
print()
H2ODisplay(table, header=['Model Id'] + model_summary.col_header[1:], numalign="left", stralign="left")
def show(self):
"""Print models sorted by metric."""
hyper_combos = itertools.product(*list(self.hyper_params.values()))
if not self.models:
c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]
print(H2OTwoDimTable(
col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],
table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))
else:
print(self.sorted_metric_table())
def varimp(self, use_pandas=False):
"""
Pretty print the variable importances, or return them in a list/pandas DataFrame.
:param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
:returns: A dictionary of lists or Pandas DataFrame instances.
"""
return {model.model_id: model.varimp(use_pandas) for model in self.models}
def residual_deviance(self, train=False, valid=False, xval=False):
"""
Retrieve the residual deviance if this model has the attribute, or None otherwise.
:param bool train: Get the residual deviance for the training set. If both train and valid are False,
then train is selected by default.
:param bool valid: Get the residual deviance for the validation set. If both train and valid are True,
then train is selected by default.
:param bool xval: Get the residual deviance for the cross-validated models.
:returns: the residual deviance, or None if it is not present.
"""
return {model.model_id: model.residual_deviance(train, valid, xval) for model in self.models}
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the residual dof for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the residual dof for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the residual dof for the cross-validated models.
:returns: the residual degrees of freedom, or None if they are not present.
"""
return {model.model_id: model.residual_degrees_of_freedom(train, valid, xval) for model in self.models}
def null_deviance(self, train=False, valid=False, xval=False):
"""
Retrieve the null deviance if this model has the attribute, or None otherwise.
:param bool train: Get the null deviance for the training set. If both train and valid are False, then
train is selected by default.
:param bool valid: Get the null deviance for the validation set. If both train and valid are True, then
train is selected by default.
:param bool xval: Get the null deviance for the cross-validated models.
:returns: the null deviance, or None if it is not present.
"""
return {model.model_id: model.null_deviance(train, valid, xval) for model in self.models}
def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
"""
Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.
:param bool train: Get the null dof for the training set. If both train and valid are False, then train is
selected by default.
:param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
selected by default.
:param bool xval: Get the null dof for the cross-validated models.
:returns: the null dof, or None if it is not present.
"""
return {model.model_id: model.null_degrees_of_freedom(train, valid, xval) for model in self.models}
def pprint_coef(self):
"""Pretty print the coefficents table (includes normalized coefficients)."""
for i, model in enumerate(self.models):
print('Model', i)
model.pprint_coef()
print()
def coef(self):
"""Return the coefficients that can be applied to the non-standardized data.
Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.
"""
return {model.model_id: model.coef() for model in self.models}
def coef_norm(self):
"""Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). These coefficients can be used to evaluate variable importance.
"""
return {model.model_id: model.coef_norm() for model in self.models}
def r2(self, train=False, valid=False, xval=False):
"""
Return the R^2 for this regression model.
The R^2 value is defined to be ``1 - MSE/var``, where ``var`` is computed as ``sigma^2``.
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the R^2 value for the training data.
:param bool valid: If valid is True, then return the R^2 value for the validation data.
:param bool xval: If xval is True, then return the R^2 value for the cross validation data.
:returns: The R^2 for this regression model.
"""
return {model.model_id: model.r2(train, valid, xval) for model in self.models}
def mse(self, train=False, valid=False, xval=False):
"""
Get the MSE(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the MSE value for the training data.
:param bool valid: If valid is True, then return the MSE value for the validation data.
:param bool xval: If xval is True, then return the MSE value for the cross validation data.
:returns: The MSE for this regression model.
"""
return {model.model_id: model.mse(train, valid, xval) for model in self.models}
def logloss(self, train=False, valid=False, xval=False):
"""
Get the Log Loss(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Log Loss value for the training data.
:param bool valid: If valid is True, then return the Log Loss value for the validation data.
:param bool xval: If xval is True, then return the Log Loss value for the cross validation data.
:returns: The Log Loss for this binomial model.
"""
return {model.model_id: model.logloss(train, valid, xval) for model in self.models}
def mean_residual_deviance(self, train=False, valid=False, xval=False):
"""
Get the Mean Residual Deviance(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
:param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
:param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
:returns: The Mean Residual Deviance for this regression model.
"""
return {model.model_id: model.mean_residual_deviance(train, valid, xval) for model in self.models}
def auc(self, train=False, valid=False, xval=False):
"""
Get the AUC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AUC value for the training data.
:param bool valid: If valid is True, then return the AUC value for the validation data.
:param bool xval: If xval is True, then return the AUC value for the cross validation data.
:returns: The AUC.
"""
return {model.model_id: model.auc(train, valid, xval) for model in self.models}
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AIC value for the training data.
:param bool valid: If valid is True, then return the AIC value for the validation data.
:param bool xval: If xval is True, then return the AIC value for the cross validation data.
:returns: The AIC.
"""
return {model.model_id: model.aic(train, valid, xval) for model in self.models}
def gini(self, train=False, valid=False, xval=False):
"""
Get the Gini Coefficient(s).
If all are False (default), then return the training metric value.
If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the Gini Coefficient value for the training data.
:param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:returns: The Gini Coefficient for this binomial model.
"""
return {model.model_id: model.gini(train, valid, xval) for model in self.models}
def get_hyperparams(self, id, display=True):
"""
Get the hyperparameters of a model explored by grid search.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A list of the hyperparameters for the specified model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
# if cross-validation is turned on, parameters in one of the fold models actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)
else model.params[h]['actual']
for h in self.hyper_params]
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return res
def get_hyperparams_dict(self, id, display=True):
"""
Derive and return the model parameters used to train this particular grid search model.
:param str id: The model id of the model with hyperparameters of interest.
:param bool display: Flag to indicate whether to display the hyperparameter names.
:returns: A dict of model parameters derived from the hyper-parameters used to train this particular model.
"""
idx = id if is_type(id, int) else self.model_ids.index(id)
model = self[idx]
model_params = dict()
# if cross-validation is turned on, parameters in one of the fold models actually contains the max_runtime_secs
# parameter and not the main model that is returned.
if model._is_xvalidated:
model = h2o.get_model(model._xval_keys[0])
for param_name in self.hyper_names:
model_params[param_name] = model.params[param_name]['actual'][0] if \
isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']
if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')
return model_params
def sorted_metric_table(self):
"""
Retrieve summary table of an H2O Grid Search.
:returns: The summary table as an H2OTwoDimTable or a Pandas DataFrame.
"""
summary = self._grid_json["summary_table"]
if summary is not None: return summary.as_data_frame()
print("No sorted metric table for this grid search")
@staticmethod
def _metrics_class(model_json):
model_type = model_json["output"]["model_category"]
if model_type == "Binomial":
model_class = H2OBinomialGridSearch
elif model_type == "Clustering":
model_class = H2OClusteringGridSearch
elif model_type == "Regression":
model_class = H2ORegressionGridSearch
elif model_type == "Multinomial":
model_class = H2OMultinomialGridSearch
elif model_type == "Ordinal":
model_class = H2OOrdinalGridSearch
elif model_type == "AutoEncoder":
model_class = H2OAutoEncoderGridSearch
elif model_type == "DimReduction":
model_class = H2ODimReductionGridSearch
else:
raise NotImplementedError(model_type)
return model_class
def get_grid(self, sort_by=None, decreasing=None):
"""
Retrieve an H2OGridSearch instance.
Optionally specify a metric by which to sort models and a sort order.
Note that if neither cross-validation nor a validation frame is used in the grid search, then the
training metrics will display in the "get grid" output. If a validation frame is passed to the grid, and
``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation
metrics will display even if a validation frame is provided.
:param str sort_by: A metric by which to sort the models in the grid space. Choices are: ``"logloss"``,
``"residual_deviance"``, ``"mse"``, ``"auc"``, ``"r2"``, ``"accuracy"``, ``"precision"``, ``"recall"``,
``"f1"``, etc.
:param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing
order (default).
:returns: A new H2OGridSearch instance optionally sorted on the specified metric.
"""
if sort_by is None and decreasing is None: return self
grid_json = h2o.api("GET /99/Grids/%s" % self._id, data={"sort_by": sort_by, "decreasing": decreasing})
grid = H2OGridSearch(self.model, self.hyper_params, self._id)
grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered
first_model_json = h2o.api("GET /99/Models/%s" % grid_json['model_ids'][0]['name'])['models'][0]
model_class = H2OGridSearch._metrics_class(first_model_json)
m = model_class()
m._id = self._id
m._grid_json = grid_json
# m._metrics_class = metrics_class
m._parms = grid._parms
H2OEstimator.mixin(grid, model_class)
grid.__dict__.update(m.__dict__.copy())
return grid
# Deprecated functions; left here for backward compatibility
_bcim = {
"giniCoef": lambda self, *args, **kwargs: self.gini(*args, **kwargs)
}
@deprecated("grid.sort_by() is deprecated; use grid.get_grid() instead")
def sort_by(self, metric, increasing=True):
"""Deprecated since 2016-12-12, use grid.get_grid() instead."""
if metric[-1] != ')': metric += '()'
c_values = [list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'), key=lambda k_v: k_v[1]))]
c_values.insert(1, [self.get_hyperparams(model_id, display=False) for model_id in c_values[0]])
if not increasing:
for col in c_values: col.reverse()
if metric[-2] == '(': metric = metric[:-2]
return H2OTwoDimTable(
col_header=['Model Id', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']', metric],
table_header='Grid Search Results for ' + self.model.__class__.__name__,
cell_values=[list(x) for x in zip(*c_values)])
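# Hedged usage sketch (not part of the H2O source): once a grid has been
# trained, get_grid() above returns a copy of the grid with its models
# re-ordered by a metric. The metric name below is an illustrative choice and
# assumes binomial models.
def example_sorted_grid(grid):
    """Return the same grid sorted by decreasing AUC."""
    return grid.get_grid(sort_by="auc", decreasing=True)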
|
the-stack_0_386 | # -*- coding: utf-8 -*-
import os
from nbformat.v4.nbbase import new_notebook, new_code_cell, new_markdown_cell, new_raw_cell
from jupytext.compare import compare, compare_notebooks
import jupytext
def test_read_simple_file(script="""# ---
# title: Simple file
# ---
# %% [markdown]
# This is a markdown cell
# %% [md]
# This is also a markdown cell
# %% [raw]
# This is a raw cell
# %%% sub-cell title
# This is a sub-cell
# %%%% sub-sub-cell title
# This is a sub-sub-cell
# %% And now a code cell
1 + 2 + 3 + 4
5
6
# %%magic # this is a commented magic, not a cell
7
"""):
nb = jupytext.reads(script, 'py:percent')
compare_notebooks(new_notebook(cells=[
new_raw_cell('---\ntitle: Simple file\n---'),
new_markdown_cell('This is a markdown cell'),
new_markdown_cell('This is also a markdown cell', metadata={'region_name': 'md'}),
new_raw_cell('This is a raw cell'),
new_code_cell('# This is a sub-cell', metadata={'title': 'sub-cell title', 'cell_depth': 1}),
new_code_cell('# This is a sub-sub-cell', metadata={'title': 'sub-sub-cell title', 'cell_depth': 2}),
new_code_cell('''1 + 2 + 3 + 4
5
6
%%magic # this is a commented magic, not a cell
7''', metadata={'title': 'And now a code cell'})]), nb)
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_read_cell_with_metadata(
script="""# %% a code cell with parameters {"tags": ["parameters"]}
a = 3
"""):
nb = jupytext.reads(script, 'py:percent')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'code'
assert nb.cells[0].source == 'a = 3'
assert nb.cells[0].metadata == {
'title': 'a code cell with parameters',
'tags': ['parameters']}
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_read_nbconvert_script(script="""
# coding: utf-8
# A markdown cell
# In[1]:
import pandas as pd
pd.options.display.max_rows = 6
pd.options.display.max_columns = 20
# Another markdown cell
# In[2]:
1 + 1
# Again, a markdown cell
# In[33]:
2 + 2
# <codecell>
3 + 3
"""):
assert jupytext.formats.guess_format(script, '.py')[0] == 'percent'
nb = jupytext.reads(script, '.py')
assert len(nb.cells) == 5
def test_read_remove_blank_lines(script="""# %%
import pandas as pd
# %% Display a data frame
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]},
index=pd.Index(['x0', 'x1'], name='x'))
df
# %% Pandas plot {"tags": ["parameters"]}
df.plot(kind='bar')
# %% sample class
class MyClass:
pass
# %% a function
def f(x):
return 42 * x
"""):
nb = jupytext.reads(script, 'py')
assert len(nb.cells) == 5
for i in range(5):
assert nb.cells[i].cell_type == 'code'
assert not nb.cells[i].source.startswith('\n')
assert not nb.cells[i].source.endswith('\n')
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_no_crash_on_square_bracket(script="""# %% In [2]
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
compare(script2, script)
def test_nbconvert_cell(script="""# In[2]:
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
expected = """# %%
print('Hello')
"""
compare(script2, expected)
def test_nbformat_v3_nbpy_cell(script="""# <codecell>
print('Hello')
"""):
nb = jupytext.reads(script, 'py')
script2 = jupytext.writes(nb, 'py:percent')
expected = """# %%
print('Hello')
"""
compare(script2, expected)
def test_multiple_empty_cells():
nb = new_notebook(cells=[new_code_cell(), new_code_cell(), new_code_cell()],
metadata={'jupytext': {'notebook_metadata_filter': '-all'}})
text = jupytext.writes(nb, 'py:percent')
expected = """# %%
# %%
# %%
"""
compare(text, expected)
nb2 = jupytext.reads(text, 'py:percent')
nb2.metadata = nb.metadata
compare(nb2, nb)
def test_first_cell_markdown_191():
text = """# %% [markdown]
# Docstring
# %%
from math import pi
# %% [markdown]
# Another markdown cell
"""
nb = jupytext.reads(text, 'py')
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[1].cell_type == 'code'
assert nb.cells[2].cell_type == 'markdown'
def test_multiline_comments_in_markdown_1():
text = """# %% [markdown]
'''
a
long
cell
'''
"""
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_in_markdown_2():
text = '''# %% [markdown]
"""
a
long
cell
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_format_option():
text = '''# %% [markdown]
"""
a
long
cell
"""
'''
nb = new_notebook(cells=[new_markdown_cell("a\nlong\ncell")],
metadata={'jupytext': {'cell_markers': '"""',
'notebook_metadata_filter': '-all'}})
py = jupytext.writes(nb, 'py:percent')
compare(py, text)
def test_multiline_comments_in_raw_cell():
text = '''# %% [raw]
"""
some
text
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'raw'
assert nb.cells[0].source == "some\ntext"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_multiline_comments_in_markdown_cell_no_line_return():
text = '''# %% [markdown]
"""a
long
cell"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "a\nlong\ncell"
def test_multiline_comments_in_markdown_cell_is_robust_to_additional_cell_marker():
text = '''# %% [markdown]
"""
some text, and a fake cell marker
# %% [raw]
"""
'''
nb = jupytext.reads(text, 'py')
assert len(nb.cells) == 1
assert nb.cells[0].cell_type == 'markdown'
assert nb.cells[0].source == "some text, and a fake cell marker\n# %% [raw]"
py = jupytext.writes(nb, 'py')
compare(py, text)
def test_cell_markers_option_in_contents_manager(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py:percent',
'notebook_metadata_filter': '-all',
'cell_markers': "'''"}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """# %%
1 + 1
# %% [markdown]
'''
a
long
cell
'''
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_default_cell_markers_in_contents_manager(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
cm.default_cell_markers = "'''"
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py:percent',
'notebook_metadata_filter': '-all'}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """# %%
1 + 1
# %% [markdown]
'''
a
long
cell
'''
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_default_cell_markers_in_contents_manager_does_not_impact_light_format(tmpdir):
tmp_ipynb = str(tmpdir.join('notebook.ipynb'))
tmp_py = str(tmpdir.join('notebook.py'))
cm = jupytext.TextFileContentsManager()
cm.root_dir = str(tmpdir)
cm.default_cell_markers = "'''"
nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\nlong\ncell')],
metadata={'jupytext': {'formats': 'ipynb,py',
'notebook_metadata_filter': '-all'}})
cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')
assert os.path.isfile(tmp_ipynb)
assert os.path.isfile(tmp_py)
with open(tmp_py) as fp:
text = fp.read()
compare(text, """1 + 1
# a
# long
# cell
""")
nb2 = jupytext.read(tmp_py)
compare_notebooks(nb, nb2)
def test_single_triple_quote_works(no_jupytext_version_number, text='''# ---
# jupyter:
# jupytext:
# cell_markers: '"""'
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# ---
# %%
print("hello")
''', notebook=new_notebook(cells=[new_code_cell('print("hello")')])):
compare_notebooks(jupytext.reads(text, 'py'), notebook)
def test_docstring_with_quadruple_quote(nb=new_notebook(cells=[
new_code_cell('''def fun_1(df):
""""
docstring starting with 4 double quotes and ending with 3
"""
return df'''),
new_code_cell('''def fun_2(df):
"""
docstring
"""
return df''')
])):
"""Reproduces https://github.com/mwouts/jupytext/issues/460"""
py = jupytext.writes(nb, 'py:percent')
nb2 = jupytext.reads(py, 'py')
compare_notebooks(nb2, nb)
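# Hedged sketch (not one of the original tests): most tests above follow the
# same read -> write -> compare round-trip; a small helper capturing that
# pattern could look like the function below. The helper name is an assumption.
def _roundtrip_percent(script):
    """Read a py:percent script, write it back, and check the text survives."""
    nb = jupytext.reads(script, 'py:percent')
    compare(jupytext.writes(nb, 'py:percent'), script)
    return nb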
|
the-stack_0_389 | if __name__ == "__main__":
import os
import sys
sys.path.append(os.getcwd() + "/../../")
import pandas as pd
import itertools
from kge_from_text import folder_definitions as fd
import kge_from_text.models.term_embeddings as tt
import kge_from_text.bridges.clean_bridge as bridge
from kge_from_text.evaluators.evaluator_handler import EvaluatorHandler
from kge_from_text.evaluators.analogy_evaluator import AnalogyEvaluator
import kge_from_text.models.tee_embeddings as tee
combinations = [(5, 400), (5, 500)]
entity_vector_name = "2016_data/entity_vectors"
type_vector_name = "2016_data/type_vectors"
conactenated_name = "2016_data/concatenated_vectors"
conactenated_name_time = "2016_data/concatenated_vectors_time"
temporal_csv = "2016_data/temporal_vectors.csv"
annotated_entity_file = "2016_data/annotated_text_with_entities"
annotated_type_file = "2016_data/annotated_text_with_types"
type_of_entity_file = "2016_data/type_to_entity_data.ttl"
pure_text_model = "2016_data/text_with_words"
# Declare An Evaluator
evalu = EvaluatorHandler(fd.EVALUATION_RESULTS_ROOT, name="word_base")
for w_e, s_e in combinations:
# ENTITY
model_w = tt.TermEmbedding("text")
model_w.fit(input_text=fd.STARTING_DATA_ROOT + pure_text_model,
output_file_path=fd.PRODUCED_MODELS_ROOT + "2016_data/", _size=s_e, _window=w_e, load_model_if_exits = True)
analogies = pd.read_csv(fd.GOLD_STANDARDS + "mikolov", names=["First", "Second", "Third", "Fourth"],
sep=" ")
br = bridge.CleanBridge()
analogy_eval = AnalogyEvaluator(br, model_w, analogies)
evalu.run_evaluation(analogy_eval)
analogies = pd.read_csv(fd.GOLD_STANDARDS + "currency", names=["First", "Second", "Third", "Fourth"],
sep=" ")
analogy_eval = AnalogyEvaluator(br, model_w, analogies)
evalu.run_evaluation(analogy_eval)
|
the-stack_0_390 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import st2common.content.utils as content_utils
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
__all__ = [
'AliasesRegistrar',
'register_aliases'
]
LOG = logging.getLogger(__name__)
class AliasesRegistrar(ResourceRegistrar):
ALLOWED_EXTENSIONS = ALLOWED_EXTS
def register_aliases_from_packs(self, base_dirs):
"""
Discover all the packs in the provided directory and register aliases from all of the
discovered packs.
:return: Number of aliases registered.
:rtype: ``int``
"""
registered_count = 0
content = self._pack_loader.get_content(base_dirs=base_dirs,
content_type='aliases')
for pack, aliases_dir in six.iteritems(content):
try:
LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)
aliases = self._get_aliases_from_pack(aliases_dir)
count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
registered_count += count
            except Exception:
LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)
return registered_count
def register_aliases_from_pack(self, pack_dir):
"""
Register all the aliases from the provided pack.
:return: Number of aliases registered.
:rtype: ``int``
"""
pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
_, pack = os.path.split(pack_dir)
aliases_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
content_type='aliases')
registered_count = 0
if not aliases_dir:
return registered_count
LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)
try:
aliases = self._get_aliases_from_pack(aliases_dir=aliases_dir)
registered_count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
        except Exception:
LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)
return 0
return registered_count
def _get_aliases_from_pack(self, aliases_dir):
return self.get_resources_from_pack(resources_dir=aliases_dir)
def _register_action_alias(self, pack, action_alias):
content = self._meta_loader.load(action_alias)
pack_field = content.get('pack', None)
if not pack_field:
content['pack'] = pack
pack_field = pack
if pack_field != pack:
raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
(pack, pack_field))
action_alias_api = ActionAliasAPI(**content)
action_alias_api.validate()
action_alias_db = ActionAliasAPI.to_model(action_alias_api)
try:
action_alias_db.id = ActionAlias.get_by_name(action_alias_api.name).id
except ValueError:
LOG.info('ActionAlias %s not found. Creating new one.', action_alias)
try:
action_alias_db = ActionAlias.add_or_update(action_alias_db)
extra = {'action_alias_db': action_alias_db}
LOG.audit('Action alias updated. Action alias %s from %s.', action_alias_db,
action_alias, extra=extra)
except Exception:
LOG.exception('Failed to create action alias %s.', action_alias_api.name)
raise
def _register_aliases_from_pack(self, pack, aliases):
registered_count = 0
for alias in aliases:
try:
LOG.debug('Loading alias from %s.', alias)
self._register_action_alias(pack, alias)
except Exception:
LOG.exception('Unable to register alias: %s', alias)
continue
else:
registered_count += 1
return registered_count
def register_aliases(packs_base_paths=None, pack_dir=None):
if packs_base_paths:
assert(isinstance(packs_base_paths, list))
if not packs_base_paths:
packs_base_paths = content_utils.get_packs_base_paths()
registrar = AliasesRegistrar()
if pack_dir:
result = registrar.register_aliases_from_pack(pack_dir=pack_dir)
else:
result = registrar.register_aliases_from_packs(base_dirs=packs_base_paths)
return result
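# Hedged usage sketch (illustrative only; not part of the original module).
# Assuming a configured StackStorm install whose packs live under a placeholder
# path such as /opt/stackstorm/packs, the helpers above could be driven like this:
#
#     count = register_aliases(packs_base_paths=['/opt/stackstorm/packs'])
#     LOG.info('Registered %s aliases in total', count)
#
#     # or restrict registration to a single pack:
#     count = register_aliases(pack_dir='/opt/stackstorm/packs/examples')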
|
the-stack_0_392 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python import ipu
from ipu_tensorflow_addons.keras.layers import Embedding, LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.optimizers import Adam
if tf.__version__[0] != '2':
raise ImportError("TensorFlow 2 is required for this example")
max_features = 20000
minibatch_size = 32
# Define the dataset.
def get_dataset():
(x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=80)
ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
ds = ds.repeat()
ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))
ds = ds.batch(minibatch_size, drop_remainder=True)
return ds
# Define the model.
def get_model():
return tf.keras.Sequential(
[Embedding(max_features, 128),
LSTM(128, dropout=0.2),
Dense(1, activation='sigmoid')])
def main():
# Configure IPUs.
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Set up IPU strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
model = get_model()
model.compile(steps_per_execution=384, loss='binary_crossentropy', optimizer=Adam(0.005))
model.fit(get_dataset(), steps_per_epoch=768, epochs=3)
if __name__ == '__main__':
main()
|
the-stack_0_393 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v2.model.dashboard_list_item_request import DashboardListItemRequest
globals()["DashboardListItemRequest"] = DashboardListItemRequest
class DashboardListDeleteItemsRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"dashboards": ([DashboardListItemRequest],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"dashboards": "dashboards", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DashboardListDeleteItemsRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
dashboards ([DashboardListItemRequest]): List of dashboards to delete from the dashboard list.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
the-stack_0_394 | from .claims import Claims
from .cose import COSE
from .cose_key import COSEKey
from .cwt import (
CWT,
decode,
encode,
encode_and_encrypt,
encode_and_mac,
encode_and_sign,
set_private_claim_names,
)
from .encrypted_cose_key import EncryptedCOSEKey
from .exceptions import CWTError, DecodeError, EncodeError, VerifyError
from .helpers.hcert import load_pem_hcert_dsc
from .recipient import Recipient
from .signer import Signer
__version__ = "1.3.2"
__title__ = "cwt"
__description__ = "A Python implementation of CWT/COSE"
__url__ = "https://python-cwt.readthedocs.io"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "AJITOMI Daisuke"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright 2021 AJITOMI Daisuke"
__all__ = [
"encode",
"encode_and_mac",
"encode_and_sign",
"encode_and_encrypt",
"decode",
"set_private_claim_names",
"CWT",
"COSE",
"COSEKey",
"EncryptedCOSEKey",
"Claims",
"Recipient",
"Signer",
"load_pem_hcert_dsc",
"CWTError",
"EncodeError",
"DecodeError",
"VerifyError",
]
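# Hedged usage sketch (comments only, so nothing runs on import). A minimal
# MACed CWT round trip using the names exported above, assuming the documented
# COSEKey.from_symmetric_key constructor; the claim values are made-up examples:
#
#     from cwt import COSEKey, encode, decode
#
#     key = COSEKey.from_symmetric_key(alg="HS256")
#     token = encode({"iss": "coaps://as.example", "sub": "dajiaji"}, key)
#     claims = decode(token, key)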
|
the-stack_0_395 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pacvim(MakefilePackage):
"""Pacvim is a command-line-based game based off of Pacman.
The main purpose of this software is to familiarize individuals
with Vim."""
homepage = "https://github.com/jmoon018/PacVim"
url = "https://github.com/jmoon018/PacVim/archive/v1.1.1.tar.gz"
version('1.1.1', sha256='c869c5450fbafdfe8ba8a8a9bba3718775926f276f0552052dcfa090d21acb28')
depends_on('ncurses')
def edit(self, stage, prefix):
makefile = FileFilter('Makefile')
makefile.filter(r'PREFIX = /usr/local',
'PREFIX={0}'.format(self.prefix))
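# Hedged usage note (illustrative): once this recipe is available in a Spack
# repository, the package would be built with the standard Spack workflow,
# for example:
#
#     spack spec pacvim
#     spack install pacvim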
|
the-stack_0_396 | #!/usr/bin/python3.6
activate_this = '/home/ubuntu/flaskapp/venv/bin/activate_this.py'
with open(activate_this) as f:
exec(f.read(), dict(__file__=activate_this))
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/home/ubuntu/flaskapp/flaskapp/")
from manage import app as application
if __name__ == "__main__":
application.run()
|
the-stack_0_398 | import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="smoothing", parent_name="carpet.aaxis", **kwargs):
super(SmoothingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1.3),
min=kwargs.pop("min", 0),
**kwargs,
)
|
the-stack_0_399 | import numpy as np
from finitewave.core.model import CardiacModel
from finitewave.cpuwave2D.model.aliev_panfilov_2d.aliev_panfilov_kernels_2d \
import AlievPanfilovKernels2D
_npfloat = "float64"
class AlievPanfilov2D(CardiacModel):
def __init__(self):
CardiacModel.__init__(self)
self.v = np.ndarray
self.w = np.ndarray
self.state_vars = ["u", "v"]
self.npfloat = 'float64'
def initialize(self):
super().initialize()
weights_shape = self.cardiac_tissue.weights.shape
shape = self.cardiac_tissue.mesh.shape
self.diffuse_kernel = AlievPanfilovKernels2D().get_diffuse_kernel(weights_shape)
self.ionic_kernel = AlievPanfilovKernels2D().get_ionic_kernel()
self.v = np.zeros(shape, dtype=self.npfloat)
def run_ionic_kernel(self):
self.ionic_kernel(self.u_new, self.u, self.v, self.cardiac_tissue.mesh,
self.dt)
|
the-stack_0_401 | # TIPS: only used to find the best epoch of MLP
# MLP
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
import tensorflow as tf
def bit2attr(bitstr) -> list:
attr_vec = list()
for i in range(len(bitstr)):
attr_vec.append(int(bitstr[i]))
return attr_vec
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
mre = mre * 100/ len(y_pred)
return mre
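# Worked example: mean_relative_error([2.0, 3.0], [1.0, 2.0])
# -> (|2 - 1| / 1 + |3 - 2| / 2) * 100 / 2 = 75.0 (percent)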
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) Data preprocessing
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-descriptor-train.csv'
data = pd.read_csv(filepath, encoding='gb18030')
print(data.shape)
data = data.dropna()
print(data.shape)
data = shuffle(data)
data_x_df = data.drop(['label'], axis=1)
data_y_df = data[['label']]
# Min-max normalization
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
test_filepath = "data/database/22-01-29-descriptor-test-level-1.csv"
test_data = pd.read_csv(test_filepath, encoding='gb18030')
print('test data: ', test_data.shape)
test_data_x_df = test_data.drop(['label'], axis=1)
test_data_y_df = test_data[['label']]
x_trans1_test = min_max_scaler_X.transform(test_data_x_df)
y_trans1_test = min_max_scaler_y.transform(test_data_y_df)
'''
3) Build the model
'''
from keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout
from keras import models
from keras.optimizers import Adam, RMSprop, SGD
def buildModel():
model = models.Sequential()
l4 = Dense(512, activation='relu')
l5 = Dropout(rate=0.2)
l6 = Dense(128, activation='relu')
l7 = Dense(30, activation='relu')
l8 = Dense(1)
layers = [l4, l5, l6, l7, l8]
for i in range(len(layers)):
model.add(layers[i])
adam = Adam(lr=1e-3)
model.compile(optimizer=adam, loss='logcosh', metrics=['mae', 'mape'])
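    # NOTE: the scikit-learn MLPRegressor below is constructed but never used;
    # this function returns the Keras `model` compiled above.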
model_mlp = MLPRegressor(
hidden_layer_sizes=(512, 128, 32), activation='relu', solver='lbfgs', alpha=0.0001,
max_iter=5000,
random_state=1, tol=0.0001, verbose=False, warm_start=False)
return model
def scheduler(epoch, lr):
if epoch > 0 and epoch % 500 == 0:
return lr * 0.1
else:
return lr
'''
4) Train the model
'''
from sklearn import metrics
# n_split = 10
mlp_scores = []
MAEs = []
out_MAEs = []
in_y_test = []
in_y_pred = []
out_y_test = []
out_y_pred = []
X_train = x_trans1
y_train = y_trans1
# External validation set
X_test = x_trans1_test
y_trans1_test = np.reshape(y_trans1_test, (-1, 1))
y_test = y_trans1_test
callback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
model_mlp = buildModel()
history = model_mlp.fit(X_train, y_train, epochs=1, verbose=1, validation_data=(X_test, y_test), callbacks=[callback])
print(model_mlp.summary())
exit(0)
losses = history.history['loss']
eval_mres = history.history['val_mape']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot([x for x in range(len(losses))], losses, 'b', label='loss')
ax1.set_ylabel('loss', color='b')
ax2.plot([x for x in range(len(eval_mres))], eval_mres, 'r', label='eval_mre')
ax2.set_ylabel('eval_mre', color='r')
ax1.set_xlabel('epochs')
plt.title('Training of MLP')
plt.savefig('pics/Training_of_MLP.png')
import os
outdir = 'Out/losses_and_mres'
os.makedirs(outdir, exist_ok=True)
with open(os.path.join(outdir, 'mlp_descriptor.txt'), 'w') as f:
f.write('loss\n')
f.write(' '.join([str(x) for x in losses]))
f.write('\n')
f.write('mres\n')
f.write(' '.join([str(x) for x in eval_mres])) |
the-stack_0_402 | import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from app.utility.utils import memcached
import app.utility.utils as utils
import app.db.counter as counter
import web
QUESTIONS_PER_SITEMAP = 500
class Sitemap(db.Model):
question_count = db.IntegerProperty(default = 0)
question_keys = db.StringListProperty(default = [])
content = db.TextProperty(default ='')
archived = db.BooleanProperty(default = False)
created = db.DateTimeProperty(auto_now_add = True)
last_modified = db.DateTimeProperty(auto_now = True)
@staticmethod
def get_last_sitemap():
entity = Sitemap.all().order('-created').get()
if entity:
if entity.question_count >= QUESTIONS_PER_SITEMAP:
entity.content = unicode(web.render.sitemap_questions(entity.question_keys))
entity.archived = True
entity.put()
entity = Sitemap()
entity.put()
else:
entity = Sitemap()
entity.put()
return entity
@staticmethod
def update_last_sitemap(key):
last_sitemap = Sitemap.get_last_sitemap()
last_sitemap.question_count += 1
last_sitemap.question_keys.insert(0, str(key))
last_sitemap.put()
@staticmethod
def get_sitemaps():
sitemaps = Sitemap.all().order('-created').fetch(500)
return sitemaps
@staticmethod
@memcached('get_sitemap_by_id', 3600*24, lambda id : int(id) )
def get_sitemap_by_id(id):
entity = Sitemap.get_by_id(id)
if entity:
if entity.content:
return entity.content
else:
return unicode(web.render.sitemap_questions(entity.question_keys))
else:
raise web.notfound() |
the-stack_0_403 | from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponse
from .models import Form
from .forms import ReqForm
from .filters import FormFilter
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import redirect
def form(request):
return render(request,'form.html')
def status(request):
return render(request,'status.html')
def about(request):
return render(request,'about.html')
def index(request):
return render(request,'index.html')
def showformdata(request):
if request.method=='POST':
fm=ReqForm(request.POST)
if fm.is_valid():
em=fm.cleaned_data['email']
cn=fm.cleaned_data['ClubName']
rn=fm.cleaned_data['RepresentativeName']
            ct=fm.cleaned_data['Contact']  # distinct name so ClubName (cn) is not overwritten
df=fm.cleaned_data['req_date_from']
dt=fm.cleaned_data['req_date_to']
rt=fm.cleaned_data['req_type']
rp=fm.cleaned_data['req_purpose']
profile = fm.save(commit=False)
profile.user = request.user
profile.save()
fm.save()
fm=ReqForm()
print(em)
print(rn)
else:
fm=ReqForm()
return render(request,'form.html',{'frm':fm})
def reqInfo(request):
u=request.user
if u.groups.filter(name='Managers').exists():
req = Form.objects.all()
print(req)
print("this is a manager")
context={
'form':form,
'req': req
}
else:
req = Form.objects.filter(user=request.user)
print(req)
print("normal user")
context={
'form':form,
'req': req
}
return render(request,'status.html',context)
def student_approve(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=1
val.save()
return HttpResponse("approved successfully")
def student_disapprove(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=2
val.save()
return HttpResponse("disapproved successfully")
def student_reset(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=0
val.save()
return HttpResponse("reset successfully")
# def write_view(request, *args, **kwargs):
# val=Form.objects.get(id=user_id)
# if request.is_ajax() and request.method == "POST":
# texteditor = request.POST['TextEntered']
# val.Management_Comments='texteditor'
# print(texteditor)
# ## Don't forget to do validation and cleanup on texteditor to avoid security hassles
# ## Do your logic here
# SuccessAcknowledgment = {"Acknowledged":"Acknowledged"}
# return HttpResponse(json.dumps(SuccessAcknowledgment))
# else:
# return render(request, "write.html")
def reqInfoMess(request):
u=request.user
if u.groups.filter(name='Managers').exists():
req = Form.objects.all()
print(req)
print("this is a manager")
context={
'form':form,
'req': req
}
else:
req = Form.objects.filter(user=request.user)
print(req)
print("normal user")
context={
'form':form,
'req': req
}
return render(request,'status.html',context)
def showmess(request, user_id):
    u=request.user
    ms=""  # initialised here so a GET request can render without a NameError
    if request.method=='POST':
        # NOTE: `mess` is assumed to be a comment form defined in .forms; it is
        # not imported at the top of this module and must be added there.
        fm=mess(request.POST)
        if fm.is_valid():
ms=fm.cleaned_data['Management_Comments']
u = fm.save(commit=False)
#profile.user = request.user
u.save()
fm.save()
fm=mess()
print(ms)
return render(request,'status.html',{'mess':ms})
|
the-stack_0_404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Tool to check if each python module has a corresponding API docs."""
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import pathlib
import attr
import typer
# =============================================================================
# CONSTANTS
# =============================================================================
VERSION = "0.1"
# =============================================================================
# FUNCTIONS
# =============================================================================
def check_apidoc_structure(apidoc_dir, reference_dir):
apidoc_dir = pathlib.Path(apidoc_dir)
reference_dir = pathlib.Path(reference_dir)
if not apidoc_dir.exists():
raise OSError(f"'{apidoc_dir}' do no exist")
if not reference_dir.exists():
raise OSError(f"'{reference_dir}' do no exist")
reference = list(reference_dir.glob("**/*.py"))
result = {}
for ref in reference:
# essentially we remove the parent dir
*dirs, ref_name = ref.relative_to(reference_dir).parts
if ref_name == "__init__.py":
ref_name = "index.py"
search_dir = apidoc_dir
for subdir in dirs:
search_dir /= subdir
search = search_dir / f"{ref_name[:-3]}.rst"
result[str(ref)] = (str(search), search.exists())
return result
# =============================================================================
# CLI
# =============================================================================
@attr.s(frozen=True)
class CLI:
"""Check if the structure of API doc directory is equivalent to those of
the project.
"""
footnotes = "\n".join(
[
"This software is under the BSD 3-Clause License.",
"Copyright (c) 2021, Juan Cabral.",
"For bug reporting or other instructions please check:"
" https://github.com/quatrope/scikit-criteria",
]
)
run = attr.ib(init=False)
@run.default
def _set_run_default(self):
app = typer.Typer()
for k in dir(self):
if k.startswith("_"):
continue
v = getattr(self, k)
if inspect.ismethod(v):
decorator = app.command()
decorator(v)
return app
def version(self):
"""Print checktestdir.py version."""
typer.echo(f"{__file__ } v.{VERSION}")
def check(
self,
test_dir: str = typer.Argument(
..., help="Path to the api-doc structure."
),
reference_dir: str = typer.Option(
..., help="Path to the reference structure."
),
verbose: bool = typer.Option(
default=False, help="Show all the result"
),
):
"""Check if the structure of test directory is equivalent to those
of the project.
"""
try:
check_result = check_apidoc_structure(test_dir, reference_dir)
except Exception as err:
typer.echo(typer.style(str(err), fg=typer.colors.RED))
raise typer.Exit(code=1)
all_tests_exists = True
for ref, test_result in check_result.items():
test, test_exists = test_result
if test_exists:
fg = typer.colors.GREEN
status = ""
else:
all_tests_exists = False
fg = typer.colors.RED
status = typer.style("[NOT FOUND]", fg=typer.colors.YELLOW)
if verbose or not test_exists:
msg = f"{ref} -> {test} {status}"
typer.echo(typer.style(msg, fg=fg))
if all_tests_exists:
final_fg = typer.colors.GREEN
final_status = "Test structure ok!"
exit_code = 0
else:
final_fg = typer.colors.RED
final_status = "Structure not equivalent!"
exit_code = 1
typer.echo("-------------------------------------")
typer.echo(typer.style(final_status, fg=final_fg))
raise typer.Exit(code=exit_code)
def main():
"""Run the checkapidocdir.py cli interface."""
cli = CLI()
cli.run()
if __name__ == "__main__":
main()
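# Hedged usage sketch (paths are placeholders for this project's layout):
#
#     python checkapidoc.py version
#     python checkapidoc.py check docs/source/api --reference-dir skcriteria --verbose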
|
the-stack_0_406 | """The tests for the Restore component."""
from datetime import datetime
from unittest.mock import patch
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.core import CoreState, State
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import (
DATA_RESTORE_STATE_TASK,
STORAGE_KEY,
RestoreEntity,
RestoreStateData,
StoredState,
)
from homeassistant.util import dt as dt_util
async def test_caching_data(hass):
"""Test that we cache data."""
now = dt_util.utcnow()
stored_states = [
StoredState(State("input_boolean.b0", "on"), now),
StoredState(State("input_boolean.b1", "on"), now),
StoredState(State("input_boolean.b2", "on"), now),
]
data = await RestoreStateData.async_get_instance(hass)
await hass.async_block_till_done()
await data.store.async_save([state.as_dict() for state in stored_states])
# Emulate a fresh load
hass.data[DATA_RESTORE_STATE_TASK] = None
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
# Mock that only b1 is present this run
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data:
state = await entity.async_get_last_state()
await hass.async_block_till_done()
assert state is not None
assert state.entity_id == "input_boolean.b1"
assert state.state == "on"
assert mock_write_data.called
async def test_hass_starting(hass):
"""Test that we cache data."""
hass.state = CoreState.starting
now = dt_util.utcnow()
stored_states = [
StoredState(State("input_boolean.b0", "on"), now),
StoredState(State("input_boolean.b1", "on"), now),
StoredState(State("input_boolean.b2", "on"), now),
]
data = await RestoreStateData.async_get_instance(hass)
await hass.async_block_till_done()
await data.store.async_save([state.as_dict() for state in stored_states])
# Emulate a fresh load
hass.data[DATA_RESTORE_STATE_TASK] = None
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
# Mock that only b1 is present this run
states = [State("input_boolean.b1", "on")]
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
state = await entity.async_get_last_state()
await hass.async_block_till_done()
assert state is not None
assert state.entity_id == "input_boolean.b1"
assert state.state == "on"
# Assert that no data was written yet, since hass is still starting.
assert not mock_write_data.called
# Finish hass startup
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data:
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
# Assert that this session states were written
assert mock_write_data.called
async def test_dump_data(hass):
"""Test that we cache data."""
states = [
State("input_boolean.b0", "on"),
State("input_boolean.b1", "on"),
State("input_boolean.b2", "on"),
State("input_boolean.b5", "unavailable", {"restored": True}),
]
entity = Entity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
await entity.async_internal_added_to_hass()
data = await RestoreStateData.async_get_instance(hass)
now = dt_util.utcnow()
data.last_states = {
"input_boolean.b0": StoredState(State("input_boolean.b0", "off"), now),
"input_boolean.b1": StoredState(State("input_boolean.b1", "off"), now),
"input_boolean.b2": StoredState(State("input_boolean.b2", "off"), now),
"input_boolean.b3": StoredState(State("input_boolean.b3", "off"), now),
"input_boolean.b4": StoredState(
State("input_boolean.b4", "off"),
datetime(1985, 10, 26, 1, 22, tzinfo=dt_util.UTC),
),
"input_boolean.b5": StoredState(State("input_boolean.b5", "off"), now),
}
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
args = mock_write_data.mock_calls[0][1]
written_states = args[0]
# b0 should not be written, since it didn't extend RestoreEntity
# b1 should be written, since it is present in the current run
# b2 should not be written, since it is not registered with the helper
# b3 should be written, since it is still not expired
# b4 should not be written, since it is now expired
# b5 should be written, since current state is restored by entity registry
assert len(written_states) == 3
assert written_states[0]["state"]["entity_id"] == "input_boolean.b1"
assert written_states[0]["state"]["state"] == "on"
assert written_states[1]["state"]["entity_id"] == "input_boolean.b3"
assert written_states[1]["state"]["state"] == "off"
assert written_states[2]["state"]["entity_id"] == "input_boolean.b5"
assert written_states[2]["state"]["state"] == "off"
# Test that removed entities are not persisted
await entity.async_remove()
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
args = mock_write_data.mock_calls[0][1]
written_states = args[0]
assert len(written_states) == 2
assert written_states[0]["state"]["entity_id"] == "input_boolean.b3"
assert written_states[0]["state"]["state"] == "off"
assert written_states[1]["state"]["entity_id"] == "input_boolean.b5"
assert written_states[1]["state"]["state"] == "off"
async def test_dump_error(hass):
"""Test that we cache data."""
states = [
State("input_boolean.b0", "on"),
State("input_boolean.b1", "on"),
State("input_boolean.b2", "on"),
]
entity = Entity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
await entity.async_internal_added_to_hass()
data = await RestoreStateData.async_get_instance(hass)
with patch(
"homeassistant.helpers.restore_state.Store.async_save",
side_effect=HomeAssistantError,
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
async def test_load_error(hass):
"""Test that we cache data."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
with patch(
"homeassistant.helpers.storage.Store.async_load",
side_effect=HomeAssistantError,
):
state = await entity.async_get_last_state()
assert state is None
async def test_state_saved_on_remove(hass):
"""Test that we save entity state on removal."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
now = dt_util.utcnow()
hass.states.async_set(
"input_boolean.b0", "on", {"complicated": {"value": {1, 2, now}}}
)
data = await RestoreStateData.async_get_instance(hass)
# No last states should currently be saved
assert not data.last_states
await entity.async_remove()
# We should store the input boolean state when it is removed
state = data.last_states["input_boolean.b0"].state
assert state.state == "on"
assert isinstance(state.attributes["complicated"]["value"], list)
assert set(state.attributes["complicated"]["value"]) == {1, 2, now.isoformat()}
async def test_restoring_invalid_entity_id(hass, hass_storage):
"""Test restoring invalid entity IDs."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "test.invalid__entity_id"
now = dt_util.utcnow().isoformat()
hass_storage[STORAGE_KEY] = {
"version": 1,
"key": STORAGE_KEY,
"data": [
{
"state": {
"entity_id": "test.invalid__entity_id",
"state": "off",
"attributes": {},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": dt_util.utcnow().isoformat(),
}
],
}
state = await entity.async_get_last_state()
assert state is None
|
the-stack_0_409 | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Lambda, Reshape, Layer
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
# `ds` below (MultivariateNormalDiag, kl_divergence) is assumed to refer to the
# TensorFlow Probability distributions module, which was not imported above.
import tensorflow_probability as tfp
ds = tfp.distributions
INPUT_DIM = (64,64,3)
CONV_FILTERS = [32,64,64, 128]
CONV_KERNEL_SIZES = [4,4,4,4]
CONV_STRIDES = [2,2,2,2]
CONV_ACTIVATIONS = ['relu','relu','relu','relu']
DENSE_SIZE = 1024
CONV_T_FILTERS = [64,64,32,3]
CONV_T_KERNEL_SIZES = [5,5,6,6]
CONV_T_STRIDES = [2,2,2,2]
CONV_T_ACTIVATIONS = ['relu','relu','relu','sigmoid']
Z_DIM = 32
BATCH_SIZE = 100
LEARNING_RATE = 0.0001
KL_TOLERANCE = 0.5
class Sampling(Layer):
def call(self, inputs):
mu, log_var = inputs
epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
return mu + K.exp(log_var / 2) * epsilon
class VAEModel(Model):
def __init__(self, encoder, decoder, r_loss_factor, **kwargs):
super(VAEModel, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.r_loss_factor = r_loss_factor
def train_step(self, data):
if isinstance(data, tuple):
data = data[0]
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
tf.square(data - reconstruction), axis = [1,2,3]
)
reconstruction_loss *= self.r_loss_factor
kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
kl_loss = tf.reduce_sum(kl_loss, axis = 1)
kl_loss *= -0.5
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return {
"loss": total_loss,
"reconstruction_loss": reconstruction_loss,
"kl_loss": kl_loss,
}
def call(self,inputs):
latent = self.encoder(inputs)
return self.decoder(latent)
class VAEGAN(tf.keras.Model):
"""a VAEGAN class for tensorflow
Extends:
tf.keras.Model
"""
def __init__(self, **kwargs):
super(VAEGAN, self).__init__()
self.__dict__.update(kwargs)
self.enc = tf.keras.Sequential(self.enc)
self.dec = tf.keras.Sequential(self.dec)
inputs, disc_l, outputs = self.vae_disc_function()
self.disc = tf.keras.Model(inputs=[inputs], outputs=[outputs, disc_l])
self.enc_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)
self.dec_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)
self.disc_optimizer = tf.keras.optimizers.Adam(self.get_lr_d, beta_1=0.5)
def encode(self, x):
mu, sigma = tf.split(self.enc(x), num_or_size_splits=2, axis=1)
return mu, sigma
def dist_encode(self, x):
mu, sigma = self.encode(x)
return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)
def get_lr_d(self):
return self.lr_base_disc * self.D_prop
def decode(self, z):
return self.dec(z)
def discriminate(self, x):
return self.disc(x)
def reconstruct(self, x):
mean, _ = self.encode(x)
return self.decode(mean)
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * 0.5) + mean
# @tf.function
def compute_loss(self, x):
# pass through network
q_z = self.dist_encode(x)
z = q_z.sample()
p_z = ds.MultivariateNormalDiag(
loc=[0.0] * z.shape[-1], scale_diag=[1.0] * z.shape[-1]
)
xg = self.decode(z)
z_samp = tf.random.normal([x.shape[0], 1, 1, z.shape[-1]])
xg_samp = self.decode(z_samp)
d_xg, ld_xg = self.discriminate(xg)
d_x, ld_x = self.discriminate(x)
d_xg_samp, ld_xg_samp = self.discriminate(xg_samp)
# GAN losses
disc_real_loss = gan_loss(logits=d_x, is_real=True)
disc_fake_loss = gan_loss(logits=d_xg_samp, is_real=False)
gen_fake_loss = gan_loss(logits=d_xg_samp, is_real=True)
discrim_layer_recon_loss = (
tf.reduce_mean(tf.reduce_mean(tf.math.square(ld_x - ld_xg), axis=0))
/ self.recon_loss_div
)
self.D_prop = sigmoid(
disc_fake_loss - gen_fake_loss, shift=0.0, mult=self.sig_mult
)
kl_div = ds.kl_divergence(q_z, p_z)
latent_loss = tf.reduce_mean(tf.maximum(kl_div, 0)) / self.latent_loss_div
return (
self.D_prop,
latent_loss,
discrim_layer_recon_loss,
gen_fake_loss,
disc_fake_loss,
disc_real_loss,
)
# @tf.function
def compute_gradients(self, x):
with tf.GradientTape() as enc_tape, tf.GradientTape() as dec_tape, tf.GradientTape() as disc_tape:
(
_,
latent_loss,
discrim_layer_recon_loss,
gen_fake_loss,
disc_fake_loss,
disc_real_loss,
) = self.compute_loss(x)
enc_loss = latent_loss + discrim_layer_recon_loss
dec_loss = gen_fake_loss + discrim_layer_recon_loss
disc_loss = disc_fake_loss + disc_real_loss
enc_gradients = enc_tape.gradient(enc_loss, self.enc.trainable_variables)
dec_gradients = dec_tape.gradient(dec_loss, self.dec.trainable_variables)
disc_gradients = disc_tape.gradient(disc_loss, self.disc.trainable_variables)
return enc_gradients, dec_gradients, disc_gradients
@tf.function
def apply_gradients(self, enc_gradients, dec_gradients, disc_gradients):
self.enc_optimizer.apply_gradients(
zip(enc_gradients, self.enc.trainable_variables)
)
self.dec_optimizer.apply_gradients(
zip(dec_gradients, self.dec.trainable_variables)
)
self.disc_optimizer.apply_gradients(
zip(disc_gradients, self.disc.trainable_variables)
)
def train(self, x):
enc_gradients, dec_gradients, disc_gradients = self.compute_gradients(x)
self.apply_gradients(enc_gradients, dec_gradients, disc_gradients)
def gan_loss(logits, is_real=True):
"""Computes standard gan loss between logits and labels
Arguments:
logits {[type]} -- output of discriminator
Keyword Arguments:
        is_real {bool} -- whether labels should be 0 (fake) or 1 (real) (default: {True})
"""
if is_real:
labels = tf.ones_like(logits)
else:
labels = tf.zeros_like(logits)
return tf.compat.v1.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits
)
def sigmoid(x, shift=0.0, mult=20):
""" squashes a value with a sigmoid
"""
return tf.constant(1.0) / (
tf.constant(1.0) + tf.exp(-tf.constant(1.0) * (x * mult))
)
class VAE():
def __init__(self):
self.models = self._build()
self.full_model = self.models[0]
self.encoder = self.models[1]
self.decoder = self.models[2]
self.input_dim = INPUT_DIM
self.z_dim = Z_DIM
self.learning_rate = LEARNING_RATE
self.kl_tolerance = KL_TOLERANCE
def _build(self):
vae_x = Input(shape=INPUT_DIM, name='observation_input')
vae_c1 = Conv2D(filters = CONV_FILTERS[0], kernel_size = CONV_KERNEL_SIZES[0], strides = CONV_STRIDES[0], activation=CONV_ACTIVATIONS[0], name='conv_layer_1')(vae_x)
vae_c2 = Conv2D(filters = CONV_FILTERS[1], kernel_size = CONV_KERNEL_SIZES[1], strides = CONV_STRIDES[1], activation=CONV_ACTIVATIONS[0], name='conv_layer_2')(vae_c1)
vae_c3= Conv2D(filters = CONV_FILTERS[2], kernel_size = CONV_KERNEL_SIZES[2], strides = CONV_STRIDES[2], activation=CONV_ACTIVATIONS[0], name='conv_layer_3')(vae_c2)
vae_c4= Conv2D(filters = CONV_FILTERS[3], kernel_size = CONV_KERNEL_SIZES[3], strides = CONV_STRIDES[3], activation=CONV_ACTIVATIONS[0], name='conv_layer_4')(vae_c3)
vae_z_in = Flatten()(vae_c4)
vae_z_mean = Dense(Z_DIM, name='mu')(vae_z_in)
vae_z_log_var = Dense(Z_DIM, name='log_var')(vae_z_in)
vae_z = Sampling(name='z')([vae_z_mean, vae_z_log_var])
#### DECODER:
vae_z_input = Input(shape=(Z_DIM,), name='z_input')
vae_dense = Dense(1024, name='dense_layer')(vae_z_input)
vae_unflatten = Reshape((1,1,DENSE_SIZE), name='unflatten')(vae_dense)
vae_d1 = Conv2DTranspose(filters = CONV_T_FILTERS[0], kernel_size = CONV_T_KERNEL_SIZES[0] , strides = CONV_T_STRIDES[0], activation=CONV_T_ACTIVATIONS[0], name='deconv_layer_1')(vae_unflatten)
vae_d2 = Conv2DTranspose(filters = CONV_T_FILTERS[1], kernel_size = CONV_T_KERNEL_SIZES[1] , strides = CONV_T_STRIDES[1], activation=CONV_T_ACTIVATIONS[1], name='deconv_layer_2')(vae_d1)
vae_d3 = Conv2DTranspose(filters = CONV_T_FILTERS[2], kernel_size = CONV_T_KERNEL_SIZES[2] , strides = CONV_T_STRIDES[2], activation=CONV_T_ACTIVATIONS[2], name='deconv_layer_3')(vae_d2)
vae_d4 = Conv2DTranspose(filters = CONV_T_FILTERS[3], kernel_size = CONV_T_KERNEL_SIZES[3] , strides = CONV_T_STRIDES[3], activation=CONV_T_ACTIVATIONS[3], name='deconv_layer_4')(vae_d3)
#### MODELS
vae_encoder = Model(vae_x, [vae_z_mean, vae_z_log_var, vae_z], name = 'encoder')
vae_decoder = Model(vae_z_input, vae_d4, name = 'decoder')
vae_full = VAEModel(vae_encoder, vae_decoder, 10000)
opti = Adam(lr=LEARNING_RATE)
vae_full.compile(optimizer=opti)
return (vae_full,vae_encoder, vae_decoder)
def set_weights(self, filepath):
self.full_model.load_weights(filepath)
def train(self, data):
self.full_model.fit(data, data,
shuffle=True,
epochs=1,
batch_size=BATCH_SIZE)
def save_weights(self, filepath):
self.full_model.save_weights(filepath)
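if __name__ == "__main__":
    # Hedged smoke test (illustrative only): one training pass on random
    # stand-in frames; real usage would feed 64x64x3 rollout images and a
    # real output path instead of the placeholder below.
    vae = VAE()
    fake_frames = np.random.rand(BATCH_SIZE, *INPUT_DIM).astype(np.float32)
    vae.train(fake_frames)
    vae.save_weights("./vae_weights.h5")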
|
the-stack_0_410 | from utils import prefer_envar
from logs.logger import log
from logs.log_utils import log_json
from config.reddit.reddit_sub_lists import REDDIT_APPROVED_SUBS
from config.reddit.config_gen import config_gen
import sys
import json
import os
if os.path.isfile('config.json'):
    with open("config.json", "r") as file:
        AUTH = prefer_envar(json.loads(file.read()))
else:
AUTH = prefer_envar({
# app creds
"reddit_client_id":"",
"reddit_client_secret":"",
# reddit account creds
"reddit_username":"",
"reddit_password":"",
})
for envar in AUTH:
if AUTH[envar] == "":
# reddit auth not configured correctly.
# instruct user to generate a .env file
config_gen()
log.info(f"REDDIT AUTH CONFIG:\n {log_json(AUTH)}")
CONFIG = prefer_envar({
"reddit_crosspost_enabled": False,
# the chance the bot will repost a post
"reddit_post_chance": 0.005,
# the chance the bot will make a comment
"reddit_comment_chance": 0.005,
# the chance the bot will reply to a comment
# otherwise it will reply to a post
"reddit_reply_to_comment": 0.002,
# chance the bot will remove poor performing
# posts and comments
"reddit_remove_low_scores": 0.002,
# posts/comments that get downvoted to this score will be deleted
"reddit_low_score_threshold": 0,
# chance to check if the bot is shadowbanned,
# and shut down the script automatically
"reddit_shadowban_check": 0.002,
# list of subreddits for the bot to use
"reddit_sub_list": REDDIT_APPROVED_SUBS,
# bot schedules. all times are UTC
# add the schedule number to the array
# and the bot will run within that time range
# leave the array empty for no schedule: []
# 1 - 7am-10am ((7,00),(10,00))
# 2 - 10am-2pm ((10,00),(14,00))
# 3 - 2pm-6pm ((14,00),(18,00))
# 4 - 6pm-10pm ((18,00),(22,00))
# 5 - 10pm-2am ((22,00),(2,00))
"reddit_sleep_schedule": [2, 4]
})
log.info(f"REDDIT CONNFIG:\n {log_json(CONFIG)}")
|
the-stack_0_411 | from django.http import JsonResponse
from django.utils import timezone
from django.contrib.sessions.models import Session
from rest_framework import views, viewsets, authentication
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import APIException
from liliapi.serializers import *
from liliapi.models import *
from liliapi.permissions import *
from liliapi.paginations import *
from liliapi.authentication import *
from liliapi.tasks import *
########################################################################################################################
#
# copyright: 2017 WiM - USGS
# authors: Aaron Stephenson USGS WiM (Web Informatics and Mapping)
#
# In Django, a view is what takes a Web request and returns a Web response. The response can be many things, but most
# of the time it will be a Web page, a redirect, or a document. In this case, the response will almost always be data
# in JSON format.
#
# All these views are written as Class-Based Views (https://docs.djangoproject.com/en/1.11/topics/class-based-views/)
# because that is the paradigm used by Django Rest Framework (http://www.django-rest-framework.org/api-guide/views/)
# which is the toolkit we used to create web services in Django.
#
#
########################################################################################################################
LIST_DELIMETER = settings.LIST_DELIMETER
######
#
# Abstract Base Classes
#
######
class HistoryViewSet(viewsets.ModelViewSet):
"""
This class will automatically assign the User ID to the created_by and modified_by history fields when appropriate
"""
permission_classes = (permissions.IsAuthenticated,)
pagination_class = StandardResultsSetPagination
def perform_create(self, serializer):
serializer.save(created_by=self.request.user, modified_by=self.request.user)
def perform_update(self, serializer):
serializer.save(modified_by=self.request.user)
# override the default pagination to allow disabling of pagination
def paginate_queryset(self, *args, **kwargs):
if self.request and 'paginate' in self.request.query_params:
return super().paginate_queryset(*args, **kwargs)
return None
######
#
# Samples
#
######
class SampleViewSet(HistoryViewSet):
serializer_class = SampleSerializer
def get_serializer_class(self):
if self.request and 'slim' in self.request.query_params:
return SampleSlimSerializer
else:
return SampleSerializer
@action(detail=False)
def finalsamplemeanconcentrations(self, request):
queryset = Sample.objects.prefetch_related('finalsamplemeanconcentrations').distinct()
query_params = self.request.query_params
# filter by sample IDs, exact list
sample = query_params.get('sample', None)
if sample is not None:
if LIST_DELIMETER in sample:
sample_list = sample.split(LIST_DELIMETER)
queryset = queryset.filter(id__in=sample_list)
else:
queryset = queryset.filter(id__exact=sample)
# filter by target IDs, exact list
target = query_params.get('target', None)
target_list = []
if target is not None:
if LIST_DELIMETER in target:
target_list = target.split(LIST_DELIMETER)
queryset = queryset.filter(finalsamplemeanconcentrations__target__in=target_list)
else:
target_list = [target]
queryset = queryset.filter(finalsamplemeanconcentrations__target__exact=target)
# recalc reps validity
for sample in queryset:
fsmcs = FinalSampleMeanConcentration.objects.filter(sample=sample.id, target__in=target_list)
for fsmc in fsmcs:
recalc_reps('FinalSampleMeanConcentration', sample.id, target=fsmc.target.id, recalc_rep_conc=False)
# start building up the response object
resp = []
for sample in queryset:
sample_target_list = [int(target) for target in target_list]
item = {
"id": sample.id,
"collaborator_sample_id": sample.collaborator_sample_id,
"collection_start_date": sample.collection_start_date,
"final_sample_mean_concentrations": []
}
fsmcs = list(FinalSampleMeanConcentration.objects.filter(sample=sample.id))
for fsmc in fsmcs:
# attempt to find the matching target in the fsmc list
try:
sample_target_index = sample_target_list.index(fsmc.target.id)
# pop the matching fsmc target from its list so that we eventually end up with an empty list,
# or a list of extraneous targets
sample_target_list.pop(sample_target_index)
# start building up the nested response object
item["final_sample_mean_concentrations"].append({
"target": fsmc.target.id,
"target_string": fsmc.target.name,
"final_sample_mean_concentration": fsmc.final_sample_mean_concentration
})
# no matching target was found in the fsmc list
except ValueError:
# do not include this fsmc in the response because its target was not requested
continue
# now list out the other targets that were requested but do not exist for this sample
for extraneous_target in sample_target_list:
# start building up the nested response object
target_name = list(Target.objects.filter(id=extraneous_target).values_list('name', flat=True))
item["final_sample_mean_concentrations"].append({
"target": extraneous_target,
"target_string": target_name[0],
"final_sample_mean_concentration": "N/A"
})
resp.append(item)
return Response(resp)
@action(detail=False)
def get_count(self, request):
# Sample.objects.filter(matrix__in=matrix_list).count()
query_params = self.request.query_params
return Response({"count": self.build_queryset(query_params).count()})
@action(detail=False)
def get_sampler_names(self, request):
sampler_names = set(list(Sample.objects.values_list('sampler_name', flat=True)))
return Response({"sampler_names": sampler_names})
@action(detail=False)
def get_recent_pegnegs(self, request):
pegneg_record_type = RecordType.objects.filter(id=2).first()
recent_pegnegs = Sample.objects.filter(record_type=pegneg_record_type).order_by('-id')[:20]
return Response(self.serializer_class(recent_pegnegs, many=True).data)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
queryset = Sample.objects.all()
# filter by sample IDs, exact list
sample = query_params.get('id', None)
if sample is not None:
if LIST_DELIMETER in sample:
sample_list = sample.split(LIST_DELIMETER)
queryset = queryset.filter(id__in=sample_list)
else:
queryset = queryset.filter(id__exact=sample)
# filter by sample ID, range
from_sample = query_params.get('from_id', None)
to_sample = query_params.get('to_id', None)
if from_sample is not None and to_sample is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(id__range=(from_sample, to_sample))
elif to_sample is not None:
queryset = queryset.filter(id__lte=to_sample)
elif from_sample is not None:
queryset = queryset.filter(id__gte=from_sample)
# filter by study ID, exact list
study = query_params.get('study', None)
if study is not None:
if LIST_DELIMETER in study:
study_list = study.split(LIST_DELIMETER)
queryset = queryset.filter(study__in=study_list)
else:
queryset = queryset.filter(study__exact=study)
# filter by collection_start_date, range
from_collection_start_date = query_params.get('from_collection_start_date', None)
to_collection_start_date = query_params.get('to_collection_start_date', None)
if from_collection_start_date is not None and to_collection_start_date is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(collection_start_date__range=(
from_collection_start_date, to_collection_start_date))
elif to_collection_start_date is not None:
queryset = queryset.filter(collection_start_date__lte=to_collection_start_date)
elif from_collection_start_date is not None:
queryset = queryset.filter(collection_start_date__gte=from_collection_start_date)
# filter by collaborator_sample_id, exact list
collaborator_sample_id = query_params.get('collaborator_sample_id', None)
if collaborator_sample_id is not None:
if LIST_DELIMETER in collaborator_sample_id:
collaborator_sample_id_list = collaborator_sample_id.split(LIST_DELIMETER)
queryset = queryset.filter(collaborator_sample_id__in=collaborator_sample_id_list)
else:
queryset = queryset.filter(collaborator_sample_id__exact=collaborator_sample_id)
# filter by sample type, exact list
sample_type = query_params.get('sample_type', None)
if sample_type is not None:
if LIST_DELIMETER in sample_type:
sample_type_list = sample_type.split(LIST_DELIMETER)
queryset = queryset.filter(sample_type__in=sample_type_list)
else:
queryset = queryset.filter(sample_type__exact=sample_type)
# filter by matrix, exact list
matrix = query_params.get('matrix', None)
if matrix is not None:
if LIST_DELIMETER in matrix:
matrix_list = matrix.split(LIST_DELIMETER)
queryset = queryset.filter(matrix__in=matrix_list)
else:
queryset = queryset.filter(matrix__exact=matrix)
# filter by record_type, exact list
record_type = query_params.get('record_type', None)
if record_type is not None:
if LIST_DELIMETER in record_type:
record_type_list = record_type.split(LIST_DELIMETER)
queryset = queryset.filter(record_type__in=record_type_list)
else:
queryset = queryset.filter(record_type__exact=record_type)
# filter by peg_neg, exact list
peg_neg = query_params.get('peg_neg', None)
if peg_neg is not None:
if LIST_DELIMETER in peg_neg:
peg_neg_list = peg_neg.split(LIST_DELIMETER)
queryset = queryset.filter(peg_neg__in=peg_neg_list)
else:
queryset = queryset.filter(peg_neg__exact=peg_neg)
return queryset
class AliquotViewSet(HistoryViewSet):
queryset = Aliquot.objects.all()
serializer_class = AliquotCustomSerializer
@action(detail=False)
def get_location(self, request):
# get the freezer from the request query
freezer = request.query_params.get('freezer', None)
# get the rack from the request query
rack = request.query_params.get('rack', None)
# get the box from the request query
box = request.query_params.get('box', None)
# if a freezer was included in the query, use it, otherwise default to the first freezer
freezer = freezer if freezer else 1
# find all aliquots in the requested rack and/or box (and freezer)
if rack and box:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer,
freezer_location__rack=rack, freezer_location__box=box)
elif rack:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__rack=rack)
elif box:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__box=box)
else:
queryset = Aliquot.objects.none()
return Response(AliquotSlimSerializer(queryset, many=True).data)
@action(methods=['post'], detail=False)
def bulk_delete(self, request):
# ensure submitted data is a list of only IDs or a list of only aliquot_strings (SampleID-AliquotNumber)
if all([str(item).isdigit() for item in request.data]):
aliquots = Aliquot.objects.filter(id__in=request.data)
if len(aliquots) != len(request.data):
aliquot_ids = [aliquot.id for aliquot in aliquots]
invalid_ids = list(set(request.data).difference(aliquot_ids))
message = "Invalid request. No aliquots deleted. The following submitted values could not be found"
message += " in the database: " + str(invalid_ids)
return JsonResponse({"message": message}, status=400)
else:
freezer_location_ids = [aliquot.freezer_location_id for aliquot in aliquots]
Aliquot.objects.filter(id__in=request.data).delete()
FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()
return JsonResponse({"message": "Aliquots deleted."}, status=200)
elif all([isinstance(item, str) and '-' in item for item in request.data]):
aliquot_ids = []
freezer_location_ids = []
invalid_ids = []
for item in request.data:
item_split = item.split('-')
aliquot = Aliquot.objects.filter(sample=item_split[0], aliquot_number=item_split[1]).first()
if aliquot:
aliquot_ids.append(aliquot.id)
freezer_location_ids.append(aliquot.freezer_location_id)
else:
invalid_ids.append(item)
if len(invalid_ids) > 0:
message = "Invalid request. No aliquots deleted. The following submitted values could not be found"
message += " in the database: " + str(invalid_ids)
return JsonResponse({"message": message}, status=400)
else:
Aliquot.objects.filter(id__in=aliquot_ids).delete()
FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()
return JsonResponse({"message": "Aliquots deleted."}, status=200)
else:
message = "Invalid request. Submitted data must be a list/array of aliquot IDs"
message += "or sample_id-aliquot_number combinations (e.g., '1001-3')"
return JsonResponse({"message": message}, status=400)
def get_serializer_class(self):
if not isinstance(self.request.data, list):
return AliquotSerializer
else:
return self.serializer_class
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list) and len(data) > 0 and 'aliquot_count' in data[0]:
kwargs['many'] = True
return super(AliquotViewSet, self).get_serializer(*args, **kwargs)
class SampleTypeViewSet(HistoryViewSet):
queryset = SampleType.objects.all()
serializer_class = SampleTypeSerializer
class MatrixViewSet(HistoryViewSet):
queryset = Matrix.objects.all()
serializer_class = MatrixSerializer
class FilterTypeViewSet(HistoryViewSet):
queryset = FilterType.objects.all()
serializer_class = FilterTypeSerializer
class StudyViewSet(HistoryViewSet):
queryset = Study.objects.all()
serializer_class = StudySerializer
class UnitViewSet(HistoryViewSet):
queryset = Unit.objects.all()
serializer_class = UnitSerializer
######
#
# Freezer Locations
#
######
class FreezerLocationViewSet(HistoryViewSet):
queryset = FreezerLocation.objects.all()
serializer_class = FreezerLocationSerializer
@action(methods=['get'], detail=False)
def get_next_available(self, request):
        # get the first empty box in any freezer
first_empty_box = FreezerLocation.objects.get_first_empty_box()
if first_empty_box is None:
first_empty_box = "There are no more empty boxes in this freezer!"
# get the study_id from the request query
study_id = request.query_params.get('study', None)
last_spot = FreezerLocation.objects.get_last_occupied_spot(study_id)
# if a last spot is found look up the next available spot
if last_spot is not None:
next_spot = FreezerLocation.objects.get_next_available_spot(last_spot)
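            # next_spot is expected to be a dict built by the FreezerLocation manager that includes
            # at least 'available_spots_in_box', which the empty-box check below relies on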
# if there is a next spot
if next_spot is not None:
# start building the full response object
resp = next_spot
# determine maximum available spots in a box in this freezer (for an empty box)
rows_in_box = last_spot.freezer.rows
spots_in_row = last_spot.freezer.spots
spots_in_box = rows_in_box * spots_in_row
# ensure next spot and next empty box are not the same
get_second_empty_box = True if next_spot['available_spots_in_box'] == spots_in_box else False
next_empty_box = FreezerLocation.objects.get_next_empty_box(last_spot, get_second_empty_box)
# then add the next empty box to the response object
resp.update({"next_empty_box": next_empty_box})
# no next spot was found
else:
resp = {"not_found": "There are no more empty boxes in this freezer!"}
# otherwise no last spot has been found
else:
# if a study_id was included in the query, mention it in the response
if study_id is not None:
study = Study.objects.filter(id=study_id).first()
message = "No aliquots for "
if study is not None:
message += study.name + " "
message += "(Study ID #" + str(study_id) + ") are stored in any freezer."
# otherwise inform the user that no freezer locations have been used
else:
message = "No aliquots are stored in any freezer."
resp = {"not_found": message}
resp.update({"next_empty_box": first_empty_box})
return Response(resp)
class FreezerViewSet(HistoryViewSet):
queryset = Freezer.objects.all()
serializer_class = FreezerSerializer
######
#
# Final Sample Values
#
######
class FinalConcentratedSampleVolumeViewSet(HistoryViewSet):
serializer_class = FinalConcentratedSampleVolumeSerializer
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = FinalConcentratedSampleVolume.objects.all()
# filter by sample ID, exact list
sample = self.request.query_params.get('sample', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(sample__in=sample_list)
return queryset
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(FinalConcentratedSampleVolumeViewSet, self).get_serializer(*args, **kwargs)
class ConcentrationTypeViewSet(HistoryViewSet):
queryset = ConcentrationType.objects.all()
serializer_class = ConcentrationTypeSerializer
class FinalSampleMeanConcentrationViewSet(HistoryViewSet):
serializer_class = FinalSampleMeanConcentrationSerializer
@action(detail=False)
def summary_statistics(self, request):
sample = request.query_params.get('sample', None)
target = request.query_params.get('target', None)
statistic = request.query_params.get('statistic', None)
report_type = ReportType.objects.filter(id=2).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_results_summary_report.delay(sample, target, statistic, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Results Summary Report received."}, status=200)
@action(detail=False)
def results(self, request):
sample = request.query_params.get('sample', None)
target = request.query_params.get('target', None)
report_type = ReportType.objects.filter(id=3).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_individual_sample_report.delay(sample, target, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Individual Sample Report received."}, status=200)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
queryset = FinalSampleMeanConcentration.objects.all()
# filter by sample ID, exact list
sample = query_params.get('sample', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(sample__in=sample_list)
# filter by target ID, exact list
target = query_params.get('target', None)
if target is not None:
target_list = target.split(',')
queryset = queryset.filter(target__in=target_list)
# filter by study ID, exact list
study = query_params.get('study', None)
if study is not None:
            study_list = study.split(',')
queryset = queryset.filter(sample__study__in=study_list)
# filter by collection_start_date, exact list
collection_start_date = query_params.get('collection_start_date', None)
if collection_start_date is not None:
            collection_start_date_list = collection_start_date.split(',')
queryset = queryset.filter(sample__collection_start_date__in=collection_start_date_list)
# filter by collaborator_sample_id, exact list
collaborator_sample_id = query_params.get('collaborator_sample_id', None)
if collaborator_sample_id is not None:
            collaborator_sample_id_list = collaborator_sample_id.split(',')
queryset = queryset.filter(sample__collaborator_sample_id__in=collaborator_sample_id_list)
# recalc reps validity
for fsmc in queryset:
recalc_reps('FinalSampleMeanConcentration', fsmc.sample.id, target=fsmc.target.id, recalc_rep_conc=False)
return queryset
# override the default GET method to recalc all child PCR Replicates first before the FSMC Select query
def retrieve(self, request, *args, **kwargs):
recalc_reps('FinalSampleMeanConcentration',
self.get_object().sample.id, target=self.get_object().target.id, recalc_rep_conc=False)
return super(FinalSampleMeanConcentrationViewSet, self).retrieve(request, *args, **kwargs)
######
#
# Sample Groups
#
######
class SampleSampleGroupViewSet(HistoryViewSet):
queryset = SampleSampleGroup.objects.all()
serializer_class = SampleSampleGroupSerializer
class SampleGroupViewSet(HistoryViewSet):
queryset = SampleGroup.objects.all()
serializer_class = SampleGroupSerializer
######
#
# Analyses
#
######
class SampleAnalysisBatchViewSet(HistoryViewSet):
queryset = SampleAnalysisBatch.objects.all()
serializer_class = SampleAnalysisBatchSerializer
class AnalysisBatchViewSet(HistoryViewSet):
queryset = AnalysisBatch.objects.all()
serializer_class = AnalysisBatchSerializer
# override the default DELETE method to prevent deletion of an AnalysisBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch__analysis_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "An Analysis Batch may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(AnalysisBatchViewSet, self).destroy(request, *args, **kwargs)
class AnalysisBatchDetailViewSet(HistoryViewSet):
serializer_class = AnalysisBatchDetailSerializer
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = AnalysisBatch.objects.all()
batch = self.request.query_params.get('id', None)
if batch is not None:
if LIST_DELIMETER in batch:
batch_list = batch.split(',')
queryset = queryset.filter(id__in=batch_list)
else:
queryset = queryset.filter(id__exact=batch)
return queryset
class AnalysisBatchSummaryViewSet(HistoryViewSet):
serializer_class = AnalysisBatchSummarySerializer
@action(detail=False)
def get_count(self, request):
query_params = self.request.query_params
return Response({"count": self.build_queryset(query_params).count()})
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
study = self.request.query_params.get('study', None)
if study is not None:
queryset = AnalysisBatch.objects.prefetch_related('samples').all()
else:
queryset = AnalysisBatch.objects.all()
# filter by batch ID, exact list
batch = self.request.query_params.get('id', None)
if batch is not None:
if LIST_DELIMETER in batch:
batch_list = batch.split(',')
queryset = queryset.filter(id__in=batch_list)
else:
queryset = queryset.filter(id__exact=batch)
# filter by batch ID, range
from_batch = query_params.get('from_id', None)
to_batch = query_params.get('to_id', None)
if from_batch is not None and to_batch is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(id__range=(from_batch, to_batch))
elif to_batch is not None:
queryset = queryset.filter(id__lte=to_batch)
elif from_batch is not None:
queryset = queryset.filter(id__gte=from_batch)
# filter by study ID, exact list
if study is not None:
if LIST_DELIMETER in study:
study_list = study.split(',')
queryset = queryset.filter(samples__study__in=study_list).distinct()
else:
queryset = queryset.filter(samples__study__exact=study).distinct()
return queryset
class AnalysisBatchTemplateViewSet(HistoryViewSet):
queryset = AnalysisBatchTemplate.objects.all()
serializer_class = AnalysisBatchTemplateSerializer
######
#
# Extractions
#
######
class ExtractionMethodViewSet(HistoryViewSet):
queryset = ExtractionMethod.objects.all()
serializer_class = ExtractionMethodSerializer
class ExtractionBatchViewSet(HistoryViewSet):
queryset = ExtractionBatch.objects.all()
# override the default serializer_class if summary fields are requested
def get_serializer_class(self):
include_summary_fields = self.request.query_params.get('includeSummaryFields', None)
if include_summary_fields is not None and include_summary_fields.lower() == 'true':
return ExtractionBatchSummarySerializer
else:
return ExtractionBatchSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(ExtractionBatchViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of an ExtractionBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "An Extraction Batch may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(ExtractionBatchViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
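    # illustrative bulk payload (hypothetical IDs and values):
    #   [{"id": 1, "ext_pos_dna_cq_value": 21.3}, {"id": 2, "ext_pos_rna_rt_cq_value": 30.1}]
    # every item must include "id"; if any item fails validation, none are saved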
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
eb_id = item.pop('id')
eb = ExtractionBatch.objects.filter(id=eb_id).first()
item['modified_by'] = request.user
# remove nulls coming from client (user not actually sending nulls, so no need to trigger recalcs)
if 'ext_pos_rna_rt_cq_value' in item and item['ext_pos_rna_rt_cq_value'] is None:
item.pop('ext_pos_rna_rt_cq_value')
if 'ext_pos_dna_cq_value' in item and item['ext_pos_dna_cq_value'] is None:
item.pop('ext_pos_dna_cq_value')
if eb:
serializer = self.get_serializer(eb, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
message = "No ExtractionBatch exists with this ID: " + str(eb_id)
response_errors.append({"extractionbatch": message})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = ExtractionBatch.objects.filter(id=pk).first()
if rep:
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
message = "No ExtractionBatch exists with this ID: " + str(pk)
return JsonResponse({"extractionbatch": message}, status=400)
class ReverseTranscriptionViewSet(HistoryViewSet):
queryset = ReverseTranscription.objects.all()
serializer_class = ReverseTranscriptionSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(ReverseTranscriptionViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of a ReverseTranscription with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch__reversetranscriptions=self.get_object().id).exclude(
cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A Reverse Transcription may not be deleted"
message += " if any related PCR Replicates have results data entered."
raise APIException(message)
return super(ReverseTranscriptionViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
rt_id = item.pop('id')
rt = ReverseTranscription.objects.filter(id=rt_id).first()
if rt:
serializer = self.serializer_class(rt, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append(
{"reversetranscription": "No ReverseTranscription exists with this ID: " + str(rt_id)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = ReverseTranscription.objects.filter(id=pk).first()
if rep:
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse(
{"reversetranscription": "No ReverseTranscription exists with this ID: " + str(pk)}, status=400)
class SampleExtractionViewSet(HistoryViewSet):
queryset = SampleExtraction.objects.all()
serializer_class = SampleExtractionSerializer
@action(detail=False)
def inhibition_report(self, request):
sample = request.query_params.get('sample', None)
report_type = ReportType.objects.filter(id=1).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_inhibition_report.delay(sample, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Inhibition Report received."}, status=200)
# override the default DELETE method to prevent deletion of a SampleExtraction with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
sample_extraction=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A Sample Extraction may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(SampleExtractionViewSet, self).destroy(request, *args, **kwargs)
class PCRReplicateViewSet(HistoryViewSet):
serializer_class = PCRReplicateSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(PCRReplicateViewSet, self).get_serializer(*args, **kwargs)
def get_queryset(self):
queryset = PCRReplicate.objects.all()
id = self.request.query_params.get('id', None)
if id is not None:
if LIST_DELIMETER in id:
id_list = id.split(',')
queryset = queryset.filter(id__in=id_list)
else:
queryset = queryset.filter(id__exact=id)
return queryset
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
rep_id = item.pop('id')
rep = PCRReplicate.objects.filter(id=rep_id).first()
if rep:
new_invalid = item.get('invalid', None)
if new_invalid is not None and new_invalid != rep.invalid:
item['invalid_override'] = request.user.id
rep.replicate_concentration = rep.calc_rep_conc()
serializer = self.serializer_class(rep, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append({"pcrreplicate": "No PCRReplicate exists with this ID: " + str(rep_id)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = PCRReplicate.objects.filter(id=pk).first()
if rep:
new_invalid = request_data.get('invalid', None)
if new_invalid is not None and new_invalid != rep.invalid:
if request_data.get('invalid_override', None) is None:
request_data['invalid_override'] = request.user.id
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse({"pcrreplicate": "No PCRReplicate exists with this ID: " + str(pk)}, status=400)
class PCRReplicateBatchViewSet(HistoryViewSet):
serializer_class = PCRReplicateBatchSerializer
    def isnumber(self, val):
        # return a truthy value for any numeric input (True for zero so that 0 still counts as a number,
        # the float itself otherwise) and False for anything that cannot be converted, including None
        try:
            return True if float(val) == 0 else float(val)
        except (TypeError, ValueError):
            return False
def err_obj(self, field, message, severity):
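        # e.g. err_obj("ext_neg_cq_value", "ext_neg_cq_value ('cp') is missing", 2) returns
        # {"field": "ext_neg_cq_value", "message": "ext_neg_cq_value ('cp') is missing", "severity": 2}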
return {"field": field, "message": message, "severity": severity}
def validate_controls(self, field):
synonym = " ('cp')" if 'cq_value' in field else " ('concentration')" if 'gc_reaction' in field else ''
invalid_reason = None
if field not in self.request.data:
invalid_reason = self.err_obj(field, field + synonym + " is missing", 2)
elif self.request.data[field] is not None:
if not self.isnumber(self.request.data[field]):
invalid_reason = self.err_obj(field, field + synonym + " is not a number", 1)
elif self.request.data[field] > Decimal('0') and field not in ['pcr_pos_cq_value', 'pcr_pos_gc_reaction']:
                # eventually we will also validate pcr_pos_cq_value by testing if it is >0.5 cycles from expected
invalid_reason = self.err_obj(field, field + synonym + " is positive", 1)
return invalid_reason
@action(methods=['post'], detail=False)
def bulk_load_negatives(self, request):
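        # illustrative request payload (hypothetical IDs and values); each item identifies one replicate batch
        # whose negative controls and replicate values will be zeroed out:
        # [{"extraction_batch": 1, "target": 2, "replicate_number": 1, "pcr_pos_cq_value": 32.1}]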
is_valid = True
valid_data = []
response_errors = []
for item in request.data:
item_validation_errors = []
if 'extraction_batch' not in item:
item_validation_errors.append("extraction_batch is required")
if 'target' not in item:
item_validation_errors.append("target is required")
if 'replicate_number' not in item:
item_validation_errors.append("replicate_number is required")
if 'pcr_pos_cq_value' not in item:
item_validation_errors.append("pcr_pos_cq_value is required")
if len(item_validation_errors) > 0:
is_valid = False
response_errors.append(item_validation_errors)
continue
pcrreplicate_batch = PCRReplicateBatch.objects.filter(
extraction_batch=item['extraction_batch'], target=item['target'],
replicate_number=item['replicate_number']).first()
if pcrreplicate_batch:
if not is_valid:
continue
else:
item.pop('extraction_batch')
item.pop('target')
item.pop('replicate_number')
item['ext_neg_cq_value'] = 0
item['ext_neg_gc_reaction'] = 0
item['rt_neg_cq_value'] = 0
item['rt_neg_gc_reaction'] = 0
item['pcr_neg_cq_value'] = 0
item['pcr_neg_gc_reaction'] = 0
item['pcr_pos_gc_reaction'] = 0
item['updated_pcrreplicates'] = []
pcrreplicates = PCRReplicate.objects.filter(pcrreplicate_batch=pcrreplicate_batch.id)
for rep in pcrreplicates:
item['updated_pcrreplicates'].append(
{"sample": rep.sample_extraction.sample.id, "cq_value": 0, "gc_reaction": 0})
serializer = self.serializer_class(pcrreplicate_batch, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
message = "No PCR replicate batch was found with extraction batch of " + str(item['extraction_batch'])
message += " and target of " + str(item['target'])
message += " and replicate number of " + str(item['replicate_number'])
is_valid = False
response_errors.append({"pcrreplicatebatch": message})
if is_valid:
# now that all items are proven valid, save and return them to the user
response_data = []
for item in valid_data:
item.save()
# recalc the child rep validity
reps = PCRReplicate.objects.filter(pcrreplicate_batch=item.data['id'])
for rep in reps:
if rep.invalid_override is None:
rep.invalid = rep.calc_invalid()
rep.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
@action(methods=['post'], detail=False)
def validate(self, request):
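        # illustrative request payload (hypothetical IDs and values):
        # {"analysis_batch": 1, "extraction_number": 2, "target": 3, "replicate_number": 1,
        #  "ext_neg_cq_value": 0, "pcr_pos_cq_value": 32.1,
        #  "updated_pcrreplicates": [{"sample": 55, "cq_value": 31.2, "gc_reaction": 120.5}]}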
validation_errors = []
if 'analysis_batch' not in request.data:
validation_errors.append("analysis_batch is required")
if 'extraction_number' not in request.data:
validation_errors.append("extraction_number is required")
if 'target' not in request.data:
validation_errors.append("target is required")
if 'replicate_number' not in request.data:
validation_errors.append("replicate_number is required")
if len(validation_errors) > 0:
return Response(validation_errors)
extraction_batch = ExtractionBatch.objects.filter(
analysis_batch=request.data['analysis_batch'],
extraction_number=request.data['extraction_number']
).first()
if not extraction_batch:
message = "No extraction batch was found with analysis batch of " + str(request.data['analysis_batch'])
message += " and extraction number of " + str(request.data['extraction_number'])
return Response({"extraction_batch": message})
target = Target.objects.filter(id=request.data['target']).first()
if not target:
message = "No target was found with ID of " + str(request.data['target'])
return Response({"target": message})
pcrreplicate_batch = PCRReplicateBatch.objects.filter(
extraction_batch=extraction_batch.id,
target=target.id,
replicate_number=request.data['replicate_number']
).first()
if not pcrreplicate_batch:
message = "No PCR replicate batch was found with extraction batch of " + str(extraction_batch.id)
message += " and target of " + str(request.data['target'])
message += " and replicate number of " + str(request.data['replicate_number'])
return Response({"pcrreplicate_batch": message}, status=400)
rna = True if target.nucleic_acid_type.name == 'RNA' else False
# start building up the response object
field_validations = {
"id": pcrreplicate_batch.id,
"ext_neg_invalid": False,
"rt_neg_invalid": False,
"pcr_neg_invalid": False,
"pcr_pos_invalid": False
}
# populate the response object with the submitted control values and the control validations
control_fields = ['ext_neg_cq_value', 'ext_neg_gc_reaction', 'rt_neg_cq_value', 'rt_neg_gc_reaction',
'pcr_neg_cq_value', 'pcr_neg_gc_reaction', 'pcr_pos_cq_value', 'pcr_pos_gc_reaction']
control_validations = []
for field in control_fields:
field_validations[field] = request.data[field] if field in request.data else None
# exclude RT fields if this is a DNA target
if 'rt' not in field or rna:
validation_error = self.validate_controls(field)
if validation_error:
control_validations.append(validation_error)
if "ext_neg" in field:
field_validations["ext_neg_invalid"] = True
elif "rt_neg" in field:
field_validations["rt_neg_invalid"] = True
elif "pcr_neg" in field:
field_validations["pcr_neg_invalid"] = True
elif "pcr_pos" in field:
field_validations["pcr_pos_invalid"] = True
field_validations["validation_errors"] = control_validations
# check that pcrreplicates have been submitted
if 'updated_pcrreplicates' not in request.data or not request.data['updated_pcrreplicates']:
field_validations["updated_pcrreplicates"] = [("updated_pcrreplicates is missing", 2)]
else:
# validate pcrreplicates
existing_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch=pcrreplicate_batch.id).order_by('sample_extraction__sample__id')
all_pcrreplicates_validations = []
updated_pcrreplicates = request.data.get('updated_pcrreplicates')
updated_pcrreplicates_sample_ids = [rep['sample'] for rep in updated_pcrreplicates]
for existing_rep in existing_pcrreplicates:
sample_id = existing_rep.sample_extraction.sample.id
rep_validations = []
# attempt to find the matching updated rep
try:
rep_index = updated_pcrreplicates_sample_ids.index(sample_id)
# pop the matching updated rep from its list so that we eventually end up with an empty list,
# or a list of extraneous reps
updated_rep = updated_pcrreplicates.pop(rep_index)
# also remove the parallel sample ID so that the two lists continue to have matching indexes
del updated_pcrreplicates_sample_ids[rep_index]
# start building up the response object
response_rep = {"sample": sample_id}
rep_validations = []
# check if this rep has already been uploaded
if existing_rep.cq_value is not None:
message = "sample " + str(sample_id) + " has already been uploaded for this PCR replicate batch"
rep_validations.append(self.err_obj("cq_value", message, 1))
# validate cq_value
# remember that null is an acceptable value
if 'cq_value' not in updated_rep:
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is missing", 2))
else:
rep_cq_value = updated_rep['cq_value']
response_rep['cq_value'] = rep_cq_value
if rep_cq_value is not None:
if not self.isnumber(rep_cq_value):
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is not a number", 1))
elif rep_cq_value < Decimal('0'):
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is less than zero", 2))
# validate gc_reaction
# remember that null is an acceptable value
if 'gc_reaction' not in updated_rep:
message = "gc_reaction ('concentration') is missing"
rep_validations.append(self.err_obj("gc_reaction", message, 2))
else:
rep_gc_reaction = updated_rep['gc_reaction']
response_rep['gc_reaction'] = rep_gc_reaction
if rep_gc_reaction is not None:
if not self.isnumber(rep_gc_reaction):
message = "gc_reaction ('concentration') is not a number"
rep_validations.append(self.err_obj("gc_reaction", message, 1))
response_rep['gc_reaction_sci'] = ''
elif rep_gc_reaction < Decimal('0'):
message = "gc_reaction ('concentration') is less than zero"
rep_validations.append(self.err_obj("gc_reaction", message, 2))
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
else:
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
else:
response_rep['gc_reaction'] = None
response_rep['gc_reaction_sci'] = ''
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
# no matching updated_rep was found
except ValueError:
# start building up the response object
response_rep = {"sample": sample_id}
message = "sample " + str(sample_id) + " expected but not found in submission"
rep_validations.append(self.err_obj("sample", message, 2))
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
# now list out the other updated reps that were submitted but do not belong to this batch
for extraneous_rep in updated_pcrreplicates:
rep_validations = []
sample_id = "(No Sample ID)"
if 'sample' not in extraneous_rep or extraneous_rep['sample'] is None:
validation_error = self.err_obj("sample", "sample is a required field", 1)
else:
sample_id = str(extraneous_rep.get('sample'))
message = "sample " + sample_id + " is not in this PCR replicate batch"
validation_error = self.err_obj("sample", message, 1)
# start building up the response object
response_rep = {"sample": sample_id}
if 'cq_value' not in extraneous_rep:
continue
else:
rep_cq_value = extraneous_rep['cq_value']
response_rep['cq_value'] = rep_cq_value
if 'gc_reaction' not in extraneous_rep:
continue
else:
rep_gc_reaction = extraneous_rep['gc_reaction']
response_rep['gc_reaction'] = rep_gc_reaction
if not self.isnumber(rep_gc_reaction):
response_rep['gc_reaction_sci'] = ''
else:
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
rep_validations.append(validation_error)
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
field_validations["updated_pcrreplicates"] = all_pcrreplicates_validations
return JsonResponse(field_validations, safe=False, status=200)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = PCRReplicateBatch.objects.all()
# if ID is in query, only search by ID and ignore other params
batch = self.request.query_params.get('id', None)
if batch is not None:
queryset = queryset.filter(id__exact=batch)
# else, search by other params (that don't include ID)
else:
analysis_batch = self.request.query_params.get('analysis_batch', None)
extraction_number = self.request.query_params.get('extraction_number', None)
if analysis_batch is not None and extraction_number is not None:
queryset = queryset.filter(extraction_batch__analysis_batch__exact=analysis_batch,
extraction_batch__extraction_number__exact=extraction_number)
target = self.request.query_params.get('target', None)
if target is not None:
queryset = queryset.filter(target__exact=target)
replicate_number = self.request.query_params.get('replicate_number', None)
if replicate_number is not None:
queryset = queryset.filter(replicate_number__exact=replicate_number)
return queryset
# override the default DELETE method to prevent deletion of a PCRReplicateBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A PCR Replicate Batch may not be deleted"
message += " if any related PCR Replicates have results data entered."
raise APIException(message)
return super(PCRReplicateBatchViewSet, self).destroy(request, *args, **kwargs)
class StandardCurveViewSet(HistoryViewSet):
queryset = StandardCurve.objects.all()
serializer_class = StandardCurveSerializer
class InhibitionViewSet(HistoryViewSet):
queryset = Inhibition.objects.all()
serializer_class = InhibitionSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(InhibitionViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of an Inhibition with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates_dna = PCRReplicate.objects.filter(
sample_extraction__inhibition_dna=self.get_object().id).exclude(cq_value__isnull=True)
nonnull_pcrreplicates_rna = PCRReplicate.objects.filter(
sample_extraction__inhibition_rna=self.get_object().id).exclude(cq_value__isnull=True)
nonnull_pcrreplicates = nonnull_pcrreplicates_dna.union(nonnull_pcrreplicates_rna).distinct()
if any(nonnull_pcrreplicates):
message = "An Inhibition may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(InhibitionViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
inhib = item.pop('id')
inhibition = Inhibition.objects.filter(id=inhib).first()
if inhibition:
serializer = self.serializer_class(inhibition, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append({"inhibition": "No Inhibition exists with this ID: " + str(inhib)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
inhibition = Inhibition.objects.filter(id=pk).first()
if inhibition:
serializer = self.serializer_class(inhibition, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse({"inhibition": "No Inhibition exists with this ID: " + str(pk)}, status=400)
class SampleInhibitionViewSet(HistoryViewSet):
serializer_class = SampleInhibitionSerializer
# override the default queryset to allow filtering by URL arguments
# if sample ID is in query, only search by sample ID and ignore other params
def get_queryset(self):
queryset = Sample.objects.all()
# filter by sample IDs, exact list
sample = self.request.query_params.get('id', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(id__in=sample_list)
# else, search by other params (that don't include sample ID)
else:
# filter by analysis batch ID, exact
analysis_batch = self.request.query_params.get('analysis_batch', None)
if analysis_batch is not None:
                queryset = queryset.filter(analysis_batches__exact=analysis_batch)
return queryset
class InhibitionCalculateDilutionFactorView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
ab = request_data.get('analysis_batch', None)
en = request_data.get('extraction_number', None)
na = request_data.get('nucleic_acid_type', None)
eb = ExtractionBatch.objects.filter(analysis_batch=ab, extraction_number=en).first()
if eb:
serializer = InhibitionCalculateDilutionFactorSerializer(data=request_data)
if serializer.is_valid():
is_valid = True
response_data = []
response_errors = []
pos = request_data.get('inh_pos_cq_value', None)
inhibitions = request_data.get('inhibitions', None)
for inhibition in inhibitions:
cq = inhibition.get('cq_value', None)
sample = inhibition.get('sample', None)
inhib = Inhibition.objects.filter(sample=sample, extraction_batch=eb, nucleic_acid_type=na).first()
if inhib:
                        suggested_dilution_factor = None
                        # If INH CONT Cq minus Sample Cq<2 cycles, then dilution factor = 1 (no dilution)
                        # If INH CONT Cq minus Sample Cq>=2 cycles AND Sample Cq<36, then dilution factor = 5
                        # If INH CONT Cq minus Sample Cq>2 cycles AND Sample Cq>36 or no Cq, then dilution factor = 10
                        if not cq:
                            suggested_dilution_factor = 10
                        else:
                            # only compute the difference from the positive control when a sample Cq was reported,
                            # otherwise abs(pos - cq) would fail on a missing (None) value
                            diff = abs(pos - cq)
                            if 0.0 <= diff < 2.0:
                                suggested_dilution_factor = 1
                            elif diff >= 2.0 and cq < 36.0:
                                suggested_dilution_factor = 5
                            elif diff > 2.0 and cq > 36.0:
                                suggested_dilution_factor = 10
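                        # illustrative (hypothetical) example: inh_pos_cq_value=30.0 and cq_value=27.5
                        # give diff=2.5 with cq < 36, so the suggested dilution factor would be 5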
new_data = {"id": inhib.id, "sample": sample, "cq_value": cq,
"suggested_dilution_factor": suggested_dilution_factor,
"extraction_batch": eb.id}
response_data.append(new_data)
else:
is_valid = False
message = "No Inhibition exists with Sample ID: " + str(sample)
message += ", Extraction Batch ID: " + str(eb) + ", Nucleic Acid Type ID: " + str(na)
response_errors.append({"inhibition": message})
if is_valid:
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
return Response(serializer.errors, status=400)
else:
message = "No Extraction Batch exists with Analysis Batch ID: " + str(ab)
message += " and Extraction Number: " + str(en)
return JsonResponse({"extraction_batch": message}, status=400)
class TargetViewSet(HistoryViewSet):
queryset = Target.objects.all()
serializer_class = TargetSerializer
######
#
# Misc
#
######
class FieldUnitViewSet(HistoryViewSet):
queryset = FieldUnit.objects.all()
serializer_class = FieldUnitSerializer
class NucleicAcidTypeViewSet(HistoryViewSet):
queryset = NucleicAcidType.objects.all()
serializer_class = NucleicAcidTypeSerializer
class RecordTypeViewSet(HistoryViewSet):
queryset = RecordType.objects.all()
serializer_class = RecordTypeSerializer
class OtherAnalysisViewSet(HistoryViewSet):
queryset = OtherAnalysis.objects.all()
serializer_class = OtherAnalysisSerializer
######
#
# Users
#
######
class UserViewSet(HistoryViewSet):
serializer_class = UserSerializer
def get_queryset(self):
# do not return the admin and public users
queryset = User.objects.all().exclude(id__in=[1])
# filter by username, exact
username = self.request.query_params.get('username', None)
if username is not None:
queryset = queryset.filter(username__exact=username)
return queryset
class AuthView(views.APIView):
authentication_classes = (CustomBasicAuthentication,)
serializer_class = UserSerializer
def post(self, request):
# remove all sessions to prevent CSRF missing error on subsequent basic auth requests
if request.user:
user_sessions = []
all_sessions = Session.objects.filter(expire_date__gte=timezone.now())
for session in all_sessions:
if str(request.user.id) == session.get_decoded().get('_auth_user_id'):
user_sessions.append(session.pk)
Session.objects.filter(pk__in=user_sessions).delete()
resp = Response(self.serializer_class(request.user).data)
# attempt to remove CSRF and session cookies
resp.delete_cookie('csrftoken')
resp.delete_cookie('sessionid')
return resp
######
#
# Reports
#
######
class QualityControlReportView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
samples = request_data.get('samples', None)
report_type = ReportType.objects.filter(id=4).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_quality_control_report.delay(samples, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Inhibition Report received."}, status=200)
class ControlsResultsReportView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
sample_ids = request_data.get('samples', None)
target_ids = request_data.get('targets', None)
report_type = ReportType.objects.filter(id=5).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_control_results_report.delay(sample_ids, target_ids, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Control Results Report received."}, status=200)
class ReportFileViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = ReportFileSerializer
def get_queryset(self):
queryset = ReportFile.objects.all()
query_params = self.request.query_params
# filter by report_type, exact list
report_type = query_params.get('report_type', None)
if report_type is not None:
if LIST_DELIMETER in report_type:
report_type_list = report_type.split(LIST_DELIMETER)
queryset = queryset.filter(report_type__in=report_type_list)
else:
queryset = queryset.filter(report_type__exact=report_type)
return queryset
class ReportTypeViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
queryset = ReportType.objects.all()
serializer_class = ReportTypeSerializer
class StatusViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
queryset = Status.objects.all()
serializer_class = StatusSerializer
|
the-stack_0_415 | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Add license notice to every source file if not present
"""
import sys
from argparse import ArgumentParser
from pathlib import Path
from pathspec import PathSpec
PROJECT = "verde"
YEAR = "2017"
NOTICE = f"""
# Copyright (c) {YEAR} The {PROJECT.title()} Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
""".strip()
CHECK_HELP = """
Don't write the files, just return the status. Return code 0 means
nothing would change. Return code 1 means some files lacks the license notice.
"""
def get_gitignore(root):
"""
Return a PathSpec matching gitignore content if present.
This function is a modified version of the one present in Black
(https://github.com/psf/black) available under MIT License.
"""
gitignore = root / ".gitignore"
lines = []
if gitignore.is_file():
with gitignore.open() as gi_file:
lines = gi_file.readlines()
return PathSpec.from_lines("gitwildmatch", lines)
def main():
"""
Add license notice to every source file if not present or just check
"""
# Create option parser
parser = ArgumentParser(
description=" Add license notice to every source file if not present."
)
parser.add_argument(
"--check", action="store_true", dest="check", default=False, help=CHECK_HELP
)
args = parser.parse_args()
gitignore = get_gitignore(Path("."))
python_files = [
path
for path in Path(".").glob("**/*.py")
if not str(path).startswith(".")
if not gitignore.match_file(path)
]
missing_notice_files = []
for pyfile in python_files:
code = pyfile.read_text()
if not code.startswith(NOTICE):
missing_notice_files.append(pyfile)
if args.check:
if missing_notice_files:
print("License notice is missing in some source files! 💔")
for pyfile in missing_notice_files:
print(f" {pyfile}")
sys.exit(1)
else:
print("All source files have the license notice! 🎉")
sys.exit(0)
else:
print("Successfully added license notice to:")
for pyfile in missing_notice_files:
code = pyfile.read_text()
pyfile.write_text("\n".join([NOTICE, code]))
print(f" {pyfile}")
sys.exit(0)
if __name__ == "__main__":
main()
|
the-stack_0_416 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.research.utils import aggregate_fns
def create_weights_delta(input_size=2, hidden_size=5, constant=0):
"""Returns deterministic weights delta for a linear model."""
kernel = constant + tf.reshape(
tf.range(input_size * hidden_size, dtype=tf.float32),
[input_size, hidden_size])
bias = constant + tf.range(hidden_size, dtype=tf.float32)
return collections.OrderedDict([('dense/kernel', kernel),
('dense/bias', bias)])
class ClipNormAggregateFnTest(tf.test.TestCase):
def global_norm(self, value):
return tf.linalg.global_norm(tf.nest.flatten(value))
def test_clip_by_global_norm(self):
clip_norm = 20.0
aggregate_fn = aggregate_fns.build_clip_norm_aggregate_fn(clip_norm)
# Global l2 norms [17.74824, 53.99074].
deltas = [create_weights_delta(), create_weights_delta(constant=10)]
deltas_type = tff.framework.type_from_tensors(deltas[0])
weights = [1., 1.]
@tff.federated_computation(
tff.FederatedType(deltas_type, tff.CLIENTS),
tff.FederatedType(tf.float32, tff.CLIENTS))
def federated_aggregate_test(deltas, weights):
state = tff.federated_value(aggregate_fn.initialize(), tff.SERVER)
return aggregate_fn(state, deltas, weights)
federated_aggregate_test.type_signature.result.check_equivalent_to(
tff.StructType((
tff.FederatedType(
aggregate_fns.ClipNormAggregateState(
clip_norm=tf.float32, max_norm=tf.float32), tff.SERVER),
tff.FederatedType(deltas_type, tff.SERVER),
)))
state, mean = federated_aggregate_test(deltas, weights)
expected_clipped = []
for delta in deltas:
flat = tf.nest.flatten(delta)
clipped, _ = tf.clip_by_global_norm(flat, clip_norm)
expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))
expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,
*expected_clipped)
self.assertEqual(state.clip_norm, tf.constant(20.0, tf.float32))
self.assertEqual(state.max_norm, tf.constant(53.99074, tf.float32))
tf.nest.map_structure(self.assertAllEqual, expected_mean, mean)
class FixedClipNormProcessTest(tf.test.TestCase):
def test_clip_by_global_norm(self):
clip_norm = 20.0
test_deltas = [create_weights_delta(), create_weights_delta(constant=10)]
update_type = tff.framework.type_from_tensors(test_deltas[0])
aggregate_fn = aggregate_fns.build_fixed_clip_norm_mean_process(
clip_norm=clip_norm, model_update_type=update_type)
self.assertEqual(
aggregate_fn.next.type_signature,
tff.FunctionType(
parameter=(
tff.FederatedType((), tff.SERVER),
tff.FederatedType(update_type, tff.CLIENTS),
tff.FederatedType(tf.float32, tff.CLIENTS),
),
result=collections.OrderedDict(
state=tff.FederatedType((), tff.SERVER),
result=tff.FederatedType(update_type, tff.SERVER),
measurements=tff.FederatedType(
aggregate_fns.NormClippedAggregationMetrics(
max_global_norm=tf.float32, num_clipped=tf.int32),
tff.SERVER)),
))
state = aggregate_fn.initialize()
weights = [1., 1.]
output = aggregate_fn.next(state, test_deltas, weights)
expected_clipped = []
for delta in test_deltas:
clipped, _ = tf.clip_by_global_norm(tf.nest.flatten(delta), clip_norm)
expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))
expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,
*expected_clipped)
self.assertAllClose(expected_mean, output['result'])
# Global l2 norms [17.74824, 53.99074].
metrics = output['measurements']
self.assertAlmostEqual(metrics.max_global_norm, 53.99074, places=5)
self.assertEqual(metrics.num_clipped, 1)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_417 | #!/usr/bin/env python3
# Copyright (c) 2018-2021 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spendability of premine and that P2SH is enforced correctly for it."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import *
from test_framework.util import *
import codecs
PREMINE_VALUE = Decimal ('222222222')
PREMINE_ADDRESS = 'dHNvNaqcD7XPDnoRjAoyfcMpHRi5upJD7p'
PREMINE_PRIVKEYS = ['b69iyynFSWcU54LqXisbbqZ8uTJ7Dawk3V3yhht6ykxgttqMQFjb',
'b3fgAKVQpMj24gbuh6DiXVwCCjCbo1cWiZC2fXgWEU9nXy6sdxD5']
PREMINE_PUBKEYS = [
'03c278d06b977e67b8ea45ef24e3c96a9258c47bc4cce3d0b497b690d672497b6e',
'0221ac9dc97fe12a98374344d08b458a9c2c1df9afb29dd6089b94a3b4dc9ad570',
]
class PremineTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module (self):
self.skip_if_no_wallet ()
def run_test(self):
node = self.nodes[0]
node.importaddress (PREMINE_ADDRESS)
# Find basic data about the genesis coinbase tx.
genesis = node.getblock (node.getblockhash (0), 2)
assert_equal (len (genesis['tx']), 1)
tx = genesis['tx'][0]
txid = tx['hash']
assert_equal (len (tx['vout']), 1)
out = tx['vout'][0]
assert_equal (out['value'], PREMINE_VALUE)
assert_equal (out['scriptPubKey']['address'], PREMINE_ADDRESS)
# Accessing it should work normally (upstream Bitcoin/Namecoin have a
# special check that disallows the genesis coinbase with getrawtransaction,
# as it is not spendable).
node.gettransaction (txid)
assert_equal (node.getrawtransaction (txid, False, genesis['hash']),
tx['hex'])
# The coinbase txout should be in the UTXO set.
utxo = node.gettxout (txid, 0)
assert utxo is not None
# Check balance of node and then import the keys for the premine
# and check again. It should be available as spendable.
assert_equal (node.getbalance (), 0)
for key in PREMINE_PRIVKEYS:
node.importprivkey (key, 'premine')
pubkeys = []
for addr in node.getaddressesbylabel ('premine'):
data = node.getaddressinfo (addr)
if (not data['isscript']) and (not data['iswitness']):
pubkeys.append (data['pubkey'])
assert_equal (set (pubkeys), set (PREMINE_PUBKEYS))
p2sh = node.addmultisigaddress (1, PREMINE_PUBKEYS)
assert_equal (p2sh['address'], PREMINE_ADDRESS)
node.rescanblockchain ()
assert_equal (node.getbalance (), PREMINE_VALUE)
# Construct a raw tx spending the premine.
addr = node.getnewaddress ()
inputs = [{"txid": txid, "vout": 0}]
outputs = {addr: Decimal ('123456')}
rawTx = node.createrawtransaction (inputs, outputs)
# Try to "sign" it by just adding the redeem script, which would have been
# valid before the P2SH softfork. Doing so should fail, which verifies that
# P2SH is enforced right from the start and thus that the premine is safe.
data = node.getaddressinfo (PREMINE_ADDRESS)
redeemScript = data['hex']
# Prepend script size, so that it will correctly push the script hash
# to the stack.
redeemScript = ("%02x" % (len (redeemScript) // 2)) + redeemScript
forgedTx = tx_from_hex (rawTx)
forgedTx.vin[0].scriptSig = codecs.decode (redeemScript, 'hex_codec')
forgedTx = forgedTx.serialize ().hex ()
assert_raises_rpc_error (-26, "not valid",
node.sendrawtransaction, forgedTx, 0)
# Sign and send the raw tx, should succeed.
signed = node.signrawtransactionwithwallet (rawTx)
assert signed['complete']
signedTx = signed['hex']
sendId = node.sendrawtransaction (signedTx, 0)
node.generate (1)
assert_equal (node.gettransaction (sendId)['confirmations'], 1)
if __name__ == '__main__':
PremineTest().main()
|
the-stack_0_418 | from re import search
from setuptools import setup, find_packages
with open("graphql/__init__.py") as init_file:
version = search('__version__ = "(.*)"', init_file.read()).group(1)
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
name="GraphQL-core-next",
version=version,
description="GraphQL-core-next is a Python port of GraphQL.js,"
" the JavaScript reference implementation for GraphQL.",
long_description=readme,
long_description_content_type="text/markdown",
keywords="graphql",
url="https://github.com/graphql-python/graphql-core-next",
author="Christoph Zwerschke",
author_email="[email protected]",
license="MIT license",
# PEP-561: https://www.python.org/dev/peps/pep-0561/
package_data={"graphql": ["py.typed"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
install_requires=[],
python_requires=">=3.6",
test_suite="tests",
tests_require=[
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-describe",
"black",
"flake8",
"mypy",
"tox",
"python-coveralls",
],
packages=find_packages(include=["graphql"]),
include_package_data=True,
zip_safe=False,
)
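# Illustrative usage: `pip install .` installs the package, and
# `python setup.py test` runs the suite declared via test_suite above.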
|
the-stack_0_420 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import sys
from datetime import datetime, timedelta
import operator
import string
from inspect import getargspec
from itertools import product, starmap
from distutils.version import LooseVersion
import nose
from numpy import nan, inf
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
date_range, period_range, timedelta_range)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import Timedelta, TimedeltaIndex
import pandas.core.common as com
import pandas.core.config as cf
import pandas.lib as lib
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
assert_frame_equal,
ensure_clean)
import pandas.util.testing as tm
#------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
_multiprocess_can_split_ = True
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEqual(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEqual(result.name, self.ts.name)
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
self.assertIsNone(self.ts.index.name)
self.assertIs(self.ts, self.ts)
cp = self.ts.copy()
cp.index.name = 'foo'
com.pprint_thing(self.ts.index.name)
self.assertIsNone(self.ts.index.name)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEqual(result.name, self.ts.name)
def test_dt_namespace_accessor(self):
# GH 7207
# test .dt namespace accessor
ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq']
ok_for_period = ok_for_base + ['qyear']
ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert']
ok_for_td = ['days','seconds','microseconds','nanoseconds']
ok_for_td_methods = ['components','to_pytimedelta']
def get_expected(s, name):
result = getattr(Index(s.values),prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
elif not com.is_list_like(result):
return result
return Series(result,index=s.index)
def compare(s, name):
a = getattr(s.dt,prop)
b = get_expected(s,prop)
if not (com.is_list_like(a) and com.is_list_like(b)):
self.assertEqual(a,b)
else:
tm.assert_series_equal(a,b)
# invalids
for s in [Series(np.arange(5)),
Series(list('abcde')),
Series(np.random.randn(5))]:
self.assertRaises(TypeError, lambda : s.dt)
# datetimeindex
for s in [Series(date_range('20130101',periods=5)),
Series(date_range('20130101',periods=5,freq='s')),
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt,prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_localize('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'US/Eastern')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
# timedeltaindex
for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),
Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt,prop)
result = s.dt.components
self.assertIsInstance(result,DataFrame)
tm.assert_index_equal(result.index,s.index)
result = s.dt.to_pytimedelta()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)
# both
index = date_range('20130101',periods=3,freq='D')
s = Series(date_range('20140204',periods=3,freq='s'),index=index)
tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
# periodindex
for s in [Series(period_range('20130101',periods=5,freq='D'))]:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
freq_result = s.dt.freq
self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(date_range('20130101',periods=5,freq='D'))
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101',periods=5,freq='D').asobject)
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_period))))
# no setting allowed
s = Series(date_range('20130101',periods=5,freq='D'))
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment','raise'):
def f():
s.dt.hour[0] = 5
self.assertRaises(com.SettingWithCopyError, f)
def test_valid_dt_with_missing_values(self):
from datetime import date, time
# GH 8689
s = Series(date_range('20130101',periods=5,freq='D'))
s_orig = s.copy()
s.iloc[2] = pd.NaT
for attr in ['microsecond','nanosecond','second','minute','hour','day']:
expected = getattr(s.dt,attr).copy()
expected.iloc[2] = np.nan
result = getattr(s.dt,attr)
tm.assert_series_equal(result, expected)
result = s.dt.date
expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')
tm.assert_series_equal(result, expected)
result = s.dt.time
expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')
tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
# GH 9322
from pandas.tseries.common import (CombinedDatetimelikeProperties,
DatetimeProperties)
self.assertIs(Series.dt, CombinedDatetimelikeProperties)
s = Series(date_range('2000-01-01', periods=3))
self.assertIsInstance(s.dt, DatetimeProperties)
with tm.assertRaisesRegexp(TypeError, "only use .dt accessor"):
Series([1]).dt
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEqual(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assertIsNone(result.name)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEqual(result.name, self.ts.name)
def test_combine_first_dt64(self):
from pandas.tseries.tools import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
rs = s0.combine_first(s1)
xp = to_datetime(Series(['2010', '2011']))
assert_series_equal(rs, xp)
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = Series([np.NaN, "2011"])
rs = s0.combine_first(s1)
xp = Series([datetime(2010, 1, 1), '2011'])
assert_series_equal(rs, xp)
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result,expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result,expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})
vc = df.i.value_counts()
result = vc.get(99,default='Missing')
self.assertEqual(result,'Missing')
vc = df.b.value_counts()
result = vc.get(False,default='Missing')
self.assertEqual(result,3)
result = vc.get(True,default='Missing')
self.assertEqual(result,'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1,5),index=lrange(1,5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2,5),index=lrange(2,5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64'))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64'))
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEqual(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEqual(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEqual(result.name, self.ts.name)
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEqual(result.name, s.name)
self.assertEqual(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip_name(self.ts)
self.assertEqual(unpickled.name, self.ts.name)
def _pickle_roundtrip_name(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEqual(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEqual(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEqual(result.name, self.ts.name)
class TestNanops(tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
s == s2
s2 == s
def test_none_comparison(self):
# bug brought up by #1079
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
self.assertRaises(TypeError, s.__eq__, None)
def test_sum_zero(self):
arr = np.array([])
self.assertEqual(nanops.nansum(arr), 0)
arr = np.empty((10, 0))
self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
# GH #844
s = Series([], index=[])
self.assertEqual(s.sum(), 0)
df = DataFrame(np.empty((10, 0)))
self.assertTrue((df.sum(1) == 0).all())
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
def test_overflow(self):
# GH 6915
# overflowing on the smaller int dtypes
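# (sum(range(5000000)) == 12499997500000, far beyond the int32 maximum of
# 2147483647, so the sum must be accumulated as int64)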
for dtype in ['int32','int64']:
v = np.arange(5000000,dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min(skipna=False)
self.assertEqual(int(result),0)
result = s.max(skipna=False)
self.assertEqual(int(result),v[-1])
# use bottleneck if available
result = s.sum()
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min()
self.assertEqual(int(result),0)
result = s.max()
self.assertEqual(int(result),v[-1])
for dtype in ['float32','float64']:
v = np.arange(5000000,dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))
result = s.min(skipna=False)
self.assertTrue(np.allclose(float(result),0.0))
result = s.max(skipna=False)
self.assertTrue(np.allclose(float(result),v[-1]))
# use bottleneck if available
result = s.sum()
self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))
result = s.min()
self.assertTrue(np.allclose(float(result),0.0))
result = s.max()
self.assertTrue(np.allclose(float(result),v[-1]))
class SafeForSparse(object):
pass
_ts = tm.makeTimeSeries()
class TestSeries(tm.TestCase, CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.ts = _ts.copy()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
self.assertNotIsInstance(scalar, float)
# coercion
self.assertEqual(float(Series([1.])), 1.0)
self.assertEqual(int(Series([1.])), 1)
self.assertEqual(long(Series([1.])), 1)
def test_astype(self):
s = Series(np.random.randn(5),name='foo')
for dtype in ['float32','float64','int64','int32']:
astyped = s.astype(dtype)
self.assertEqual(astyped.dtype, dtype)
self.assertEqual(astyped.name, s.name)
def test_constructor(self):
# Recognize TimeSeries
self.assertTrue(self.ts.is_time_series)
# Pass in Series
derived = Series(self.ts)
self.assertTrue(derived.is_time_series)
self.assertTrue(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEqual(id(self.ts.index), id(derived.index))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assertEqual(mixed.dtype, np.object_)
self.assertIs(mixed[1], np.NaN)
self.assertFalse(self.empty.is_time_series)
self.assertFalse(Series({}).is_time_series)
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
self.assertEqual(rs, xp)
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
self.assertRaises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)),dtype='int64')
result = Series(range(10),dtype='int64')
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)
cat.name = 'foo'
res = Series(cat)
self.assertEqual(res.name, cat.name)
self.assertTrue(res.values.equals(cat))
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
from pandas import tslib
data = ma.masked_all((3,), dtype='M8[ns]')
result = Series(data)
expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), tslib.iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
tm.assert_isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
self.assertEqual(s.dtype, np.dtype('i8'))
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
self.assertEqual(s.dtype, np.dtype('f8'))
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
self.assertEqual(s.dtype, np.float64)
s = Series(None, index=lrange(5), dtype=object)
self.assertEqual(s.dtype, np.object_)
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s,expected)
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
self.assertEqual(s[1], 5)
def test_constructor_dtype_datetime64(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
# in theory this should be all nulls, but since we are not
# specifying a dtype, it is ambiguous
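# (iNaT is simply the minimum int64 value, so without an explicit
# datetime64 dtype it is stored as an ordinary, non-null integer)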
s = Series(tslib.iNaT, index=lrange(5))
self.assertFalse(isnull(s).all())
s = Series(nan, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
self.assertEqual(s.dtype, 'M8[ns]')
s.ix[0] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
# invalid astypes
for t in ['s', 'D', 'us', 'ms']:
self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)
# GH3414 related
self.assertRaises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
self.assertRaises(
TypeError, lambda x: Series(dates, dtype='datetime64'))
# invalid dates can be held as object
result = Series([datetime(2,1,1)])
self.assertEqual(result[0], datetime(2,1,1,0,0))
result = Series([datetime(3000,1,1)])
self.assertEqual(result[0], datetime(3000,1,1,0,0))
# don't mix types
result = Series([ Timestamp('20130101'), 1],index=['a','b'])
self.assertEqual(result['a'], Timestamp('20130101'))
self.assertEqual(result['b'], 1)
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result,expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
self.assert_numpy_array_equal(series1.values,dates2)
self.assertEqual(series1.dtype,object)
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101',periods=3)
self.assertTrue(Series(dr).iloc[0].tz is None)
dr = date_range('20130101',periods=3,tz='UTC')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
dr = date_range('20130101',periods=3,tz='US/Eastern')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101',periods=5,freq='D')
s = Series(pi)
expected = Series(pi.asobject)
assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.ix[0] = 0
expected.ix[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_series_equal(
result, expected, check_dtype=True, check_index_type=True,
check_series_type=True)
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
ser = Series(d)
expected = Series([x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d]))
check(ser, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
ser = Series(d)
expected = Series(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False))
ser = ser.reindex(index=expected.index)
check(ser, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
refseries = Series(dict(compat.iteritems(data)))
assert_series_equal(refseries, series)
def test_orderedDict_ctor(self):
# GH3283
import pandas
import random
data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_orderedDict_subclass_ctor(self):
# GH3283
import pandas
import random
class A(OrderedDict):
pass
data = A([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
self.assertRaises(TypeError, Series, values)
values = frozenset(values)
self.assertRaises(TypeError, Series, values)
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
self.assertTrue(tm.is_sorted(series.index))
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
self.assertEqual(series.dtype, np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
tm.assert_isinstance(series.index, Index)
def test_array_finalize(self):
pass
def test_pop(self):
# GH 6600
df = DataFrame({
'A': 0,
'B': np.arange(5,dtype='int64'),
'C': 0,
})
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0,0],index=['A','C'])
assert_series_equal(k, expected)
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
self.assertRaises(TypeError, hash, s_empty)
self.assertRaises(TypeError, hash, s)
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assertEqual(nans.dtype, np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assertEqual(strings.dtype, np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assertEqual(dates.dtype, 'M8[ns]')
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iget([0, 2, 3, 4, 5])
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iget(2), 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_numpy_array_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan,index=['C'],dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object)
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
#ts[mask_shifted]
#ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
#ts.ix[mask_shifted]
#ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assert_isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(np.isscalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)
expected = Series([3,4],index=['C','C'],dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df>5)
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
expected = self.series.append(Series([1],index=['foobar']))
assert_series_equal(s,expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan,2,3])
s = Series([1,2,3])
s.iloc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s.loc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s[0] = np.nan
assert_series_equal(s,expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan]))
s = Series([False,True])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan,1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_reshape_non_2d(self):
# GH 4554
x = Series(np.random.random(201), name='x')
self.assertTrue(x.reshape(x.shape,) is x)
# GH 2719
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
np.testing.assert_array_equal(result, expected)
self.assertTrue(type(result) is type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
result = x.reshape((-1, 1))
self.assertNotIsInstance(result, Series)
result2 = np.reshape(x, (-1, 1))
self.assertNotIsInstance(result2, Series)
result = x[:, None]
expected = x.reshape((-1, 1))
assert_almost_equal(result, expected)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert(s.shape == rs.shape)
assert(rs is not s)
# test alignment
cond = Series([True,False,False,True,False],index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but the result is upcast
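# (e.g. assigning 2.5 into an int64 Series of 0..9 upcasts the whole
# Series to float64, which is what the expected dtype below asserts)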
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# can't do these, as setting these values would force a change of the
# input's itemsize, which is not supported here
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5,4,3,2,1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1,2,3,4])
result = s.where(s>2,np.nan)
expected = Series([np.nan,np.nan,3,4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan,index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0,1,2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0,'b',1,'d','e','f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a','b','c',0,1,'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)),'b','c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
# Set alternating elements
np.resize([True, False], size),
np.resize([False], size)]: # No element should be set
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1,s2])
result = comb.where(comb < 2)
expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb<1] = 5
expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
comb[comb<2] += 10
expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
def test_mask(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond, np.nan)
assert_series_equal(rs, s.mask(~cond))
def test_drop(self):
# unique
s = Series([1,2],index=['one','two'])
expected = Series([1],index=['one'])
result = s.drop(['two'])
assert_series_equal(result,expected)
result = s.drop('two', axis='rows')
assert_series_equal(result,expected)
# non-unique
# GH 5248
s = Series([1,1,2],index=['one','two','one'])
expected = Series([1,2],index=['one','one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result,expected)
result = s.drop('two')
assert_series_equal(result,expected)
expected = Series([1],index=['two'])
result = s.drop(['one'])
assert_series_equal(result,expected)
result = s.drop('one')
assert_series_equal(result,expected)
# single string/tuple-like
s = Series(range(3),index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a',))
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2,3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3],index=[False])
assert_series_equal(result,expected)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s>1, 'X')
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, ['X', 'Y', 'Z'])
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, np.array(['X', 'Y', 'Z']))
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'),
(1, 2),
('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"),) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_timeseries_periodindex(self):
# GH2891
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M')
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assertIs(getkeys(), self.ts.index)
def test_values(self):
self.assert_numpy_array_equal(self.ts, self.ts.values)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
# assert is lazy (generators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
import pandas.core.nanops as nanops
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
self.assertTrue(np.isinf(s.sum()))
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_mode(self):
s = Series([12, 12, 11, 10, 19, 11])
exp = Series([11, 12])
assert_series_equal(s.mode(), exp)
assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))
lst = [5] * 20 + [1] * 10 + [6] * 25
np.random.shuffle(lst)
s = Series(lst)
assert_series_equal(s.mode(), Series([6]))
s = Series([5] * 10)
assert_series_equal(s.mode(), Series([5]))
s = Series(lst)
s[0] = np.nan
assert_series_equal(s.mode(), Series([6.]))
s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
assert_series_equal(s.mode(), Series(['e']))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
'2013-01-02'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
dtype='M8[ns]'))
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
self.assertTrue(isnull(result))
result = s.std(ddof=1)
self.assertTrue(isnull(result))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
self.assertTrue(isnull(result))
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
        # test corner cases, skew() returns NaN unless there are at least 3 values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.skew()))
self.assertTrue(np.isnan(df.skew()).all())
else:
self.assertEqual(0, s.skew())
self.assertTrue((df.skew() == 0).all())
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])
        # test corner cases, kurt() returns NaN unless there are at least 4 values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.kurt()))
self.assertTrue(np.isnan(df.kurt()).all())
else:
self.assertEqual(0, s.kurt())
self.assertTrue((df.kurt() == 0).all())
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assertTrue(issubclass(argsorted.dtype.type, np.integer))
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
self.assertEqual(s.dtype, 'datetime64[ns]')
shifted = s.shift(-1)
self.assertEqual(shifted.dtype, 'datetime64[ns]')
self.assertTrue(isnull(shifted[4]))
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
self.assert_numpy_array_equal(mindexer, mexpected)
self.assert_numpy_array_equal(qindexer, qexpected)
self.assertFalse(np.array_equal(qindexer, mindexer))
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
self.assert_numpy_array_equal(self.ts.cummin(),
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummax(self):
self.assert_numpy_array_equal(self.ts.cummax(),
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_npdiff(self):
raise nose.SkipTest("skipping due to Series no longer being an "
"ndarray")
        # no longer works as the return type of np.diff is now ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False):
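        # Helper: validate the Series reduction named `name` against the
        # `alternate` reference implementation, covering NaN/skipna handling,
        # all-NaN input, object dtype, large int64 values, invalid axis and
        # numeric_only, both with and without bottleneck enabled.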
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max','min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
# skipna or no
self.assertTrue(notnull(f(self.series)))
self.assertTrue(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
self.assertTrue(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
            # GH 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40+1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
# Invalid axis.
self.assertRaises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in getargspec(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
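        # Helper: the NumPy accumulation `name` (e.g. cumsum) applied to the
        # Series should match the same ufunc applied to the raw ndarray,
        # including when NaNs are present.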
func = getattr(np, name)
self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_numpy_array_equal(result, expected)
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
self.assertNotIsInstance(result, Series)
def test_quantile(self):
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts,dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs)
assert_series_equal(result, expected)
dts = self.ts.index.to_series()
result = dts.quantile((.2, .2))
assert_series_equal(result, Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2]))
def test_append(self):
appendedSeries = self.series.append(self.objSeries)
for idx, value in compat.iteritems(appendedSeries):
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.objSeries.index:
self.assertEqual(value, self.objSeries[idx])
else:
self.fail("orphaned index!")
self.assertRaises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assertFalse(bool_series.all())
self.assertTrue(bool_series.any())
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
self.assertTrue(s1.all(skipna=False)) # nan && True => True
self.assertTrue(s1.all(skipna=True))
self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan
self.assertFalse(s2.any(skipna=True))
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
self.assertRaises(NotImplementedError, s.any, bool_only=True)
self.assertRaises(NotImplementedError, s.all, bool_only=True)
def test_op_method(self):
def check(series, other, check_reverse=False):
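            # Compare each Series.<op> flex method against the matching
            # function from the operator module; with check_reverse=True the
            # reflected r<op> methods are exercised as well.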
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values %
p['second'].values, dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
self.assertFalse(np.array_equal(result, result2))
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
def test_div(self):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values, dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'))
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan,1.,-1.])
result = s / 0
expected = Series([np.nan,np.inf,-np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})
expected = Series([-0.01,-np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False):
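            # The vectorized (cython/numpy) result should agree with the
            # element-wise result from Series.combine; pos_only takes absolute
            # values first (e.g. for pow).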
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other):
_check_op(series, other, operator.gt)
_check_op(series, other, operator.ge)
_check_op(series, other, operator.eq)
_check_op(series, other, operator.lt)
_check_op(series, other, operator.le)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# mixed with NaT
from pandas import tslib
td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')
self.assertEqual(td.dtype, 'timedelta64[ns]')
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), pd.NaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), tslib.iNaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), np.nan])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([pd.NaT, np.timedelta64(300000000)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# these are frequency conversion astypes
#for t in ['s', 'D', 'us', 'ms']:
# self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
self.assertRaises(TypeError, td.astype, 'int32')
# this is an invalid casting
def f():
Series([timedelta(days=1), 'foo'],dtype='m8[ns]')
self.assertRaises(Exception, f)
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
self.assertEqual(td.dtype, 'object')
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([np.nan, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, None, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, np.nan, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(
Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(
Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype(
'int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assert_isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
expected = Series(
[Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')])
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)])
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))
self.assertEqual(rs[2], value)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series(
[Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series(
[Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
'Milli', 'Nano' ]:
op = getattr(pd.offsets,do)
s + op(5)
op(5) + s
# invalid DateOffsets
for do in [ 'Week', 'BDay', 'BQuarterEnd', 'BMonthEnd', 'BYearEnd',
'BYearBegin','BQuarterBegin', 'BMonthBegin',
'MonthEnd','YearBegin', 'YearEnd',
'MonthBegin', 'QuarterBegin' ]:
op = getattr(pd.offsets,do)
self.assertRaises(TypeError, s.__add__, op(5))
self.assertRaises(TypeError, s.__radd__, op(5))
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) -Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) -Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
        for dtype in ['int32','int16','uint32','uint64','uint16','uint8']:
s2 = Series([20, 30, 40],dtype=dtype)
expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
# invalid ops
for op in ['__true_div__','__div__','__mul__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, s2.astype(float))
self.assertRaises(TypeError, sop, 2.)
for op in ['__add__','__sub__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D','h','m','s','ms','us','ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m,unit))
result = s1 / np.timedelta64(m,unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)
result = np.timedelta64(m,unit) / s1
# astype
s = Series(date_range('20130101',periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0],datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0],timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'),
Timestamp('20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,
'm': 60 * 1000000, 's': 1000000, 'us': 1}
def timedelta64(*args):
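            # Build one np.timedelta64 by summing a component per unit,
            # pairing each positional argument with the matching entry in
            # `intervals` (D, h, m, s, us).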
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'),index=['A','B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
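            # Each listed dunder op (when defined on get_ser) is expected to
            # raise TypeError for this combination of operands.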
for op in ops:
try:
op = getattr(get_ser, op, None)
if op is not None:
self.assertRaises(TypeError, op, test_ser)
except:
com.pprint_thing("Failed on op %r" % op)
raise
### timedelta64 ###
td1 = Series([timedelta(minutes=5,seconds=3)]*3)
td2 = timedelta(minutes=5,seconds=4)
ops = ['__mul__','__floordiv__','__pow__',
'__rmul__','__rfloordiv__','__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
### datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        ### datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        ### timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rsub__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0),timedelta(0),pd.NaT])
result = dt2-dt
assert_series_equal(result,expected)
result = (dt2.to_frame()-dt.to_frame())[0]
assert_series_equal(result,expected)
def test_timedelta64_functions(self):
from datetime import timedelta
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
self.assertEqual(result, 0)
result = td.idxmax()
self.assertEqual(result, 2)
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
self.assertEqual(result, 1)
result = td.idxmax()
self.assertEqual(result, 2)
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
#result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
self.assertEqual(result, expected)
result = td.min()
expected = Timedelta('1 days')
self.assertEqual(result, expected)
def test_ops_consistency_on_empty(self):
# GH 7869
# consistency on empty
# float
result = Series(dtype=float).sum()
self.assertEqual(result,0)
result = Series(dtype=float).mean()
self.assertTrue(isnull(result))
result = Series(dtype=float).median()
self.assertTrue(isnull(result))
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
self.assertEqual(result, Timedelta(0))
result = Series(dtype='m8[ns]').mean()
self.assertTrue(result is pd.NaT)
result = Series(dtype='m8[ns]').median()
self.assertTrue(result is pd.NaT)
def test_timedelta_fillna(self):
#GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9*3600+60+1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'),
Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with tm.assertRaises(AttributeError):
s.info()
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
# TimeSeries-specific
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
self.assert_numpy_array_equal(ts.fillna(method='ffill'),
[0., 1., 1., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(method='backfill'),
[0., 1., 3., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result,expected)
result = s1.fillna({})
assert_series_equal(result,s1)
result = s1.fillna(Series(()))
assert_series_equal(result,s1)
result = s2.fillna(s1)
assert_series_equal(result,s2)
result = s1.fillna({ 0 : 1})
assert_series_equal(result,expected)
result = s1.fillna({ 1 : 1})
assert_series_equal(result,Series([np.nan]))
result = s1.fillna({ 0 : 1, 1 : 1})
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1}))
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))
assert_series_equal(result,s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0,0,2.], list('bac'))
assert_series_equal(result,expected)
# limit
s = Series(np.nan,index=[0,1,2])
result = s.fillna(999,limit=1)
expected = Series([999,np.nan,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
result = s.fillna(999,limit=2)
expected = Series([999,999,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
        a = Timestamp(datetime(1993, 1, 7, 13, 30, 0))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_datetime64_with_index(self):
# arithmetic integer ops with an index
s = Series(np.random.randn(5))
expected = s-s.index.to_series()
result = s-s.index
assert_series_equal(result,expected)
# GH 4629
# arithmetic datetime64 ops with an index
s = Series(date_range('20130101',periods=5),index=date_range('20130101',periods=5))
expected = s-s.index.to_series()
result = s-s.index
assert_series_equal(result,expected)
result = s-s.index.to_period()
assert_series_equal(result,expected)
df = DataFrame(np.random.randn(5,2),index=date_range('20130101',periods=5))
df['date'] = Timestamp('20130102')
df['expected'] = df['date'] - df.index.to_series()
df['result'] = df['date'] - df.index
assert_series_equal(df['result'],df['expected'])
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
#result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan
#self.assertEqual(isnull(result).sum(), 7)
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
self.assertTrue(isnull(result[0]))
self.assertTrue(isnull(result2[0]))
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
assert_series_equal(result, expected)
result = s + '_suffix'
expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # reversed comparisons (scalar on the left) do not yet behave consistently:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s,s2),(s2,s)]:
self.assertRaises(TypeError, lambda : x == y)
self.assertRaises(TypeError, lambda : x != y)
self.assertRaises(TypeError, lambda : x >= y)
self.assertRaises(TypeError, lambda : x > y)
self.assertRaises(TypeError, lambda : x < y)
self.assertRaises(TypeError, lambda : x <= y)
def test_more_na_comparisons(self):
left = Series(['a', np.nan, 'c'])
right = Series(['a', np.nan, 'd'])
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
expected = Series([False, False, True], list('bca'))
result = a ^ b
assert_series_equal(result,expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# vs non-matching
result = a & Series([1],['z'])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([1],['z'])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:
result = a[a | e]
assert_series_equal(result,a[a])
# vs scalars
index = list('bca')
t = Series([True,False,True])
for v in [True,1,2]:
result = Series([True,False,True],index=index) | v
expected = Series([True,True,True],index=index)
assert_series_equal(result,expected)
for v in [np.nan,'foo']:
self.assertRaises(TypeError, lambda : t | v)
for v in [False,0]:
result = Series([True,False,True],index=index) | v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [True,1]:
result = Series([True,False,True],index=index) & v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [False,0]:
result = Series([True,False,True],index=index) & v
expected = Series([False,False,False],index=index)
assert_series_equal(result,expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda : t & v)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
s_0101 = Series([0,1,0,1])
s_0123 = Series(range(4),dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4),dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8),dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1]*4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))
# s_tft will be all false now because of reindexing like s_0123
assert_series_equal(s_0123 & s_tft, Series([False] * 4))
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a','b',np.NaN,'d'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_between(self):
s = Series(bdate_range('1/1/2000', periods=20).asobject)
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
expected = s[3:18].dropna()
assert_series_equal(result, expected)
result = s[s.between(s[3], s[17], inclusive=False)]
expected = s[5:16].dropna()
assert_series_equal(result, expected)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True,index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
        # TODO: fix this exception (see GH5035)
        # (previously this was a TypeError because Series returned
        # NotImplemented)
self.assertRaises(ValueError, tester, s, d)
def test_idxmin(self):
# test idxmin
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assertTrue(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmin()))
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
self.assertEqual(result, 0)
s[0] = np.nan
result = s.idxmin()
self.assertEqual(result, 1)
def test_idxmax(self):
# test idxmax
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assertTrue(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmax()))
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
self.assertEqual(result, 5)
s[5] = np.nan
result = s.idxmax()
self.assertEqual(result, 4)
# Float64Index
# GH 5914
s = pd.Series([1,2,3],[1.1,2.1,3.1])
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
s = pd.Series(s.index, s.index)
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.argmax()]
result = tsdf.apply(f)
expected = tsdf.max()
assert_series_equal(result,expected)
# .item()
s = Series([1])
result = s.item()
self.assertEqual(result, 1)
self.assertEqual(s.item(), s.iloc[0])
# using an ndarray like function
s = Series(np.random.randn(10))
result = np.ones_like(s)
expected = Series(1,index=range(10),dtype='float64')
#assert_series_equal(result,expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0,1.,-1],index=list('abc'))
result = np.compress(s>0,s)
assert_series_equal(result, Series([1.],index=['b']))
result = np.compress(s<-1,s)
assert_series_equal(result, Series([],dtype='float64'))
def test_complexx(self):
# GH4819
# complex access for ndarray compat
a = np.arange(5)
b = Series(a + 4j*a)
tm.assert_almost_equal(a,b.real)
tm.assert_almost_equal(4*a,b.imag)
b.real = np.arange(5)+5
tm.assert_almost_equal(a+5,b.real)
tm.assert_almost_equal(4*a,b.imag)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2,2,2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df,expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment',None)
df = DataFrame({ "aa":range(5), "bb":[2.2]*5})
df["cc"] = 0.0
ck = [True]*len(df)
df["bb"].iloc[0] = .13
df_tmp = df.iloc[ck]
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment','raise')
# GH 3217
df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'],index=[0]))
expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))
tm.assert_frame_equal(df,expected)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_numpy_array_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
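            # A reversed op with a float scalar on the object-dtype Series
            # should match the same op on the float-cast Series.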
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
import operator
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
tm.assert_almost_equal(self.ts < self.ts, (self.ts < df)['A'])
tm.assert_almost_equal(self.ts / self.ts, (self.ts / df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
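            # Recompute the expected result of the flex op by aligning both
            # Series on the union index and substituting fill_value wherever
            # exactly one side is missing (NaN only when both are missing).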
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
for op, equiv_op, fv in pairings:
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_numpy_array_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
self.assertTrue(np.isfinite(combined).all())
self.assert_numpy_array_equal(combined[::2], series[::2])
self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_update(self):
s = Series([1.5, nan, 3., 4., nan])
s2 = Series([nan, 3.5, nan, 5.])
s.update(s2)
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
# this will fail as long as series is a sub-class of ndarray
# df['c'].update(Series(['foo'],index=[0])) #####
def test_corr(self):
tm._skip_if_no_scipy()
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))
# No overlap
self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
tm._skip_if_no_scipy()
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if scipy.__version__ < LooseVersion('0.9'):
raise nose.SkipTest("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
self.assertAlmostEqual(
self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)
# No overlap
self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.cov(cp)))
# min_periods
self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_dtype(self):
self.assertEqual(self.ts.dtype, np.dtype('float64'))
self.assertEqual(self.ts.dtypes, np.dtype('float64'))
self.assertEqual(self.ts.ftype, 'float64:dense')
self.assertEqual(self.ts.ftypes, 'float64:dense')
assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))
assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
self.assertTrue(np.all(result == expected.values))
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
self.assertRaises(Exception, a.dot, a.values[:3])
self.assertRaises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
self.assertEqual(result, 11)
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
self.assertEqual(s.dropna().sum('rows'), 3)
self.assertEqual(s._get_axis_number('rows'), 0)
self.assertEqual(s._get_axis_name('rows'), 'index')
def test_drop_duplicates(self):
s = Series([1, 2, 3, 3])
result = s.duplicated()
expected = Series([False, False, False, True])
assert_series_equal(result, expected)
result = s.duplicated(take_last=True)
expected = Series([False, False, True, False])
assert_series_equal(result, expected)
result = s.drop_duplicates()
expected = s[[True, True, True, False]]
assert_series_equal(result, expected)
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, expected)
result = s.drop_duplicates(take_last=True)
expected = s[[True, True, False, True]]
assert_series_equal(result, expected)
sc = s.copy()
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, expected)
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_numpy_array_equal(ts, self.ts.order())
self.assert_numpy_array_equal(ts.index, self.ts.order().index)
ts.sort(ascending=False)
self.assert_numpy_array_equal(ts, self.ts.order(ascending=False))
self.assert_numpy_array_equal(ts.index,
self.ts.order(ascending=False).index)
# GH 5856/5853
# Series.sort operating on a view
df = DataFrame(np.random.randn(10,4))
s = df.iloc[:,0]
def f():
s.sort()
self.assertRaises(ValueError, f)
# test order/sort inplace
# GH6859
ts1 = self.ts.copy()
ts1.sort(ascending=False)
ts2 = self.ts.copy()
ts2.order(ascending=False,inplace=True)
assert_series_equal(ts1,ts2)
ts1 = self.ts.copy()
ts1 = ts1.sort(ascending=False,inplace=False)
ts2 = self.ts.copy()
        ts2 = ts2.order(ascending=False)
assert_series_equal(ts1,ts2)
def test_sort_index(self):
import random
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assertTrue(np.isnan(result[-5:]).all())
self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))
result = ts.order(na_position='first')
self.assertTrue(np.isnan(result[:5]).all())
self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
# ascending=False
ordered = ts.order(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.order(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.valid().values)
def test_nsmallest_nlargest(self):
        # float, int, datetime64 (use i8), timedelta64 (same),
        # objects that are numbers, objects that are strings
base = [3, 2, 1, 2, 5]
s_list = [
Series(base, dtype='int8'),
Series(base, dtype='int16'),
Series(base, dtype='int32'),
Series(base, dtype='int64'),
Series(base, dtype='float32'),
Series(base, dtype='float64'),
Series(base, dtype='uint8'),
Series(base, dtype='uint16'),
Series(base, dtype='uint32'),
Series(base, dtype='uint64'),
Series(base).astype('timedelta64[ns]'),
Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
]
raising = [
Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
]
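        # object and complex dtypes are not supported and should raise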
for r in raising:
dt = r.dtype
msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assertRaisesRegexp(TypeError, msg):
method(arg)
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
assert_series_equal(s.nlargest(3, take_last=True),
s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.order())
assert_series_equal(s.nsmallest(len(s) + 1), s.order())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled),index=filled.index)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
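        # rank(pct=True) divides each rank by the number of valid (non-NaN) observations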
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.ix[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
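        # ties receive the average rank by default (hence the 3.5 entries below)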
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])
exp = Series([2, 1, 3.5, 5, 3.5, 6])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_inf(self):
raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly')
values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = Series.from_csv(path)
assert_series_equal(self.ts, ts)
self.assertTrue(ts.index.name is None)
self.series.to_csv(path)
series = Series.from_csv(path)
self.assertIsNone(series.name)
self.assertIsNone(series.index.name)
assert_series_equal(self.series, series)
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv(path, sep='|')
checkseries = Series(
{datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv(path, sep='|', parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
def test_to_csv(self):
import io
with ensure_clean() as path:
self.ts.to_csv(path)
lines = io.open(path, newline=None).readlines()
assert(lines[1] != '\n')
self.ts.to_csv(path, index=False)
arr = np.loadtxt(path)
assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
s.to_csv(buf, encoding='UTF-8')
buf.seek(0)
s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')
assert_series_equal(s, s2)
def test_tolist(self):
rs = self.ts.tolist()
xp = self.ts.values.tolist()
assert_almost_equal(rs, xp)
# datetime64
s = Series(self.ts.index)
rs = s.tolist()
self.assertEqual(self.ts.index[0], rs[0])
def test_to_frame(self):
self.ts.name = None
rs = self.ts.to_frame()
xp = pd.DataFrame(self.ts.values, index=self.ts.index)
assert_frame_equal(rs, xp)
self.ts.name = 'testname'
rs = self.ts.to_frame()
xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
rs = self.ts.to_frame(name='testdifferent')
xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
def test_to_dict(self):
self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)
def test_to_csv_float_format(self):
with ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format='%.2f')
rs = Series.from_csv(filename)
xp = Series([0.12, 0.23, 0.57])
assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(['jack and jill', 'jesse and frank'])
split = s.str.split(r'\s+and\s+')
buf = StringIO()
split.to_csv(buf)
def test_to_csv_path_is_none(self):
# GH 8215
# Series.to_csv() was returning None, inconsistent with
# DataFrame.to_csv() which returned string
s = Series([1, 2, 3])
csv_str = s.to_csv(path=None)
self.assertIsInstance(csv_str, str)
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
tm.assert_isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]
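        # each series is clipped at its third element; nulls must survive the clip unchanged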
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
self.assertEqual(l[notnull(l)].min(), thresh)
self.assertEqual(u[notnull(u)].max(), thresh)
self.assertEqual(list(isnull(s)), list(isnull(l)))
self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_dict_equal(result, ts, compare_keys=False)
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
        self.assertTrue(np.array_equal(
            ser.isnull(), Series([False, False, False, True, False]).values))
        ser = Series(["hi", "", nan])
        self.assertTrue(np.array_equal(
            ser.isnull(), Series([False, False, True]).values))
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
        self.assertTrue(np.array_equal(
            ser.notnull(), Series([True, True, True, False, True]).values))
        ser = Series(["hi", "", nan])
        self.assertTrue(np.array_equal(
            ser.notnull(), Series([True, True, False]).values))
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, datetools.bday)
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
self.assertRaises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, timeRule='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, offset=datetools.bday)
assert_series_equal(shifted5, shifted4)
        # shifting by a numpy integer scalar (exercises 32-bit take)
        # GH 8129
index=date_range('2000-01-01',periods=5)
for dtype in ['int32','int64']:
s1 = Series(np.arange(5,dtype=dtype),index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan,0,1,2,3],index=index)
assert_series_equal(result,expected)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=datetools.bday)
assert_series_equal(shifted, shifted3)
self.assertRaises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)))
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
self.assertRaises(ValueError, no_freq.tshift)
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_truncate(self):
offset = datetools.bday
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert(len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert(len(truncated) == 0)
self.assertRaises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
self.assertEqual(np.ptp(ser), np.ptp(arr))
def test_asof(self):
# array or list or dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
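        # the query dates are denser than the series; asof carries the last valid observation forward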
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
self.ts[5:10] = np.NaN
self.ts[15:20] = np.NaN
val1 = self.ts.asof(self.ts.index[7])
val2 = self.ts.asof(self.ts.index[19])
self.assertEqual(val1, self.ts[4])
self.assertEqual(val2, self.ts[14])
# accepts strings
val1 = self.ts.asof(str(self.ts.index[7]))
self.assertEqual(val1, self.ts[4])
# in there
self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])
# no as of value
d = self.ts.index[0] - datetools.bday
self.assertTrue(np.isnan(self.ts.asof(d)))
def test_getitem_setitem_datetimeindex(self):
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
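        # the naive string below resolves to 04:00 wall time in the index's timezone, i.e. position 4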
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_pytz(self):
        tm._skip_if_no_pytz()
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
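        # 09:00 UTC and 03:00 US/Central both correspond to 04:00 US/Eastern, i.e. ts[4]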
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
        tm._skip_if_no_dateutil()
from dateutil.tz import tzutc
from dateutil.zoneinfo import gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_asof_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
self.assertEqual(ts.asof(ts.index[3]), ts[3])
# no as of value
d = ts.index[0].to_timestamp() - datetools.bday
self.assertTrue(np.isnan(ts.asof(d)))
def test_asof_more(self):
from pandas import date_range
s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
dates = s.index[[4, 5, 6, 2, 1]]
result = s.asof(dates)
expected = Series([2, 2, 3, 1, np.nan], index=dates)
assert_series_equal(result, expected)
s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
result = s.asof(s.index[0])
self.assertEqual(result, s[0])
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0 ])
mask = s > 0
s2 = s[ mask ].map( str )
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0 ])
mask = Series([False, True, True, False])
s2 = s[ mask ]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo','bar', 0]))
def test_astype_cast_nan_int(self):
df = Series([1.0, 2.0, 3.0, np.nan])
self.assertRaises(ValueError, df.astype, np.int64)
def test_astype_cast_object_int(self):
arr = Series(["car", "house", "tree", "1"])
self.assertRaises(ValueError, arr.astype, int)
self.assertRaises(ValueError, arr.astype, np.int64)
self.assertRaises(ValueError, arr.astype, np.int8)
arr = Series(['1', '2', '3', '4'], dtype=object)
result = arr.astype(int)
self.assert_numpy_array_equal(result, np.arange(1, 5))
def test_astype_datetimes(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
def test_astype_str(self):
# GH4405
digits = string.digits
s1 = Series([digits * 10, tm.rands(63), tm.rands(64),
tm.rands(1000)])
s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
types = (compat.text_type, np.str_)
for typ in types:
for s in (s1, s2):
res = s.astype(typ)
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
def test_astype_unicode(self):
# GH7758
        # a bit of magic is required to set the default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([u('データーサイエンス、お前はもう死んでいる')]),
]
former_encoding = None
if not compat.PY3:
            # in Python 2 we can force the default encoding
            # for this test
former_encoding = sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding("utf-8")
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
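        # target's values are labels in source's index, so map acts as a lookup into source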
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_numpy_array_equal(result, self.ts * 2)
def test_map_compat(self):
# related GH 8024
s = Series([True,True,False],index=[1,2,3])
result = s.map({ True : 'foo', False : 'bar' })
expected = Series(['foo','foo','bar'],index=[1,2,3])
assert_series_equal(result,expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not isnull(merged['c']))
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
self.assertTrue(issubclass(s2.dtype.type, np.integer))
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
tm.assert_isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
'''
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
'''
df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {
(1,): 'A',
(2,): 'B',
(3, 4): 'A',
(5, 6): 'B'
}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'])
def test_apply(self):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series([x, x ** 2],
index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, expected)
        # GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series(
[datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
result = s.convert_objects(convert_dates=True, convert_numeric=False)
expected = Series(
[Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
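        # convert_dates='coerce' forces datetime conversion; entries that cannot be parsed become NaT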
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
result = s.convert_objects(
convert_dates='coerce', convert_numeric=True)
assert_series_equal(result, expected)
expected = Series(
[Timestamp(
'20010101'), Timestamp('20010102'), Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
result = s2.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, expected)
result = s2.convert_objects(
convert_dates='coerce', convert_numeric=True)
assert_series_equal(result, expected)
        # preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, s)
        # preserve if non-object
s = Series([1], dtype='float32')
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, s)
#r = s.copy()
#r[0] = np.nan
#result = r.convert_objects(convert_dates=True,convert_numeric=False)
#self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',',))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assert_isinstance(result[0], list)
def test_align(self):
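        # Helper: compares align() output against a manual reindex onto the
        # joined index, optionally filling missing entries with fill_value.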
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
def test_align_fill_method(self):
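        # Helper: aligning with a fill method should match reindex followed by
        # fillna(method=..., limit=...).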
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind,
method=meth, limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index, identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
reindexed_dep = self.empty.reindex(self.ts.index, method='pad')
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10),dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a','g','c','f']
expected = Series([1,1,3,3],index=new_index)
        # this changes dtype because the ffill happens after the reindex
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
# invalid because we can't forward fill on this type of index
self.assertRaises(ValueError, lambda : s.reindex(new_index, method='ffill'))
        # inference of new dtype
s = Series([True,False,False,True],index=list('abcd'))
new_index='agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True,True,False],index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False,index=lrange(0,5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False,index=lrange(0,5))
assert_series_equal(result, expected)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
        # the first five labels precede the data, so padding leaves them null
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013,3,5)
day2 = datetime(2013,5,5)
day3 = datetime(2014,3,5)
series1 = Series([5, None, None],[day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
#------------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
#------------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
#------------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
#------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])
# index with name
renamer = Series(
np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')
renamed = renamer.rename({})
self.assertEqual(renamed.index.name, renamer.index.name)
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
self.assertEqual(self.ts.index[0], expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
self.assertTrue(tm.equalContents(ts.index != 5, expected))
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan],
['z', 'a', 'b', 'c', 'd'], dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
        self.assertTrue(np.isnan(x[0]))
        self.assertTrue(np.isnan(expected[0]))
def test_unstack(self):
from numpy import nan
from pandas.util.testing import assert_frame_equal
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
unstacked = s.unstack(0)
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1,2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],
['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sortlevel('A')
assert_series_equal(backwards, res)
res = s.sortlevel(['A', 'B'])
assert_series_equal(backwards, res)
res = s.sortlevel('A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sortlevel(['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.tail(), self.series[-5:])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with tm.assertRaises(TypeError):
s.isin('a')
with tm.assertRaises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True,True,False,False,False])
expected2 = Series([False,True,False,False,False])
# datetime64[ns]
s = Series(date_range('jan-01-2013','jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5),unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# TimeSeries-specific
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
'cummin': cummin, 'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
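        # with NaNs present the cumulative results carry the NaN through and may
        # no longer be boolean (note cse below is object dtype)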
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_replace(self):
N = 100
ser = Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
assert_series_equal(rs, ser)
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
ser = Series([np.nan, 0, np.inf])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
assert_series_equal(ser.replace(np.inf, 0), filled)
ser = Series(self.ts.index)
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
assert_series_equal(result, Series([4, 3, 2, 1, 0]))
# API change from 0.12?
# GH 5319
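        # replacing without an explicit value falls back to the fill method (pad),
        # hence the comparison with ffill()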
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
assert_series_equal(result, expected)
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
#GH 5797
ser = Series(date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = Timestamp('20120101')
result = ser.replace({Timestamp('20130103'):
Timestamp('20120101')})
assert_series_equal(result, expected)
result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([1,2,3])
assert_series_equal(result, Series([0,0,0,0,4]))
s = ser.copy()
s.replace([1,2,3],inplace=True)
assert_series_equal(s, Series([0,0,0,0,4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
with tm.assertRaises(ValueError):
s.replace([1,2,3],inplace=True,method='crash_cymbal')
assert_series_equal(s, ser)
def test_replace_mixed_types(self):
s = Series(np.arange(5),dtype='int64')
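        # Helper: replace should give the same result whether done out of place
        # or with inplace=True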
def check_replace(to_rep, val, expected):
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
assert_series_equal(expected, r)
assert_series_equal(expected, sc)
# should NOT upcast to float
e = Series([0,1,2,3,4])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = Series([0,1,2,3.5,4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = Series([0,1,2,3.5,'a'])
tr, v = [3,4], [3.5,'a']
check_replace(tr, v, e)
# again casts to object
e = Series([0,1,2,3.5,Timestamp('20130101')])
tr, v = [3,4],[3.5,Timestamp('20130101')]
check_replace(tr, v, e)
# casts to float
e = Series([0,1,2,3.5,1])
tr, v = [3,4],[3.5,True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
r = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
assert_series_equal(r, Series([1.0,2,'a'] +
dr[3:].tolist(),dtype=object))
def test_replace_bool_with_string_no_op(self):
s = Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = Series([True, False, True])
result = s.replace(True, '2u')
expected = Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = Series([True, False, True])
result = s.replace(True, False)
expected = Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = Series([True, False, True])
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_asfreq(self):
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),
datetime(2009, 11, 30),
datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq(datetools.bday)
monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)
self.assert_numpy_array_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
self.assertEqual(len(result), 0)
self.assertIsNot(result, ts)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
self.assertEqual(rs[1], 1)
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
chg = s.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
def test_first_last_valid(self):
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
self.assertEqual(index, ts.index[5])
ts[-5:] = np.NaN
index = ts.last_valid_index()
self.assertEqual(index, ts.index[-6])
ts[:] = np.nan
self.assertIsNone(ts.last_valid_index())
self.assertIsNone(ts.first_valid_index())
ser = Series([], index=[])
self.assertIsNone(ser.last_valid_index())
self.assertIsNone(ser.first_valid_index())
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
#------------------------------------------------------------------------------
# GroupBy
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# Misc not safe for sparse
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_numpy_unique(self):
# it works!
result = np.unique(self.ts)
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
        # materialize the iterator so the nested loops below can re-use it
        dtypes = list(map(np.dtype, ['float64', 'int8', 'uint8', 'bool', 'm8[ns]', 'M8[ns]']))
for dtype in dtypes:
self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)
self.assertEqual(pd.concat([Series(dtype=dtype),
Series(dtype=dtype)]).dtype, dtype)
def int_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):
return 'i'
elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):
return 'f'
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return 'O'
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype),
Series(dtype=dtype2)]).dtype
self.assertEqual(result.kind, expected)
def test_concat_empty_series_dtypes(self):
# bools
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.int32)]).dtype, np.int32)
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.float32)]).dtype, np.object_)
# datetimelike
        self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
                                    Series(dtype=np.bool_)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
        self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
                                    Series(dtype=np.bool_)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool_),
Series(dtype=np.int64)]).dtype, np.object_)
# categorical
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype, 'category')
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='float64')]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype, 'category')
# sparse
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64').to_sparse()])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64')])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='object')])
self.assertEqual(result.dtype,np.object_)
self.assertEqual(result.ftype,'object:dense')
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
tm.assert_equal(r, e)
r = s.searchsorted([30])
e = np.array([2])
tm.assert_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4])
tm.assert_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
tm.assert_equal(r, e)
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2])
tm.assert_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2])
tm.assert_array_equal(r, e)
class TestSeriesNonUnique(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
self.assertIn('value', df)
df = ser.reset_index(name='value2')
self.assertIn('value2', df)
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
self.assertEqual(len(rs.columns), 2)
rs = s.reset_index(level=[0, 2], drop=True)
self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))
tm.assert_isinstance(rs, Series)
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
        self.assertTrue(s.is_time_series)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
self.assertTrue(ser.is_time_series)
self.assertIsInstance(ser.index, DatetimeIndex)
def test_replace(self):
N = 100
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort()
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
the-stack_0_421 | import os
from os import path
from pathlib import Path
from shutil import rmtree
from typing import Union
from pyspark.sql.types import DataType
from pyspark.sql.types import StructType
from spark_fhir_schemas.r4.complex_types.address import AddressSchema
from spark_fhir_schemas.r4.resources.explanationofbenefit import (
ExplanationOfBenefitSchema,
)
from spark_fhir_schemas.r4.resources.patient import PatientSchema
def test_simple() -> None:
data_dir: Path = Path(__file__).parent.joinpath("./")
temp_folder = data_dir.joinpath("./temp")
if path.isdir(temp_folder):
rmtree(temp_folder)
os.mkdir(temp_folder)
schema: Union[StructType, DataType] = PatientSchema.get_schema()
assert isinstance(schema, StructType)
# print(schema)
print("------- Patient --------")
print(schema.json())
with open(temp_folder.joinpath("patient_schema.json"), "w+") as file:
file.write(schema.json())
print("------- Address --------")
schema = AddressSchema.get_schema()
print(schema.json())
with open(temp_folder.joinpath("address_schema.json"), "w+") as file:
file.write(schema.json())
print("------- ExplanationOfBenefitSchema --------")
schema = ExplanationOfBenefitSchema.get_schema()
print(schema.json())
# noinspection SpellCheckingInspection
with open(temp_folder.joinpath("explanationofbenefit_schema.json"), "w") as file:
file.write(schema.json())
assert 1 == 1
|
the-stack_0_422 | from __future__ import division
from __future__ import print_function
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import argparse
import glob
import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import mxnet as mx
from tqdm import tqdm
from mxnet import nd
from mxnet import gluon
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv import data as gdata
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
def parse_args():
parser = argparse.ArgumentParser(description='Validate Faster-RCNN networks.')
parser.add_argument('--network', type=str, default='resnet50_v1b',
help="Base feature extraction network name")
parser.add_argument('--dataset', type=str, default='voc',
help='Training dataset.')
parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
default=4, help='Number of data workers')
parser.add_argument('--gpus', type=str, default='0',
help='Training with GPUs, you can specify 1,3 for example.')
parser.add_argument('--pretrained', type=str, default='True',
help='Load weights from previously saved parameters.')
parser.add_argument('--save-prefix', type=str, default='',
help='Saving parameter prefix')
parser.add_argument('--save-json', action='store_true',
help='Save coco output json')
parser.add_argument('--eval-all', action='store_true',
help='Eval all models begins with save prefix. Use with pretrained.')
parser.add_argument('--norm-layer', type=str, default=None,
help='Type of normalization layer to use. '
'If set to None, backbone normalization layer will be fixed,'
' and no normalization layer will be used. '
'Currently supports \'bn\', and None, default is None')
parser.add_argument('--use-fpn', action='store_true',
help='Whether to use feature pyramid network.')
args = parser.parse_args()
return args
def get_dataset(dataset, args):
if dataset.lower() == 'voc':
val_dataset = gdata.VOCDetection(
splits=[(2007, 'test')])
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
elif dataset.lower() == 'coco':
val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval',
cleanup=not args.save_json)
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
return val_dataset, val_metric
def get_dataloader(net, val_dataset, batch_size, num_workers):
"""Get dataloader."""
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
val_loader = mx.gluon.data.DataLoader(
val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),
batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
return val_loader
def split_and_load(batch, ctx_list):
"""Split data to 1 batch each device."""
num_ctx = len(ctx_list)
new_batch = []
for i, data in enumerate(batch):
new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
new_batch.append(new_data)
return new_batch
def validate(net, val_data, ctx, eval_metric, size):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
net.hybridize(static_alloc=True)
with tqdm(total=size) as pbar:
for ib, batch in enumerate(val_data):
batch = split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y, im_scale in zip(*batch):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(clipper(bboxes, x))
# rescale to original resolution
im_scale = im_scale.reshape((-1)).asscalar()
det_bboxes[-1] *= im_scale
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_bboxes[-1] *= im_scale
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
pbar.update(len(ctx))
return eval_metric.get()
if __name__ == '__main__':
args = parse_args()
# contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# network
kwargs = {}
module_list = []
if args.use_fpn:
module_list.append('fpn')
if args.norm_layer is not None:
module_list.append(args.norm_layer)
if args.norm_layer == 'bn':
kwargs['num_devices'] = len(args.gpus.split(','))
net_name = '_'.join(('faster_rcnn', *module_list, args.network, args.dataset))
args.save_prefix += net_name
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(net_name, pretrained=True, **kwargs)
else:
net = gcv.model_zoo.get_model(net_name, pretrained=False, **kwargs)
net.load_parameters(args.pretrained.strip(), cast_dtype=True)
net.collect_params().reset_ctx(ctx)
# validation data
val_dataset, eval_metric = get_dataset(args.dataset, args)
val_data = get_dataloader(
net, val_dataset, args.batch_size, args.num_workers)
# validation
if not args.eval_all:
names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))
for k, v in zip(names, values):
print(k, v)
else:
saved_models = glob.glob(args.save_prefix + '*.params')
for epoch, saved_model in enumerate(sorted(saved_models)):
print('[Epoch {}] Validating from {}'.format(epoch, saved_model))
net.load_parameters(saved_model)
net.collect_params().reset_ctx(ctx)
map_name, mean_ap = validate(net, val_data, ctx, eval_metric, len(val_dataset))
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
print('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
with open(args.save_prefix+'_best_map.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
|
the-stack_0_423 | # -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
"""
Minimalistic "rrt" theme, based on Zap and Emacs defaults.
"""
background_color = '#000000'
highlight_color = '#0000ff'
styles = {
Comment: '#00ff00',
Name.Function: '#ffff00',
Name.Variable: '#eedd82',
Name.Constant: '#7fffd4',
Keyword: '#ff0000',
Comment.Preproc: '#e5e5e5',
String: '#87ceeb',
Keyword.Type: '#ee82ee',
}
|
the-stack_0_424 | from scraping.funtion import html_convert_python
def get_data_page_locate(url):
soup = html_convert_python( url )
data = []
for row in soup.find("ul", {"id": "postcode-list"}).find_all("li"):
url = row.find('a').attrs['href']
data.append(url)
return data
def get_data_page_region(url):
soup = html_convert_python( url )
data = []
for row in soup.find_all("div", {"class": "col-md-3 col-xs-4"}):
url = row.a.get('href')
print(url)
data.append(url)
return data
def get_data_page_postcode(url):
soup = html_convert_python( url )
data = []
for row in soup.find_all("div", {"class": "col-md-3 col-xs-12"}):
url = row.a.string
print(url)
data.append(url)
return data
|
the-stack_0_425 | #!/usr/bin/env python
#encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import random
import sys
import base64
from Cookie import SimpleCookie
# bcrypt seems to be the current consensus in cryptographic circles for
# storing passwords.
# You need the package from http://code.google.com/p/py-bcrypt/
# You can install it by running easy_install py-bcrypt.
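#
# Typical hash-and-verify pattern (illustrative only; it mirrors the calls the
# service below makes):
#   hashed = bcrypt.hashpw('secret', bcrypt.gensalt())
#   ok = bcrypt.hashpw('secret', hashed) == hashed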
try:
import bcrypt
except ImportError:
print('easy_install --user py-bcrypt to get it.')
raise
from spyne.application import Application
from spyne.decorator import rpc
from spyne.error import ResourceNotFoundError
from spyne.model.complex import ComplexModel
from spyne.model.fault import Fault
from spyne.model.primitive import Mandatory
from spyne.model.primitive import String
from spyne.protocol.soap import Soap11
from spyne.server.wsgi import WsgiApplication
from spyne.service import ServiceBase
class PublicKeyError(ResourceNotFoundError):
__namespace__ = 'spyne.examples.authentication'
def __init__(self, value):
Fault.__init__(self,
faultcode='Client.KeyError',
faultstring='Value %r not found' % value
)
class AuthenticationError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self, user_name):
# TODO: self.transport.http.resp_code = HTTP_401
Fault.__init__(self,
faultcode='Client.AuthenticationError',
faultstring='Invalid authentication request for %r' % user_name
)
class AuthorizationError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self):
# TODO: self.transport.http.resp_code = HTTP_401
Fault.__init__(self,
faultcode='Client.AuthorizationError',
faultstring='You are not authorized to access this resource.'
)
class UnauthenticatedError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self):
Fault.__init__(self,
faultcode='Client.UnauthenticatedError',
faultstring='This resource can only be accessed after authentication.'
)
class SpyneDict(dict):
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise PublicKeyError(key)
class Preferences(ComplexModel):
__namespace__ = 'spyne.examples.authentication'
language = String(max_len=2)
time_zone = String
user_db = {
'neo': bcrypt.hashpw('Wh1teR@bbit', bcrypt.gensalt()),
}
session_db = set()
preferences_db = SpyneDict({
'neo': Preferences(language='en', time_zone='Underground/Zion'),
'smith': Preferences(language='xx', time_zone='Matrix/Core'),
})
class UserService(ServiceBase):
__tns__ = 'spyne.examples.authentication'
@rpc(Mandatory.String, Mandatory.String, _returns=None,
_throws=AuthenticationError)
def authenticate(ctx, user_name, password):
password_hash = user_db.get(user_name, None)
if password_hash is None:
raise AuthenticationError(user_name)
if bcrypt.hashpw(password, password_hash) != password_hash:
raise AuthenticationError(user_name)
session_id = (user_name, '%x' % random.randint(1<<128, (1<<132)-1))
session_db.add(session_id)
cookie = SimpleCookie()
cookie["session-id"] = base64.urlsafe_b64encode(str(session_id[0]) + "\0" + str(session_id[1]))
cookie["session-id"]["max-age"] = 3600
header_name, header_value = cookie.output().split(":", 1)
ctx.transport.resp_headers[header_name] = header_value.strip()
from pprint import pprint
pprint(ctx.transport.resp_headers)
@rpc(Mandatory.String, _throws=PublicKeyError, _returns=Preferences)
def get_preferences(ctx, user_name):
# Only allow access to the users own preferences.
if user_name != ctx.udc:
raise AuthorizationError()
retval = preferences_db[user_name]
return retval
def _on_method_call(ctx):
if ctx.descriptor.name == "authenticate":
# No checking of session cookie for call to authenticate
return
cookie = SimpleCookie()
http_cookie = ctx.transport.req_env.get("HTTP_COOKIE")
if http_cookie:
cookie.load(http_cookie)
if "session-id" not in cookie:
raise UnauthenticatedError()
session_cookie = cookie["session-id"].value
session_id = tuple(base64.urlsafe_b64decode(session_cookie).split("\0", 1))
if not session_id in session_db:
raise AuthenticationError(session_id[0])
ctx.udc = session_id[0] # user name
UserService.event_manager.add_listener('method_call', _on_method_call)
if __name__=='__main__':
from spyne.util.wsgi_wrapper import run_twisted
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
logging.getLogger('twisted').setLevel(logging.DEBUG)
application = Application([UserService],
tns='spyne.examples.authentication',
in_protocol=Soap11(validator='lxml'),
out_protocol=Soap11()
)
twisted_apps = [
(WsgiApplication(application), 'app'),
]
sys.exit(run_twisted(twisted_apps, 7789))
|
the-stack_0_426 | __author__ = 'dereyly'
import sys
#sys.path.append('/home/dereyly/progs/caffe_cudnn33/python_33')
#sys.path.append('/home/dereyly/progs/caffe-master-triplet/python')
import caffe
import numpy as np
'''
layer {
name: 'rcls_lost_my'
type: 'Python'
bottom: 'feats'
bottom: 'labels'
top: 'cls_lost_my'
python_param {
module: 'fast_rcnn.skip_softmax_loss'
layer: 'SoftmaxLossLayer'
#param_str: "{'ratios': [0.5, 1, 2], 'scales': [2, 4, 8, 16, 32]}"
}
loss_weight: 1
}
'''
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
sf = np.exp(x)
sum_sf=np.sum(sf, axis=1)
for i in range(x.shape[0]):
sf[i]/=sum_sf[i]
return sf
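# Quick illustration of the helper above (not part of the original layer code):
# each row of the output sums to 1, e.g.
#   softmax(np.array([[1., 2., 3.], [0., 0., 0.]]))
#   -> rows of roughly [0.09, 0.24, 0.67] and [0.33, 0.33, 0.33]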
class SoftmaxLossLayer(caffe.Layer):
def setup(self, bottom, top):
# check input pair
        if len(bottom) != 2:
            raise Exception("Need two inputs (scores and labels) to compute the loss.")
# DBG
self.count = 0
self.skip_count = 0
top[0].reshape(1)
def reshape(self, bottom, top):
# check input dimensions match
# difference is shape of inputs
sz=bottom[0].data.shape
self.batch_sz=sz[0]
self.diff = np.zeros((sz[0],sz[1]),dtype=np.float32)
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
# loss output is scalar
#top[1].reshape(self.batch_sz)
def forward(self, bottom, top):
self.count+=1
sz=bottom[0].data.shape
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
lbl_idx=bottom[1].data
        lbl_idx = lbl_idx.astype(int)
for i in range(self.batch_sz):
self.lbl_gt[i,lbl_idx[i]]=1
soft_max=softmax(bottom[0].data)
#loss = -self.lbl_gt*np.log(np.maximum(soft_max,np.finfo(np.float32).eps))
loss=0
for i in range(self.batch_sz):
loss -= np.log(np.maximum(soft_max[i][lbl_idx[i]],np.finfo(np.float32).eps))
#loss2=-np.log(soft_max)
#for i in range(self.batch_sz):
# loss[i,lbl_idx[i]]=0
#print bottom[1].data.shape
self.diff[...] = soft_max-self.lbl_gt
for i in range(self.batch_sz):
coeff=soft_max[i,lbl_idx[i]]
self.diff[i]*=coeff
self.skip_count+=coeff
if self.count%100==0:
print('-- skip count -- ',self.skip_count/(100.0*self.batch_sz))
self.skip_count=0
top[0].data[...] = np.sum(loss) / bottom[0].num
#top[1].data[...] = loss
def backward(self, top, propagate_down, bottom):
#pass
bottom[0].diff[...] = self.diff / bottom[0].num
|
the-stack_0_428 |
"""
Runs one instance of the Atari environment and optimizes using DQN algorithm.
Can use a GPU for the agent (applies to both sample and train). No parallelism
employed, so everything happens in one python process; can be easier to debug.
The kwarg snapshot_mode="last" to logger context will save the latest model at
every log point (see inside the logger for other options).
In viskit, whatever (nested) key-value pairs appear in config will become plottable
keys for showing several experiments. If you need to add more after an experiment,
use rlpyt.utils.logging.context.add_exp_param().
"""
from rlpyt.samplers.serial.sampler import SerialSampler
from rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo
from rlpyt.algos.dqn.dqn import DQN
from rlpyt.agents.dqn.atari.atari_dqn_agent import AtariDqnAgent
from rlpyt.runners.minibatch_rl import MinibatchRlEval
from rlpyt.utils.logging.context import logger_context
from polyaxon_client.tracking import get_outputs_path
def build_and_train(game="pong", run_ID=0, cuda_idx=None):
sampler = SerialSampler(
EnvCls=AtariEnv,
TrajInfoCls=AtariTrajInfo, # default traj info + GameScore
env_kwargs=dict(game=game),
eval_env_kwargs=dict(game=game),
batch_T=4, # Four time-steps per sampler iteration.
batch_B=1,
max_decorrelation_steps=0,
eval_n_envs=10,
eval_max_steps=int(10e3),
eval_max_trajectories=5,
)
algo = DQN(min_steps_learn=1e3) # Run with defaults.
agent = AtariDqnAgent()
runner = MinibatchRlEval(
algo=algo,
agent=agent,
sampler=sampler,
n_steps=50e6,
log_interval_steps=1e3,
affinity=dict(cuda_idx=cuda_idx),
)
config = dict(game=game)
name = "dqn_" + game
#log_dir = "example_1"
log_dir = get_outputs_path()
with logger_context(log_dir, run_ID, name, config, snapshot_mode="last"):
runner.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--game', help='Atari game', default='pong')
parser.add_argument('--run_ID', help='run identifier (logging)', type=int, default=0)
parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=1)
args = parser.parse_args()
build_and_train(
game=args.game,
run_ID=args.run_ID,
cuda_idx=args.cuda_idx,
)
|
the-stack_0_431 | from datetime import datetime
from pathlib import Path
from tkinter import *
from tkinter import filedialog
from docxtpl import DocxTemplate
import xlrd
import os
import configparser
import sys
def resource_path(relative_path):
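    # Resolve a bundled data file whether the app runs from source or from a
    # PyInstaller one-file build (which unpacks resources into sys._MEIPASS).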
if getattr(sys, 'frozen', False):
base_path = sys._MEIPASS
else:
base_path = os.path.dirname(os.path.abspath(__file__))
print(os.path.join(base_path, relative_path))
return os.path.join(base_path, relative_path)
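# conf.ini keeps a simple usage quota: "totalCount" is how many reports this
# build may generate in total, "usedCount" how many have been generated so far.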
def valid_count():
config = configparser.ConfigParser()
config.read(resource_path(os.path.join('res', 'conf.ini')), encoding="utf8")
return config.getint("sys_config", "totalCount"), config.getint("sys_config", "usedCount")
def update_valid(count):
config = configparser.ConfigParser()
config.read(resource_path(os.path.join('res', 'conf.ini')), encoding="utf8")
config.set("sys_config", "usedCount", repr(count))
config.write(open(resource_path(os.path.join('res', 'conf.ini')), "w"))
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master, bg='white')
self.pack(expand=YES, fill=BOTH)
self.window_init()
self.createWidgets()
def window_init(self):
self.master.title('报告批处理系统')
self.master.bg = 'white'
width, height = self.master.maxsize()
self.master.geometry("{}x{}".format(500, 500))
def createWidgets(self):
# # fm1
self.fm1 = Frame(self, bg='white')
self.openButton = Button(self.fm1, text='选择表格文件', bg='#e4e4e5', fg='black', font=('微软雅黑', 12),
command=self.fileOpen)
self.openButton.pack(expand=YES)
self.fm1.pack(side=TOP, pady=10, expand=NO, fill='x')
# fm2
self.fm2 = Frame(self, bg='white')
self.predictEntry = Text(self.fm2, font=('微软雅黑', 10), fg='#FF4081', state=DISABLED)
self.predictEntry.pack(side=LEFT, fill='y', padx=20, expand=YES)
self.fm2.pack(side=TOP, expand=YES, fill="y")
def output_predict_sentence(self, r):
# self.predictEntry.delete(0, END)
self.predictEntry.config(state=NORMAL)
self.predictEntry.insert(INSERT, r + "\n")
self.predictEntry.config(state=DISABLED)
def fileOpen(self):
fileName = filedialog.askopenfilename(title='选择表格文件', filetypes=[('Excel', '*.xlsx')])
self.read_excel(fileName)
# self.output_predict_sentence("结束")
def read_excel(self, fileName):
try:
self.output_predict_sentence("选择文件为:" + fileName)
my_file = Path(fileName)
if my_file.exists():
pass
else:
self.output_predict_sentence("文件不存在,重新选择文件!")
my_dir_name = fileName.replace('.xlsx', '')
my_dir = Path(my_dir_name)
if my_dir.exists():
pass
else:
os.makedirs(my_dir)
# self.output_predict_sentence("创建存储目录")
            # open the Excel workbook
x1 = xlrd.open_workbook(fileName)
            # open the first worksheet
table = x1.sheet_by_index(0)
nrows = table.nrows
validCount = valid_count()
if nrows - 2 + validCount[1] > validCount[0]:
self.output_predict_sentence('数据异常,联系开发人员!')
return
self.output_predict_sentence('预计生成报告数:' + str(nrows - 2))
self.output_predict_sentence("开始生成报告!")
for i in range(nrows - 2):
reqTimeStr = str(table.cell_value(i + 2, 0)).strip()
companyName = table.cell_value(i + 2, 1)
if companyName is None:
break
productNumber = str(table.cell_value(i + 2, 2)).strip()
SCCJ = str(table.cell_value(i + 2, 3)).strip()
productName = str(table.cell_value(i + 2, 4)).strip()
productTime = table.cell_value(i + 2, 5)
PH = table.cell_value(i + 2, 6)
LC = str(table.cell_value(i + 2, 7)).strip()
GCZCH = table.cell_value(i + 2, 8)
YJZH = table.cell_value(i + 2, 9)
CYWZ = str(table.cell_value(i + 2, 10)).strip()
GH = str(table.cell_value(i + 2, 11)).strip()
reportTime = str(table.cell_value(i + 2, 12)).strip()
                # convert the date strings to datetime objects
reqTime = datetime.strptime(reqTimeStr, '%Y.%m.%d')
reportTime = datetime.strptime(reportTime, '%Y.%m.%d')
tpl = DocxTemplate(resource_path(os.path.join('res', 'tempdoc.docx')))
context = {
'companyName': companyName,
'productNumber': productNumber,
# 'SCCJ': SCCJ,
# 'productName': productName,
# 'productTime': productTime,
# 'PH': PH,
# 'LC': LC,
# 'GCZCH': GCZCH,
# 'YJZH': YJZH,
'CYWZ': CYWZ,
'GH': GH,
'reqTime': "{0:%Y}.{0:%m}.{0:%d}".format(reqTime),
'checkTime': "{0:%Y}.{0:%m}.{0:%d}".format(reqTime),
'reportTime': "{0:%Y}.{0:%m}.{0:%d}".format(reportTime),
}
if productName == 'None':
context['productName'] = ''
else:
context['productName'] = productName
if LC == 'None':
context['LC'] = ''
else:
context['LC'] = LC
if productTime is None:
context['productTime'] = ''
else:
if isinstance(productTime, float):
context['productTime'] = int(float(productTime))
elif isinstance(productTime, int):
context['productTime'] = int(productTime)
else:
context['productTime'] = str(
productTime).replace('00:00:00+00:00', '')
if PH is None:
context['PH'] = ''
else:
if isinstance(PH, float):
context['PH'] = int(float(PH))
else:
context['PH'] = PH
if SCCJ == 'None':
context['SCCJ'] = ''
else:
context['SCCJ'] = SCCJ
if YJZH is None:
context['YJZH'] = ''
else:
if isinstance(YJZH, float):
context['YJZH'] = int(float(YJZH))
else:
context['YJZH'] = YJZH
if GCZCH is None:
context['GCZCH'] = ''
else:
if isinstance(GCZCH, float):
context['GCZCH'] = int(float(GCZCH))
else:
context['GCZCH'] = GCZCH
temp = str(i + 1)
saveFileName = my_dir_name + '/' + \
companyName.replace('有限公司', '').strip() + '_' + \
GH + "_" + temp + '.docx'
# self.output_predict_sentence("第" + temp + "文件:" + saveFileName)
tpl.render(context)
tpl.save(saveFileName)
update_valid(nrows - 2 + validCount[1])
self.output_predict_sentence("报告生成结束,共生成报告:" + repr(nrows - 2))
except Exception as err:
blogpath = resource_path(os.path.join('res', 'log_err.txt'))
f = open(blogpath, 'w+')
f.writelines(repr(err))
f.close()
self.output_predict_sentence("报告生成失败,原因:" + repr(err))
if __name__ == '__main__':
app = Application()
app.mainloop()
|
the-stack_0_432 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from numpy import nan, ndarray
import numpy as np
import warnings
import operator
from pandas.core.common import isnull, _values_from_object, _maybe_match_name
from pandas.core.index import Index, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.index as _index
from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray)
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
from pandas.sparse.scipy_sparse import (_sparse_series_to_coo,
_coo_to_sparse_series)
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif np.isscalar(other):
if isnull(other) or isnull(self.fill_value):
new_fill_value = np.nan
else:
new_fill_value = op(np.float64(self.fill_value),
np.float64(other))
return SparseSeries(op(self.sp_values, other),
index=self.index,
sparse_index=self.sp_index,
fill_value=new_fill_value,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left, right, op, name)
return SparseSeries(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Defaults to NaN (code for missing)
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
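    Examples
    --------
    A minimal sketch of the dense round-trip described above (values are
    illustrative only):
    >>> sp = SparseSeries([1.0, np.nan, 3.0])
    >>> dense = sp.to_dense()      # work on a regular Series
    >>> dense[1] = 2.0
    >>> sp = dense.to_sparse()     # back to a SparseSeries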
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
Simplified alternate constructor
"""
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '%s\n%s' % (series_rep, repr(self.sp_index))
return rep
def __array_wrap__(self, result):
"""
Gets called prior to a ufunc (and after)
"""
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=self.fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
"""
"""
try:
return self._get_val_at(self.index.get_loc(key))
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
key = _values_from_object(key)
dataSlice = self.values[key]
new_index = Index(self.index.view(ndarray)[key])
return self._constructor(dataSlice, index=new_index).__finalize__(self)
def _set_with_engine(self, key, value):
return self.set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
res_sp_values = np.abs(self.sp_values)
return self._constructor(res_sp_values, index=self.index,
sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
        # and possibly change the index
new_values = values.set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to (dense) Series
"""
if sparse_only:
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
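        """Fraction of points that are explicitly stored, i.e. npoints / length."""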
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
def reindex(self, index=None, method=None, copy=True, limit=None):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
return self._constructor(self._data.reindex(new_index, method=method,
limit=limit, copy=copy),
index=new_index).__finalize__(self)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
def take(self, indices, axis=0, convert=True):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves locations of NaN values
Returns
-------
cumsum : Series or SparseSeries
"""
new_array = SparseArray.cumsum(self.values)
if isinstance(new_array, SparseArray):
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
return Series(new_array, index=self.index).__finalize__(self)
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().valid()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isnull(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
def shift(self, periods, freq=None):
"""
Analogous to Series.shift
"""
# no special handling of fill values yet
if not isnull(self.fill_value):
# TODO: kwds is not defined...should this work?
dense_shifted = self.to_dense().shift(periods, freq=freq, **kwds) # noqa
return dense_shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if periods == 0:
return self.copy()
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = IntIndex(len(self), new_indices)
if isinstance(self.sp_index, BlockIndex):
new_sp_index = new_sp_index.to_block_index()
return self._constructor(self.sp_values[start:end].copy(),
index=self.index, sparse_index=new_sp_index,
fill_value=self.fill_value).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
.. versionadded:: 0.16.0
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
.. versionadded:: 0.16.0
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
---------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
radd_func=operator.add, comp_method=None,
bool_method=None, use_numexpr=False,
force=True)
# backwards compatiblity
class SparseTimeSeries(SparseSeries):
def __init__(self, *args, **kwargs):
# deprecation TimeSeries, #10890
warnings.warn("SparseTimeSeries is deprecated. Please use "
"SparseSeries", FutureWarning, stacklevel=2)
super(SparseTimeSeries, self).__init__(*args, **kwargs)
|
the-stack_0_433 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""BYOL tasks."""
import random
from typing import Any, Callable, Dict, Optional, Tuple, cast
import torch
import torch.nn.functional as F
from kornia import augmentation as K
from kornia import filters
from kornia.geometry import transform as KorniaTransform
from pytorch_lightning.core.lightning import LightningModule
from torch import Tensor, optim
from torch.autograd import Variable
from torch.nn.modules import BatchNorm1d, Conv2d, Linear, Module, ReLU, Sequential
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision.models import resnet18
from torchvision.models.resnet import resnet50
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Module.__module__ = "torch.nn"
def normalized_mse(x: Tensor, y: Tensor) -> Tensor:
"""Computes the normalized mean squared error between x and y.
Args:
x: tensor x
y: tensor y
Returns:
the normalized MSE between x and y
"""
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
mse = torch.mean(2 - 2 * (x * y).sum(dim=-1))
return mse
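# A minimal sanity check of normalized_mse, added here as an illustration and not
# part of the original module; the helper name is an assumption. After L2
# normalization the loss equals 2 - 2*cos(x, y), so identical inputs score ~0 and
# sign-flipped inputs score ~4.
def _normalized_mse_example() -> None:
    x = torch.randn(8, 128)
    assert normalized_mse(x, x).item() < 1e-5  # same direction -> ~0
    assert abs(normalized_mse(x, -x).item() - 4.0) < 1e-4  # opposite direction -> ~4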
# TODO: Move this to transforms
class RandomApply(Module):
"""Applies augmentation function (augm) with probability p."""
def __init__(self, augm: Callable[[Tensor], Tensor], p: float) -> None:
"""Initialize RandomApply.
Args:
augm: augmentation function to apply
p: probability with which the augmentation function is applied
"""
super().__init__()
self.augm = augm
self.p = p
def forward(self, x: Tensor) -> Tensor:
"""Applies an augmentation to the input with some probability.
Args:
x: a batch of imagery
Returns:
augmented version of ``x`` with probability ``self.p`` else an un-augmented
version
"""
return x if random.random() > self.p else self.augm(x)
# TODO: This isn't _really_ applying the augmentations from SimCLR as we have
# multispectral imagery and thus can't naively apply color jittering or grayscale
# conversions. We should think more about what makes sense here.
class SimCLRAugmentation(Module):
"""A module for applying SimCLR augmentations.
SimCLR was one of the first papers to show the effectiveness of random data
augmentation in self-supervised-learning setups. See
https://arxiv.org/pdf/2002.05709.pdf for more details.
"""
def __init__(self, image_size: Tuple[int, int] = (256, 256)) -> None:
"""Initialize a module for applying SimCLR augmentations.
Args:
image_size: Tuple of integers defining the image size
"""
super().__init__()
self.size = image_size
self.augmentation = Sequential(
KorniaTransform.Resize(size=image_size, align_corners=False),
# Not suitable for multispectral adapt
# RandomApply(K.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
# K.RandomGrayscale(p=0.2),
K.RandomHorizontalFlip(),
RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
K.RandomResizedCrop(size=image_size),
)
def forward(self, x: Tensor) -> Tensor:
"""Applys SimCLR augmentations to the input tensor.
Args:
x: a batch of imagery
Returns:
an augmented batch of imagery
"""
return cast(Tensor, self.augmentation(x))
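# Illustrative sketch, not part of the original file (the helper name is an
# assumption): two passes of the stochastic SimCLRAugmentation over one batch
# yield two differently augmented "views" of identical shape, which is what the
# BYOL training step further below relies on.
def _simclr_augmentation_example() -> None:
    augment = SimCLRAugmentation(image_size=(64, 64))
    batch = torch.rand(4, 3, 128, 128)
    view1, view2 = augment(batch), augment(batch)
    assert view1.shape == view2.shape == (4, 3, 64, 64)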
class MLP(Module):
"""MLP used in the BYOL projection head."""
def __init__(
self, dim: int, projection_size: int = 256, hidden_size: int = 4096
) -> None:
"""Initializes the MLP projection head.
Args:
dim: size of layer to project
projection_size: size of the output layer
hidden_size: size of the hidden layer
"""
super().__init__()
self.mlp = Sequential(
Linear(dim, hidden_size),
BatchNorm1d(hidden_size), # type: ignore[no-untyped-call]
ReLU(inplace=True),
Linear(hidden_size, projection_size),
)
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of the MLP model.
Args:
x: batch of imagery
Returns:
embedded version of the input
"""
return cast(Tensor, self.mlp(x))
class EncoderWrapper(Module):
"""Encoder wrapper for joining a model and a projection head.
When we call .forward() on this module the following steps happen:
* The input is passed through the base model
* When the encoding layer is reached a hook is called
* The output of the encoding layer is passed through the projection head
* The forward call returns the output of the projection head
"""
def __init__(
self,
model: Module,
projection_size: int = 256,
hidden_size: int = 4096,
layer: int = -2,
) -> None:
"""Initializes EncoderWrapper.
Args:
model: model to encode
projection_size: size of the output layer of the projector MLP
hidden_size: size of hidden layer of the projector MLP
layer: layer from model to project
"""
super().__init__()
self.model = model
self.projection_size = projection_size
self.hidden_size = hidden_size
self.layer = layer
self._projector: Optional[Module] = None
self._projector_dim: Optional[int] = None
self._encoded = torch.empty(0)
self._register_hook()
@property
def projector(self) -> Module:
"""Wrapper module for the projector head."""
assert self._projector_dim is not None
if self._projector is None:
self._projector = MLP(
self._projector_dim, self.projection_size, self.hidden_size
)
return self._projector
def _hook(self, module: Any, input: Any, output: Tensor) -> None:
"""Hook to record the activations at the projection layer.
See the following docs page for more details on hooks:
https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html
Args:
module: the calling module
input: input to the module this hook was registered to
output: output from the module this hook was registered to
"""
output = output.flatten(start_dim=1)
if self._projector_dim is None:
# If we haven't already, measure the output size
self._projector_dim = output.shape[-1]
# Project the output to get encodings, the projector model is created the first
# time this is called
self._encoded = self.projector(output)
def _register_hook(self) -> None:
"""Register a hook for layer that we will extract features from."""
layer = list(self.model.children())[self.layer]
layer.register_forward_hook(self._hook)
def forward(self, x: Tensor) -> Tensor:
"""Pass through the model, and collect the representation from our forward hook.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
_ = self.model(x)
return self._encoded
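# Illustrative sketch, not part of the original file (the helper name and the
# torchvision weights flag are assumptions): wrapping a ResNet-18 at its
# second-to-last child (the pooling layer) makes the wrapper return the
# projection-head output instead of classifier logits; the projector is built
# lazily on the first forward pass.
def _encoder_wrapper_example() -> None:
    wrapper = EncoderWrapper(resnet18(pretrained=False), projection_size=256, layer=-2)
    out = wrapper(torch.zeros(2, 3, 224, 224))
    assert out.shape == (2, 256)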
class BYOL(Module):
"""BYOL implementation.
BYOL contains two identical encoder networks. The first is trained as usual, and its
weights are updated with each training batch. The second, "target" network, is
updated using a running average of the first encoder's weights.
See https://arxiv.org/abs/2006.07733 for more details (and please cite it if you
use it in your own work).
"""
def __init__(
self,
model: Module,
image_size: Tuple[int, int] = (256, 256),
hidden_layer: int = -2,
in_channels: int = 4,
projection_size: int = 256,
hidden_size: int = 4096,
augment_fn: Optional[Module] = None,
beta: float = 0.99,
**kwargs: Any,
) -> None:
"""Sets up a model for pre-training with BYOL using projection heads.
Args:
model: the model to pretrain using BYOL
image_size: the size of the training images
hidden_layer: the hidden layer in ``model`` to attach the projection
head to, given as the index of the layer in ``model.children()``
in_channels: number of input channels to the model
projection_size: size of first layer of the projection MLP
hidden_size: size of the hidden layer of the projection MLP
augment_fn: an instance of a module that performs data augmentation
beta: the speed at which the target encoder is updated using the main
encoder
"""
super().__init__()
self.augment: Module
if augment_fn is None:
self.augment = SimCLRAugmentation(image_size)
else:
self.augment = augment_fn
self.beta = beta
self.in_channels = in_channels
self.encoder = EncoderWrapper(
model, projection_size, hidden_size, layer=hidden_layer
)
self.predictor = MLP(projection_size, projection_size, hidden_size)
self.target = EncoderWrapper(
model, projection_size, hidden_size, layer=hidden_layer
)
# Perform a single forward pass to initialize the wrapper correctly
self.encoder(torch.zeros(2, self.in_channels, *image_size))
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of the encoder model through the MLP and prediction head.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
return cast(Tensor, self.predictor(self.encoder(x)))
def update_target(self) -> None:
"""Method to update the "target" model weights."""
for p, pt in zip(self.encoder.parameters(), self.target.parameters()):
pt.data = self.beta * pt.data + (1 - self.beta) * p.data
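# Illustrative sketch, not part of the original file: one manual BYOL update
# mirroring BYOLTask.training_step below. The helper name, optimizer choice and
# learning rate are placeholders introduced here, not values from the original.
def _byol_manual_step_example() -> None:
    byol = BYOL(resnet18(pretrained=False), image_size=(64, 64), in_channels=3)
    optimizer = optim.SGD(byol.parameters(), lr=1e-3)
    images = torch.rand(4, 3, 64, 64)
    with torch.no_grad():
        x1, x2 = byol.augment(images), byol.augment(images)
    pred1, pred2 = byol(x1), byol(x2)
    with torch.no_grad():
        targ1, targ2 = byol.target(x1), byol.target(x2)
    # Symmetric loss: each view's prediction regresses the other view's target.
    loss = normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    byol.update_target()  # exponential moving average update of the target network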
class BYOLTask(LightningModule):
"""Class for pre-training any PyTorch model using BYOL."""
def config_task(self) -> None:
"""Configures the task based on kwargs parameters passed to the constructor."""
in_channels = self.hyperparams["in_channels"]
pretrained = self.hyperparams["imagenet_pretraining"]
encoder = None
if self.hyperparams["encoder_name"] == "resnet18":
encoder = resnet18(pretrained=pretrained)
elif self.hyperparams["encoder_name"] == "resnet50":
encoder = resnet50(pretrained=pretrained)
else:
raise ValueError(
f"Encoder type '{self.hyperparams['encoder_name']}' is not valid."
)
layer = encoder.conv1
# Creating new Conv2d layer
new_layer = Conv2d(
in_channels=in_channels,
out_channels=layer.out_channels,
kernel_size=layer.kernel_size,
stride=layer.stride,
padding=layer.padding,
bias=layer.bias,
).requires_grad_()
# initialize the weights from new channel with the red channel weights
copy_weights = 0
# Copying the weights from the old to the new layer
new_layer.weight[:, : layer.in_channels, :, :].data[:] = Variable(
layer.weight.clone(), requires_grad=True
)
# Copying the weights of the old layer to the extra channels
for i in range(in_channels - layer.in_channels):
channel = layer.in_channels + i
new_layer.weight[:, channel : channel + 1, :, :].data[:] = Variable(
layer.weight[:, copy_weights : copy_weights + 1, ::].clone(),
requires_grad=True,
)
encoder.conv1 = new_layer
self.model = BYOL(encoder, in_channels=in_channels, image_size=(256, 256))
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LightningModule for pre-training a model with BYOL.
Keyword Args:
in_channels: number of channels on the input imagery
encoder_name: either "resnet18" or "resnet50"
imagenet_pretraining: bool indicating whether to use imagenet pretrained
weights
Raises:
ValueError: if kwargs arguments are invalid
"""
super().__init__()
# Creates `self.hparams` from kwargs
self.save_hyperparameters() # type: ignore[operator]
self.hyperparams = cast(Dict[str, Any], self.hparams)
self.config_task()
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Forward pass of the model.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
return self.model(*args, **kwargs)
def configure_optimizers(self) -> Dict[str, Any]:
"""Initialize the optimizer and learning rate scheduler.
Returns:
a "lr dict" according to the pytorch lightning documentation --
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
"""
optimizer_class = getattr(optim, self.hyperparams.get("optimizer", "Adam"))
lr = self.hyperparams.get("lr", 1e-4)
weight_decay = self.hyperparams.get("weight_decay", 1e-6)
optimizer = optimizer_class(self.parameters(), lr=lr, weight_decay=weight_decay)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(
optimizer,
patience=self.hyperparams["learning_rate_schedule_patience"],
),
"monitor": "val_loss",
},
}
def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
"""Compute and return the training loss.
Args:
batch: the output of your DataLoader
Returns:
training loss
"""
batch = args[0]
x = batch["image"]
with torch.no_grad():
x1, x2 = self.model.augment(x), self.model.augment(x)
pred1, pred2 = self.forward(x1), self.forward(x2)
with torch.no_grad():
targ1, targ2 = self.model.target(x1), self.model.target(x2)
loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))
self.log("train_loss", loss, on_step=True, on_epoch=False)
self.model.update_target()
return loss
def validation_step(self, *args: Any, **kwargs: Any) -> None:
"""Compute validation loss.
Args:
batch: the output of your DataLoader
"""
batch = args[0]
x = batch["image"]
x1, x2 = self.model.augment(x), self.model.augment(x)
pred1, pred2 = self.forward(x1), self.forward(x2)
targ1, targ2 = self.model.target(x1), self.model.target(x2)
loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))
self.log("val_loss", loss, on_step=False, on_epoch=True)
def test_step(self, *args: Any, **kwargs: Any) -> Any:
"""No-op, does nothing."""
|
the-stack_0_434 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Subnet(object):
def __init__(self, region=None, az=None, subnetId=None, name=None, cidr=None, vpcId=None, vpcName=None, availableIpCount=None, totalIpCount=None, networkType=None, description=None, createTime=None):
"""
:param region: (Optional) Region code, e.g. cn-east-1
:param az: (Optional) Availability zone, e.g. cn-east-1a
:param subnetId: (Optional) Subnet ID
:param name: (Optional) Subnet name
:param cidr: (Optional) Subnet CIDR
:param vpcId: (Optional) VPC ID
:param vpcName: (Optional) VPC name
:param availableIpCount: (Optional) Number of available IPs
:param totalIpCount: (Optional) Total number of IPs
:param networkType: (Optional) Network type
:param description: (Optional) Description
:param createTime: (Optional) Creation time
"""
self.region = region
self.az = az
self.subnetId = subnetId
self.name = name
self.cidr = cidr
self.vpcId = vpcId
self.vpcName = vpcName
self.availableIpCount = availableIpCount
self.totalIpCount = totalIpCount
self.networkType = networkType
self.description = description
self.createTime = createTime
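# Illustrative sketch, not part of the generated SDK file: the model is a plain
# data holder, so constructing it just means passing whichever optional fields
# are known. All values below are made up for demonstration.
def _subnet_example() -> Subnet:
    return Subnet(
        region="cn-east-1",
        az="cn-east-1a",
        subnetId="subnet-example",
        name="demo-subnet",
        cidr="192.168.0.0/24",
    )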
|
the-stack_0_437 | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_hub.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-import-not-at-top,g-statement-before-imports
try:
import mock as mock
except ImportError:
import unittest.mock as mock
# pylint:enable=g-import-not-at-top,g-statement-before-imports
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_hub import test_utils
from tensorflow_hub import tf_v1
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
# pylint: enable=g-direct-tensorflow-import
_dense_features_module = test_utils.get_dense_features_module()
def text_module_fn():
embeddings = [
("", [0, 0, 0, 0]), # OOV items are mapped to this embedding.
("hello world", [1, 2, 3, 4]),
("pair-programming", [5, 5, 5, 5]),
]
keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
tbl_init = KeyValueTensorInitializer(keys, indices)
table = HashTable(tbl_init, 0)
weights_initializer = tf.cast(
tf.constant(list([item[1] for item in embeddings])), tf.float32)
weights = tf_v1.get_variable(
"weights", dtype=tf.float32, initializer=weights_initializer)
text_tensor = tf_v1.placeholder(dtype=tf.string, name="text", shape=[None])
indices_tensor = table.lookup(text_tensor)
embedding_tensor = tf.gather(weights, indices_tensor)
hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)
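# Illustrative sketch, not part of the original test file (the helper name is an
# assumption): the standalone pattern the tests below exercise -- build a module
# spec from text_module_fn, wrap it in a text_embedding_column and feed it to
# input_layer.
def _text_embedding_column_example():
    spec = hub.create_module_spec(text_module_fn)
    column = hub.text_embedding_column("text", spec, trainable=False)
    with tf.Graph().as_default():
        features = {"text": ["hello world", "pair-programming"]}
        input_layer = tf_v1.feature_column.input_layer(features, [column])
        with tf_v1.train.MonitoredSession() as sess:
            return sess.run(input_layer)  # [[1, 2, 3, 4], [5, 5, 5, 5]]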
def invalid_text_module_fn():
text = tf_v1.placeholder(tf.string, shape=[10])
hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))
class CommonColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
@mock.patch.object(feature_column_v2._StateManagerImpl, "add_resource")
def testFeatureColumnsWithResources(self, mock_add_resource):
feature_column = hub.text_embedding_column("text_a", self.spec)
if not isinstance(feature_column, feature_column_v2.FeatureColumn):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))
@mock.patch.object(feature_column_v2._StateManagerImpl, "add_resource")
def testFeatureColumnsWithNoResources(self, mock_add_resource):
mock_add_resource.side_effect = NotImplementedError
feature_column = hub.text_embedding_column("text_a", self.spec)
self.assertFalse(feature_column_v2.is_feature_column_v2([feature_column]))
class TextEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
def testVariableShape(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
self.assertEqual(text_column._variable_shape, [4])
def testParents(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
self.assertEqual(["text"], text_column.parents)
def testMakeParseExampleSpec(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])
self.assertEqual(parsing_spec,
{"text": tf_v1.FixedLenFeature([1], dtype=tf.string)})
def testInputLayer(self):
features = {
"text_a": ["hello world", "pair-programming"],
"text_b": ["hello world", "oov token"],
}
feature_columns = [
hub.text_embedding_column("text_a", self.spec, trainable=False),
hub.text_embedding_column("text_b", self.spec, trainable=False),
]
with tf.Graph().as_default():
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(
output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])
def testDenseFeatures(self):
features = {
"text_a": ["hello world", "pair-programming"],
"text_b": ["hello world", "oov token"],
}
feature_columns = [
hub.text_embedding_column("text_a", self.spec, trainable=False),
hub.text_embedding_column("text_b", self.spec, trainable=False),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(feature_layer_out)
self.assertAllEqual(
output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])
def testDenseFeatures_shareAcrossApplication(self):
features = {
"text": ["hello world", "pair-programming"],
}
feature_columns = [
hub.text_embedding_column("text", self.spec, trainable=True),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out_1 = feature_layer(features)
feature_layer_out_2 = feature_layer(features)
# We define loss only on the first layer. Since layers should have shared
# weights, we expect the second layer will change too.
loss = feature_layer_out_1 - tf.constant(0.005)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.7)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
before_update_1 = sess.run(feature_layer_out_1)
sess.run(train_op)
after_update_1 = sess.run(feature_layer_out_1)
after_update_2 = sess.run(feature_layer_out_2)
self.assertAllEqual(before_update_1, [[1, 2, 3, 4],
[5, 5, 5, 5]])
self.assertAllEqual(after_update_1, after_update_2)
def testWorksWithCannedEstimator(self):
comment_embedding_column = hub.text_embedding_column(
"comment", self.spec, trainable=False)
upvotes = tf_v1.feature_column.numeric_column("upvotes")
feature_columns = [comment_embedding_column, upvotes]
estimator = tf_v1.estimator.DNNClassifier(
hidden_units=[10],
feature_columns=feature_columns,
model_dir=self.get_temp_dir())
# This only tests that estimator apis are working with the feature
# column without throwing exceptions.
features = {
"comment": np.array([
["the quick brown fox"],
["spam spam spam"],
]),
"upvotes": np.array([
[20],
[1],
]),
}
labels = np.array([[1], [0]])
if hasattr(tf.compat, "v1"):
numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn
else:
numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn
input_fn = numpy_input_fn(features, labels, shuffle=True)
estimator.train(input_fn, max_steps=1)
estimator.evaluate(input_fn, steps=1)
estimator.predict(input_fn)
def testTrainableEmbeddingColumn(self):
feature_columns = [
hub.text_embedding_column("text", self.spec, trainable=True),
]
with tf.Graph().as_default():
features = {
"text": ["hello world", "pair-programming"],
}
target = [[1, 1, 1, 1], [4, 3, 2, 1]]
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
loss = tf.cast(
tf_v1.losses.mean_squared_error(input_layer, target), tf.float64)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])
for _ in range(10):
sess.run(train_op)
self.assertAllClose(sess.run(input_layer), target, atol=0.5)
def testInvalidTextModule(self):
spec = hub.create_module_spec(invalid_text_module_fn)
with self.assertRaisesRegexp(ValueError, "only one input"):
hub.text_embedding_column("coment", spec, trainable=False)
def create_image_module_fn(randomly_initialized=False):
def image_module_fn():
"""Maps 1x2 images to sums of each color channel."""
images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])
if randomly_initialized:
initializer = tf_v1.random_uniform_initializer(
minval=-1, maxval=1, dtype=tf.float32)
else:
initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32)
weight = tf_v1.get_variable(
name="weight", shape=[1], initializer=initializer)
sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight
hub.add_signature(inputs={"images": images}, outputs=sum_channels)
return image_module_fn
class ImageEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(create_image_module_fn())
self.randomly_initialized_spec = hub.create_module_spec(
create_image_module_fn(randomly_initialized=True))
def testExpectedImageSize(self):
image_column = hub.image_embedding_column("image", self.spec)
# The usage comment recommends this code pattern, so we test it here.
self.assertSequenceEqual(
hub.get_expected_image_size(image_column.module_spec), [1, 2])
def testVariableShape(self):
image_column = hub.image_embedding_column("image", self.spec)
self.assertEqual(image_column.variable_shape, [3])
def testParents(self):
image_column = hub.image_embedding_column("image", self.spec)
self.assertEqual(["image"], image_column.parents)
def testMakeParseExampleSpec(self):
image_column = hub.image_embedding_column("image", self.spec)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([image_column])
self.assertEqual(
parsing_spec,
{"image": tf_v1.FixedLenFeature([1, 2, 3], dtype=tf.float32)})
def testInputLayer(self):
features = {
"image_a": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
"image_b": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],
[[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],
}
feature_columns = [
hub.image_embedding_column("image_a", self.spec),
hub.image_embedding_column("image_b", self.spec),
]
with tf.Graph().as_default():
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllClose(
output,
[[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])
def testDenseFeatures(self):
features = {
"image_a": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
"image_b": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],
[[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],
}
feature_columns = [
hub.image_embedding_column("image_a", self.spec),
hub.image_embedding_column("image_b", self.spec),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(feature_layer_out)
self.assertAllClose(
output,
[[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])
def testDenseFeatures_shareAcrossApplication(self):
features = {
"image": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
}
feature_columns = [
hub.image_embedding_column("image", self.randomly_initialized_spec),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out_1 = feature_layer(features)
feature_layer_out_2 = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output_1 = sess.run(feature_layer_out_1)
output_2 = sess.run(feature_layer_out_2)
self.assertAllClose(output_1, output_2)
def testWorksWithCannedEstimator(self):
image_column = hub.image_embedding_column("image", self.spec)
other_column = tf_v1.feature_column.numeric_column("number")
feature_columns = [image_column, other_column]
estimator = tf_v1.estimator.DNNClassifier(
hidden_units=[10],
feature_columns=feature_columns,
model_dir=self.get_temp_dir())
# This only tests that estimator apis are working with the feature
# column without throwing exceptions.
features = {
"image":
np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
dtype=np.float32),
"number":
np.array([[20], [1]]),
}
labels = np.array([[1], [0]])
if hasattr(tf.compat, "v1"):
numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn
else:
numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn
input_fn = numpy_input_fn(features, labels, shuffle=True)
estimator.train(input_fn, max_steps=1)
estimator.evaluate(input_fn, steps=1)
estimator.predict(input_fn)
class SparseTextEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
def testVariableShape(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, combiner="mean", default_value=None, trainable=False)
self.assertEqual(text_column._variable_shape, [4])
def testMakeParseExampleSpec(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, combiner="mean", default_value=None, trainable=False)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])
self.assertEqual(parsing_spec, {"text": tf_v1.VarLenFeature(tf.string)})
def testParents(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, "sum", "", trainable=False)
self.assertEqual(["text"], text_column.parents)
def testInputLayer(self):
with tf.Graph().as_default():
text_a = tf.SparseTensor(
values=["hello world", "pair-programming", "hello world"],
indices=[[0, 0], [0, 1], [1, 0]],
dense_shape=[2, 2])
text_b = tf.SparseTensor(
values=["hello world", "oov token"],
indices=[[0, 0], [0, 1]],
dense_shape=[2, 3])
features = {
"text_a": text_a,
"text_b": text_b,
}
feature_columns = [
hub.sparse_text_embedding_column(
"text_a",
self.spec,
combiner="mean",
default_value="__UNKNOWN__",
trainable=False),
hub.sparse_text_embedding_column(
"text_b",
self.spec,
combiner="mean",
default_value="__UNKNOWN__",
trainable=False),
]
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(
output,
[[3, 3.5, 4, 4.5, 0.5, 1, 1.5, 2], [1, 2, 3, 4, 0, 0, 0, 0]])
# ([1, 2, 3, 4] + [5, 5, 5, 5])/2 extend ([1, 2, 3, 4] + [0, 0, 0, 0])/2
# [1, 2, 3, 4] extend [0, 0, 0, 0]
def testTrainableEmbeddingColumn(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value=None,
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=["hello world", "pair-programming"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 2])
target = [[1, 1, 1, 1], [4, 3, 2, 1]]
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
loss = tf_v1.losses.mean_squared_error(input_layer, target)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])
for _ in range(10):
sess.run(train_op)
self.assertAllClose(sess.run(input_layer), target, atol=0.5)
def testEmptySparseTensorBatch(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value="default",
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=tf_v1.constant([], dtype=tf_v1.string, shape=[0]),
indices=tf_v1.constant([], dtype=tf_v1.int64, shape=[0, 2]),
dense_shape=[3, 0])
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
with tf_v1.train.MonitoredSession() as sess:
embeddings = sess.run(input_layer)
self.assertAllEqual(embeddings,
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
def testEmptySparseTensorRow(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value="default",
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=tf_v1.constant(["hello world"], dtype=tf_v1.string, shape=[1]),
indices=tf_v1.constant([[0, 0]], dtype=tf_v1.int64, shape=[1, 2]),
dense_shape=[2, 1])
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
with tf_v1.train.MonitoredSession() as sess:
embeddings = sess.run(input_layer)
self.assertAllEqual(embeddings, [[1, 2, 3, 4], [0, 0, 0, 0]])
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_438 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from js_services import models, forms
from .constants import (
IS_THERE_COMPANIES,
)
if IS_THERE_COMPANIES:
from js_companies.models import Company
@plugin_pool.register_plugin
class RelatedServicesPlugin(CMSPluginBase):
TEMPLATE_NAME = 'js_services/plugins/related_services__%s.html'
module = 'Services'
render_template = 'js_services/plugins/related_services.html'
name = _('Related Services')
model = models.RelatedServicesPlugin
form = forms.RelatedServicesPluginForm
def render(self, context, instance, placeholder):
request = context.get('request')
context['instance'] = instance
context['title'] = instance.title
context['icon'] = instance.icon
context['image'] = instance.image
context['background_color'] = instance.background_color
context['full_screen'] = instance.full_screen
qs = instance.related_services.published()
related_sections = instance.related_sections.all()
related_people = instance.related_people.all()
if IS_THERE_COMPANIES:
related_companies = instance.related_companies.all()
related_categories = instance.related_categories.all()
if not qs.exists():
selected = False
qs = models.Service.objects.published().distinct()
if related_sections.exists():
selected = True
qs = qs.filter(sections__in=related_sections)
if related_people.exists():
selected = True
qs = qs.filter(person__in=related_people)
if IS_THERE_COMPANIES and related_companies.exists():
selected = True
qs = qs.filter(companies__in=related_companies)
if related_categories.exists():
selected = True
qs = qs.filter(categories__in=related_categories)
if not selected:
qs = models.Service.objects.none()
context['related_services_all'] = qs
context['related_services'] = qs[:int(instance.count)]
return context
def get_render_template(self, context, instance, placeholder):
if instance.layout:
template = self.TEMPLATE_NAME % instance.layout
try:
select_template([template])
return template
except TemplateDoesNotExist:
pass
return self.render_template
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
if IS_THERE_COMPANIES:
obj.related_companies.set(Company.objects.filter(pk__in=form.cleaned_data.get('related_companies')))
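# Illustrative note, not part of the original plugin (the constant name is an
# assumption): a plugin instance whose layout is "cards" resolves to the template
# below, and get_render_template falls back to related_services.html when that
# file does not exist.
EXAMPLE_CARDS_TEMPLATE = RelatedServicesPlugin.TEMPLATE_NAME % "cards"
# -> "js_services/plugins/related_services__cards.html"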
|
the-stack_0_440 | import sys
import yaml
import os
def getcsv(argv):
if len(argv) == 0:
print("No input files given.")
else:
flag = True
out_string = ''
keys = ['L1c', 'L1b', 'L1a', 'L2c', 'L2b', 'L2a', 'L2prf',
'TLBe', 'TLBp', 'TLBa', 'IPC',
'Total_Instructions', 'Total_Cycles',
'L1-Total-Misses', 'L1-Load-Misses', 'L1-Store-Misses',
'L2-Total-Misses', 'L2-Load-Misses', 'L2-Store-Misses',
'Tlb-Total-Misses', 'Tlb-Load-Misses', 'Tlb-Store-Misses']
header = ''
for key in keys:
header += key
header += ';'
out_string = out_string + header + '\n'
for i in range(0, len(argv)):
if os.path.exists(argv[i]):
with open(argv[i], 'r') as in_file:
l_key = ''
try:
in_stream = yaml.safe_load(in_file)
line = ''
for key in keys:
l_key = key
line += str(in_stream[key])
line += ';'
out_string = out_string + line + '\n'
except KeyError:
sys.stderr.write("--Error-- {} does not contain key: {}.\n".format(argv[i], l_key))
flag = False
else:
sys.stderr.write("File {} does not exist.".format(argv[i]))
if flag:
print('Process finished without errors.')
return out_string
else:
sys.stderr.write('Process finished with errors.' + '\n')
return False
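# Illustrative sketch, not part of the original script: a minimal command-line
# entry point. How getcsv() is actually invoked elsewhere is unknown, so this is
# an assumption for demonstration.
if __name__ == '__main__':
    result = getcsv(sys.argv[1:])
    if result:
        print(result)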
|
the-stack_0_441 | import random
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from .auto_augment import cutout, apply_policy
from .utils import *
class Cifar10ImageDataGenerator:
def __init__(self, args):
self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, fill_mode='constant', cval=0, horizontal_flip=True)
self.means = np.array([0.4914009 , 0.48215896, 0.4465308])
self.stds = np.array([0.24703279, 0.24348423, 0.26158753])
self.args = args
if args.auto_augment:
self.policies = [
['Invert', 0.1, 7, 'Contrast', 0.2, 6],
['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],
['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],
['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],
['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],
['ShearY', 0.2, 7, 'Posterize', 0.3, 7],
['Color', 0.4, 3, 'Brightness', 0.6, 7],
['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],
['Equalize', 0.6, 5, 'Equalize', 0.5, 1],
['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],
['Color', 0.7, 7, 'TranslateX', 0.5, 8],
['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],
['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],
['Brightness', 0.9, 6, 'Color', 0.2, 8],
['Solarize', 0.5, 2, 'Invert', 0.0, 3],
['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],
['Equalize', 0.2, 8, 'Equalize', 0.6, 4],
['Color', 0.9, 9, 'Equalize', 0.6, 6],
['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],
['Brightness', 0.1, 3, 'Color', 0.7, 0],
['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],
['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],
['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],
['Equalize', 0.8, 8, 'Invert', 0.1, 3],
['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],
]
def standardize(self, x):
x = x.astype('float32') / 255
means = self.means.reshape(1, 1, 1, 3)
stds = self.stds.reshape(1, 1, 1, 3)
x -= means
x /= (stds + 1e-6)
return x
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,
seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
batches = self.datagen.flow(x, y, batch_size, shuffle, sample_weight,
seed, save_to_dir, save_prefix, save_format, subset)
while True:
x_batch, y_batch = next(batches)
if self.args.cutout:
for i in range(x_batch.shape[0]):
x_batch[i] = cutout(x_batch[i])
if self.args.auto_augment:
x_batch = x_batch.astype('uint8')
for i in range(x_batch.shape[0]):
x_batch[i] = apply_policy(x_batch[i], self.policies[random.randrange(len(self.policies))])
x_batch = self.standardize(x_batch)
yield x_batch, y_batch
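# Illustrative sketch, not part of the original file (the helper name is an
# assumption): the generator only needs an object exposing `cutout` and
# `auto_augment` flags, so a SimpleNamespace works just as well as the
# argparse.Namespace built in main() below.
def _cifar10_generator_example():
    from types import SimpleNamespace
    from tensorflow.keras.datasets import cifar10
    args = SimpleNamespace(cutout=True, auto_augment=False)
    datagen = Cifar10ImageDataGenerator(args)
    (x_train, y_train), _ = cifar10.load_data()
    x_batch, y_batch = next(datagen.flow(x_train, y_train, batch_size=32))
    return x_batch.shape, y_batch.shape  # ((32, 32, 32, 3), (32, 1))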
def main():
import argparse
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
parser = argparse.ArgumentParser()
parser.add_argument('--cutout', default=True, type=str2bool)
parser.add_argument('--auto-augment', default=True, type=str2bool)
args = parser.parse_args()
datagen = Cifar10ImageDataGenerator(args)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
for imgs, _ in datagen.flow(x_train, y_train):
plt.imshow(imgs[0].astype('uint8'))
plt.axis('off')
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_443 | #!/usr/bin/env python
"""Implementation of various cryptographic types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import binascii
import hashlib
import logging
import os
from cryptography import exceptions
from cryptography import x509
from cryptography.hazmat.backends import openssl
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives import padding as sym_padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.primitives.kdf import pbkdf2
from cryptography.x509 import oid
from future.builtins import str
from future.utils import string_types
from typing import Text
from grr_response_core.lib import config_lib
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import random
from grr_response_proto import jobs_pb2
class Error(Exception):
pass
class VerificationError(Error):
pass
class InvalidSignature(Error):
pass
class CipherError(rdfvalue.DecodeError):
"""Raised when decryption failed."""
class Certificate(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.Certificate
class RDFX509Cert(rdfvalue.RDFPrimitive):
"""X509 certificates used to communicate with this client."""
def __init__(self, initializer=None, age=None):
super(RDFX509Cert, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, x509.Certificate):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def GetRawCertificate(self):
return self._value
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("Cert has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("Cert has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def GetSerialNumber(self):
return self._value.serial_number
def GetIssuer(self):
return self._value.issuer
def ParseFromString(self, string):
try:
self._value = x509.load_pem_x509_certificate(
string, backend=openssl.backend)
except (ValueError, TypeError) as e:
raise rdfvalue.DecodeError("Invalid certificate %s: %s" % (string, e))
# This can also raise if there isn't exactly one CN entry.
self.GetCN()
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(encoding=serialization.Encoding.PEM)
def AsPEM(self):
return self.SerializeToString()
def __str__(self):
return self.SerializeToString()
def Verify(self, public_key):
"""Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
"""
# TODO(amoser): We have to do this manually for now since cryptography does
# not yet support cert verification. There is PR 2460:
# https://github.com/pyca/cryptography/pull/2460/files
# that will add it, once it's in we should switch to using this.
# Note that all times here are in UTC.
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
@classmethod
def ClientCertFromCSR(cls, csr):
"""Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
"""
builder = x509.CertificateBuilder()
# Use the client CN for a cert serial_id. This will ensure we do
# not have clashing cert id.
common_name = csr.GetCN()
serial = int(common_name.split(".")[1], 16)
builder = builder.serial_number(serial)
builder = builder.subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))
now = rdfvalue.RDFDatetime.Now()
now_plus_year = now + rdfvalue.Duration("52w")
builder = builder.not_valid_after(now_plus_year.AsDatetime())
now_minus_ten = now - rdfvalue.Duration("10s")
builder = builder.not_valid_before(now_minus_ten.AsDatetime())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_cert = config_lib._CONFIG["CA.certificate"]
# pylint: enable=protected-access
builder = builder.issuer_name(ca_cert.GetIssuer())
builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
# pylint: enable=protected-access
return RDFX509Cert(
builder.sign(
private_key=ca_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend))
class CertificateSigningRequest(rdfvalue.RDFValue):
"""A CSR Rdfvalue."""
def __init__(self,
initializer=None,
common_name=None,
private_key=None,
age=None):
super(CertificateSigningRequest, self).__init__(
initializer=initializer, age=age)
if self._value is None:
if isinstance(initializer, x509.CertificateSigningRequest):
self._value = initializer
elif isinstance(initializer, string_types):
self.ParseFromString(initializer)
elif common_name and private_key:
self._value = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME,
str(common_name))])).sign(
private_key.GetRawPrivateKey(),
hashes.SHA256(),
backend=openssl.backend)
elif initializer is not None:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def ParseFromString(self, csr_as_pem):
self._value = x509.load_pem_x509_csr(csr_as_pem, backend=openssl.backend)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(serialization.Encoding.PEM)
def AsPEM(self):
return self.SerializeToString()
def __str__(self):
return self.SerializeToString()
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("CSR has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("CSR has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Verify(self, public_key):
public_key.Verify(
self._value.tbs_certrequest_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
class RSAPublicKey(rdfvalue.RDFPrimitive):
"""An RSA public key."""
def __init__(self, initializer=None, age=None):
super(RSAPublicKey, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, rsa.RSAPublicKey):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
elif isinstance(initializer, Text):
self.ParseFromString(initializer.encode("ascii"))
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def GetRawPublicKey(self):
return self._value
def ParseFromString(self, pem_string):
precondition.AssertType(pem_string, bytes)
try:
self._value = serialization.load_pem_public_key(
pem_string, backend=openssl.backend)
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Public key invalid: %s" % e)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
def GetN(self):
return self._value.public_numbers().n
def __str__(self):
return self.SerializeToString()
def AsPEM(self):
return self.SerializeToString()
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
def Encrypt(self, message):
if self._value is None:
raise ValueError("Can't Encrypt with empty key.")
try:
return self._value.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
def Verify(self, message, signature, hash_algorithm=None):
"""Verifies a given message."""
# This method accepts both PSS and PKCS1v15 padding. PSS is preferred but
# old clients only support PKCS1v15.
if hash_algorithm is None:
hash_algorithm = hashes.SHA256()
last_e = None
for padding_algorithm in [
padding.PSS(
mgf=padding.MGF1(hash_algorithm),
salt_length=padding.PSS.MAX_LENGTH),
padding.PKCS1v15()
]:
try:
self._value.verify(signature, message, padding_algorithm,
hash_algorithm)
return True
except exceptions.InvalidSignature as e:
last_e = e
raise VerificationError(last_e)
class RSAPrivateKey(rdfvalue.RDFPrimitive):
"""An RSA private key."""
def __init__(self, initializer=None, age=None, allow_prompt=None):
self.allow_prompt = allow_prompt
super(RSAPrivateKey, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, rsa.RSAPrivateKey):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
elif isinstance(initializer, Text):
self.ParseFromString(initializer.encode("ascii"))
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def GetRawPrivateKey(self):
return self._value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Sign(self, message, use_pss=False):
"""Sign a given message."""
precondition.AssertType(message, bytes)
# TODO(amoser): This should use PSS by default at some point.
if not use_pss:
padding_algorithm = padding.PKCS1v15()
else:
padding_algorithm = padding.PSS(
mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)
return self._value.sign(message, padding_algorithm, hashes.SHA256())
def Decrypt(self, message):
if self._value is None:
raise ValueError("Can't Decrypt with empty key.")
try:
return self._value.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
@classmethod
def GenerateKey(cls, bits=2048, exponent=65537):
key = rsa.generate_private_key(
public_exponent=exponent, key_size=bits, backend=openssl.backend)
return cls(key)
def ParseFromString(self, pem_string):
precondition.AssertType(pem_string, bytes)
try:
self._value = serialization.load_pem_private_key(
pem_string, password=None, backend=openssl.backend)
return
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
if "private key is encrypted" not in str(e):
raise type_info.TypeValueError("Private key invalid: %s" % e)
# pylint: disable=g-explicit-bool-comparison, g-equals-none
# The private key is passphrase protected, we need to see if we are
# allowed to ask the user.
#
# If allow_prompt is False, we are explicitly told that we are not.
if self.allow_prompt == False:
raise type_info.TypeValueError("Private key invalid: %s" % e)
# allow_prompt was not set, we use the context we are in to see if it
# makes sense to ask.
elif self.allow_prompt == None:
# TODO(user): dependency loop with
# core/grr_response_core/grr/config/client.py.
# pylint: disable=protected-access
if "Commandline Context" not in config_lib._CONFIG.context:
raise type_info.TypeValueError("Private key invalid: %s" % e)
# pylint: enable=protected-access
# pylint: enable=g-explicit-bool-comparison, g-equals-none
try:
# The private key is encrypted and we can ask the user for the passphrase.
password = utils.PassphraseCallback()
self._value = serialization.load_pem_private_key(
pem_string, password=password, backend=openssl.backend)
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Unable to load private key: %s" % e)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
def __str__(self):
digest = hashlib.sha256(self.AsPEM()).hexdigest()
return "%s (%s)" % (self.__class__.__name__, digest)
def AsPEM(self):
return self.SerializeToString()
def AsPassphraseProtectedPEM(self, passphrase):
if self._value is None:
return ""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(passphrase))
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
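# Illustrative sketch, not part of the original module (the helper name and
# message are assumptions): a sign/verify and encrypt/decrypt round trip using
# only the wrappers defined above. The 2048 bit key size matches the
# GenerateKey default.
def _rsa_round_trip_example():
    private_key = RSAPrivateKey.GenerateKey(bits=2048)
    public_key = private_key.GetPublicKey()
    message = b"example message"
    signature = private_key.Sign(message)
    public_key.Verify(message, signature)  # raises VerificationError on failure
    ciphertext = public_key.Encrypt(message)
    assert private_key.Decrypt(ciphertext) == message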
# TODO(amoser): Get rid of those.
# Conserve old names for backwards compatibility.
class PEMPrivateKey(RSAPrivateKey):
pass
class PEMPublicKey(RSAPublicKey):
pass
class Hash(rdf_structs.RDFProtoStruct):
"""A hash object containing multiple digests."""
protobuf = jobs_pb2.Hash
rdf_deps = [
rdf_standard.AuthenticodeSignedData,
rdfvalue.HashDigest,
]
class SignedBlob(rdf_structs.RDFProtoStruct):
"""A signed blob.
The client can receive and verify a signed blob (e.g. driver or executable
binary). Once verified, the client may execute this.
"""
protobuf = jobs_pb2.SignedBlob
def Verify(self, public_key):
"""Verify the data in this blob.
Args:
public_key: The public key to use for verification.
Returns:
True when verification succeeds.
Raises:
rdfvalue.DecodeError if the data is not suitable verified.
"""
if self.digest_type != self.HashType.SHA256:
raise rdfvalue.DecodeError("Unsupported digest.")
if self.signature_type not in [
self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS
]:
raise rdfvalue.DecodeError("Unsupported signature type.")
try:
public_key.Verify(self.data, self.signature)
except InvalidSignature as e:
raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)
return True
def Sign(self, data, signing_key, verify_key=None):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining.
"""
if signing_key.KeyLen() < 2048:
logging.warning("signing key is too short.")
self.signature = signing_key.Sign(data)
self.signature_type = self.SignatureType.RSA_PKCS1v15
self.digest = hashlib.sha256(data).digest()
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
return self
class EncryptionKey(rdfvalue.RDFBytes):
"""Base class for encryption keys."""
# Size of the key in bits.
length = 0
def ParseFromString(self, string):
if len(string) % 8:
raise CipherError(
"Invalid key length %d (%s)." % (len(string) * 8, string))
self._value = string
self.length = 8 * len(self._value)
if self.length < 128:
raise CipherError("Key too short (%d): %s" % (self.length, string))
def __str__(self):
digest = hashlib.sha256(self.AsHexDigest()).hexdigest()
return "%s (%s)" % (self.__class__.__name__, digest)
def AsHexDigest(self):
return binascii.hexlify(self._value)
@classmethod
def FromHex(cls, hex_string):
precondition.AssertType(hex_string, Text)
return cls(binascii.unhexlify(hex_string))
def SerializeToString(self):
return self._value
@classmethod
def GenerateKey(cls, length=128):
return cls(os.urandom(length // 8))
@classmethod
def GenerateRandomIV(cls, length=128):
return cls.GenerateKey(length=length)
def RawBytes(self):
return self._value
# TODO(amoser): Size is now flexible, this class makes no sense anymore.
class AES128Key(EncryptionKey):
length = 128
class AutoGeneratedAES128Key(AES128Key):
"""Like AES128Key, but its UI edit box is prefilled with generated key."""
def __init__(self, initializer=None, **kwargs):
if isinstance(initializer, AES128Key):
super(AutoGeneratedAES128Key, self).__init__(
initializer=initializer.RawBytes(), **kwargs)
else:
super(AutoGeneratedAES128Key, self).__init__(
initializer=initializer, **kwargs)
class StreamingCBCEncryptor(object):
"""A class to stream data to a CBCCipher object."""
def __init__(self, cipher):
self._cipher = cipher
self._encryptor = cipher.GetEncryptor()
self._overflow_buffer = b""
self._block_size = len(cipher.key)
def Update(self, data):
data = self._overflow_buffer + data
overflow_count = len(data) % self._block_size
length_to_encrypt = len(data) - overflow_count
to_encrypt = data[:length_to_encrypt]
self._overflow_buffer = data[length_to_encrypt:]
return self._encryptor.update(to_encrypt)
def Finalize(self):
res = self._encryptor.update(self._cipher.Pad(self._overflow_buffer))
res += self._encryptor.finalize()
return res
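# Illustrative sketch: feeding data to StreamingCBCEncryptor in chunks should
# yield the same ciphertext as a single AES128CBCCipher.Encrypt() call (the
# cipher class is defined just below; key and IV are freshly generated here).
def _ExampleStreamingMatchesOneShot(data=b"0123456789abcdef" * 4 + b"tail"):
  key = EncryptionKey.GenerateKey(length=128)
  iv = EncryptionKey.GenerateRandomIV(length=128)
  cipher = AES128CBCCipher(key, iv)
  streamer = StreamingCBCEncryptor(cipher)
  streamed = (streamer.Update(data[:10]) + streamer.Update(data[10:]) +
              streamer.Finalize())
  return streamed == cipher.Encrypt(data)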
class AES128CBCCipher(object):
"""A Cipher using AES128 in CBC mode and PKCS7 for padding."""
algorithm = None
def __init__(self, key, iv):
"""Init.
Args:
key: The key, a rdf_crypto.EncryptionKey instance.
iv: The iv, a rdf_crypto.EncryptionKey instance.
"""
self.key = key.RawBytes()
self.iv = iv.RawBytes()
def Pad(self, data):
padder = sym_padding.PKCS7(128).padder()
return padder.update(data) + padder.finalize()
def UnPad(self, padded_data):
unpadder = sym_padding.PKCS7(128).unpadder()
return unpadder.update(padded_data) + unpadder.finalize()
def GetEncryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).encryptor()
def Encrypt(self, data):
"""A convenience method which pads and encrypts at once."""
encryptor = self.GetEncryptor()
padded_data = self.Pad(data)
try:
return encryptor.update(padded_data) + encryptor.finalize()
except ValueError as e:
raise CipherError(e)
def GetDecryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).decryptor()
def Decrypt(self, data):
"""A convenience method which pads and decrypts at once."""
decryptor = self.GetDecryptor()
try:
padded_data = decryptor.update(data) + decryptor.finalize()
return self.UnPad(padded_data)
except ValueError as e:
raise CipherError(e)
class SymmetricCipher(rdf_structs.RDFProtoStruct):
"""Abstract symmetric cipher operations."""
protobuf = jobs_pb2.SymmetricCipher
rdf_deps = [
EncryptionKey,
]
@classmethod
def Generate(cls, algorithm):
if algorithm != cls.Algorithm.AES128CBC:
raise RuntimeError("Algorithm not supported.")
return cls(
_algorithm=algorithm,
_key=EncryptionKey.GenerateKey(length=128),
_iv=EncryptionKey.GenerateKey(length=128))
def _get_cipher(self):
if self._algorithm != self.Algorithm.AES128CBC:
raise CipherError("Unknown cipher type %s" % self._algorithm)
return AES128CBCCipher(self._key, self._iv)
def Encrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Encrypt(data)
def Decrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Decrypt(data)
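# Minimal usage sketch for SymmetricCipher: generate an AES128-CBC cipher and
# round-trip some plaintext through Encrypt()/Decrypt(); illustrative only.
def _ExampleSymmetricCipherRoundtrip(plaintext=b"secret message"):
  cipher = SymmetricCipher.Generate(SymmetricCipher.Algorithm.AES128CBC)
  return cipher.Decrypt(cipher.Encrypt(plaintext)) == plaintext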
class HMAC(object):
"""A wrapper for the cryptography HMAC object."""
def __init__(self, key, use_sha256=False):
# We store the raw key from cryptography.io.
if isinstance(key, EncryptionKey):
key = key.RawBytes()
self.key = key
self._hmac = self._NewHMAC(use_sha256=use_sha256)
def _NewHMAC(self, use_sha256=False):
if use_sha256:
hash_algorithm = hashes.SHA256()
else:
hash_algorithm = hashes.SHA1()
return hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
def Update(self, data):
self._hmac.update(data)
def Finalize(self):
return self._hmac.finalize()
def HMAC(self, message, use_sha256=False):
"""Calculates the HMAC for a given message."""
h = self._NewHMAC(use_sha256=use_sha256)
h.update(message)
return h.finalize()
def Verify(self, message, signature):
"""Verifies the signature for a given message."""
siglen = len(signature)
if siglen == 20:
hash_algorithm = hashes.SHA1()
elif siglen == 32:
hash_algorithm = hashes.SHA256()
else:
raise VerificationError("Invalid signature length %d." % siglen)
h = hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
h.update(message)
try:
h.verify(signature)
return True
except exceptions.InvalidSignature as e:
raise VerificationError(e)
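# Usage sketch for the HMAC wrapper: tag a message and verify the tag. The key
# may be raw bytes or an EncryptionKey; both are accepted by __init__ above.
def _ExampleHMACRoundtrip(message=b"attested message"):
  h = HMAC(EncryptionKey.GenerateKey(length=128))
  tag = h.HMAC(message)
  # Verify() raises VerificationError on mismatch and returns True otherwise.
  return h.Verify(message, tag)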
class Password(rdf_structs.RDFProtoStruct):
"""A password stored in the database."""
protobuf = jobs_pb2.Password
def _CalculateHash(self, password, salt, iteration_count):
kdf = pbkdf2.PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=iteration_count,
backend=openssl.backend)
return kdf.derive(password)
def SetPassword(self, password):
self.salt = b"%016x" % random.UInt64()
self.iteration_count = 100000
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, string_types):
password = password.encode("utf-8")
self.hashed_pwd = self._CalculateHash(password, self.salt,
self.iteration_count)
def CheckPassword(self, password):
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, string_types):
password = password.encode("utf-8")
h = self._CalculateHash(password, self.salt, self.iteration_count)
return constant_time.bytes_eq(h, self.hashed_pwd)
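# Usage sketch for Password (illustrative): store a password hash, then check a
# matching and a non-matching candidate.
def _ExamplePasswordCheck():
  stored = Password()
  stored.SetPassword("correct horse battery staple")
  return (stored.CheckPassword("correct horse battery staple") and
          not stored.CheckPassword("wrong password"))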
|
the-stack_0_444 | # Copyright 2019 Google LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google LLC nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@com_google_api_gax_java_properties//:dependencies.properties.bzl", "PROPERTIES")
def com_google_api_gax_java_repositories():
# Import dependencies shared between Gradle and Bazel (i.e. maven dependencies)
for name, artifact in PROPERTIES.items():
_maybe(
native.maven_jar,
name = name,
strip_repo_prefix = "maven.",
artifact = _fix_bazel_artifact_format(artifact),
)
# Import Bazel-only dependencies (Gradle version will import maven artifacts of same
# version, while Bazel will depend on Bazel workspaces). The versions are shared in the
# properties file.
_protobuf_version = PROPERTIES["version.com_google_protobuf"]
_protobuf_version_in_link = "v%s" % _protobuf_version
_maybe(
http_archive,
name = "com_google_protobuf",
urls = ["https://github.com/protocolbuffers/protobuf/archive/%s.zip" % _protobuf_version_in_link],
strip_prefix = "protobuf-%s" % _protobuf_version,
)
_grpc_version = PROPERTIES["version.io_grpc"]
_grpc_version_in_link = "v%s" % _grpc_version
_maybe(
http_archive,
name = "io_grpc_grpc_java",
urls = ["https://github.com/grpc/grpc-java/archive/%s.zip" % _grpc_version_in_link],
strip_prefix = "grpc-java-%s" % _grpc_version,
)
_maybe(
http_archive,
name = "bazel_skylib",
strip_prefix = "bazel-skylib-0.7.0",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/0.7.0.zip"],
)
_maybe(
native.maven_jar,
name = "io_grpc_grpc_netty_shaded",
artifact = "io.grpc:grpc-netty-shaded:%s" % PROPERTIES["version.io_grpc"],
)
_maybe(
native.maven_jar,
name = "google_java_format_all_deps",
artifact = "com.google.googlejavaformat:google-java-format:jar:all-deps:%s" % PROPERTIES["version.google_java_format"],
)
_maybe(
native.bind,
name = "guava",
actual = "@com_google_guava_guava//jar",
)
_maybe(
native.bind,
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
_maybe(
native.bind,
name = "error_prone_annotations",
actual = "@com_google_errorprone_error_prone_annotations//jar",
)
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
if not name.startswith(strip_repo_prefix):
return
repo_name = name[len(strip_repo_prefix):]
if repo_name in native.existing_rules():
return
repo_rule(name = repo_name, **kwargs)
def _fix_bazel_artifact_format(artifact_id):
# Fix the artifact id format discrepancy between Bazel & Gradle.
# This is relevant only when classifier is specified explicitly.
# Bazel format: groupId:artifactId:jar:classifier:version
# Gradle format: groupId:artifactId:version:classifier
ids = artifact_id.split(":")
if len(ids) != 4:
return artifact_id
return "%s:%s:%s:%s:%s" % (ids[0], ids[1], "jar", ids[3], ids[2])
|
the-stack_0_446 | #coding=utf-8
HOST = ''
PORT = 50008
# Maximum sleep time while there is no connection for an SMV process
MAX_SLEEP_TIME = 5
# Timeout in seconds
TIME_OUT = 5
MU_CHECK_TIMEOUT = 600
MU_CHECK_MEMORY = 1024
# path to NuSMV
SMV_PATH = '/home/lyj238/Downloads/NuSMV/bin/NuSMV'
MU_PATH = '/home/lyj238/Downloads/cmurphi5.4.9/src/mu'
MU_INCLUDE = '/home/lyj238/Downloads/cmurphi5.4.9/include'
GXX_PATH = '/usr/bin/g++'
# path for storing smv files
SMV_FILE_DIR = '/tmp/NuSMV/'
MU_FILE_DIR = '/tmp/cmurphi/'
dirs = [SMV_FILE_DIR, MU_FILE_DIR]
import os
for d in dirs:
if not os.path.isdir(d):
os.makedirs(d)
|
the-stack_0_447 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ImportJobRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'ImportFileReq'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""ImportJobRequest - a model defined in huaweicloud sdk"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this ImportJobRequest.
:return: The body of this ImportJobRequest.
:rtype: ImportFileReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ImportJobRequest.
:param body: The body of this ImportJobRequest.
:type: ImportFileReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ImportJobRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
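# Usage sketch (illustrative): callers normally pass an ImportFileReq instance
# as `body`; the empty round trip below only demonstrates the generated API.
def _example_import_job_request():
    req = ImportJobRequest()
    return req.to_dict(), req == ImportJobRequest()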
|
the-stack_0_448 | import cv2
import os
import numpy as np
import random
# Example: this script operates on the NEU-CLS dataset.
# Randomly select a number of images from the synthesized dataset as the new dataset.
image_dir = '/content/drive/MyDrive/colab/multiClass/NEU-CLS'
# Shuffle the order of the original dataset
img_path = []
for name in os.listdir(image_dir):
img_path.append(os.path.join(image_dir, name))
random.shuffle(img_path)
new_types = ['PS', 'RS', 'Cr', 'In', 'Pa', 'Sc']
# Convert a group of labels into a count vector over defect types
def str_to_defect_types(s):
defect_types = []
for t in new_types:
defect_types.append(s.count(t))
return defect_types
s = []
y = []
dataset_list = img_path  # change this list for training or testing (training: train_dataset; testing: test_dataset)
# size_4_1 = int(len(dataset_list)/4)  # number of synthesized images (new_dataset_path)
# randvector = list(range(len(dataset_list)))
randvector = list(range(1000))  # 3400 2800 1440
for i in randvector:
# img2 = dataset_list[i]
    img2 = random.choice(dataset_list)  # image path
    imgx = img2.split("/")[-1].split("_")[0]  # class label
s.append(imgx)
y.append(img2)
def to_matrix(x_y, n):
ls_4 = []
for i in range(0, len(x_y), n):
ls_4.append(x_y[i: i + n])
return ls_4
s = to_matrix(s, 4)
y = to_matrix(y, 4)
# Synthesize images: combine 4 source images into 1
img_data = []
img_type = []
num = 0
for i in range(250):
x1 = cv2.imread(y[i][0]) # ,as_gray=True)
x2 = cv2.imread(y[i][1]) # ,as_gray=True)
x3 = cv2.imread(y[i][2]) # ,as_gray=True)
x4 = cv2.imread(y[i][3]) # ,as_gray=True)
    im_h1 = cv2.hconcat([x1, x2])  # horizontal concatenation
im_h2 = cv2.hconcat([x3, x4])
im_f = cv2.vconcat([im_h1, im_h2])
img_data.append(np.array(im_f))
    img_type.append(str_to_defect_types(s[i]))  # convert labels to a count vector
root_path = '/content/drive/MyDrive/colab/multiClass/Defects'  # save outputs under this folder
# Convert lists to NumPy arrays
img_data_np = np.array(img_data)
img_type_np = np.array(img_type)
# Absolute paths of the output files
img_data_file = os.path.join(root_path, 'data文件名.npy')
img_types = os.path.join(root_path, 'type文件名.npy')
# Save the arrays
np.save(img_data_file, img_data_np)
np.save(img_types, img_type_np)
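# Optional sanity check (sketch): reload the saved arrays and confirm that the
# number of synthesized images matches the number of label vectors.
def _reload_and_check(data_path=img_data_file, types_path=img_types):
    data = np.load(data_path)
    types = np.load(types_path)
    assert data.shape[0] == types.shape[0]
    return data.shape, types.shape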
|
the-stack_0_449 | import discord
import random
import asyncio
import discord
from discord.ext import commands, tasks
class Prescence(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.prescence_default.start()
self.ctfu_rgblighting.start()
    def cog_unload(self):
        self.prescence_default.cancel()
        self.ctfu_rgblighting.cancel()
@tasks.loop(seconds=60.0)
async def prescence_default(self):
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(self.bot.users)} users.'))
@tasks.loop(seconds=600.0)
async def ctfu_rgblighting(self):
ctfuserver = self.bot.get_guild(694217343173394432)
role = ctfuserver.get_role(701007133994647622)
await role.edit(colour=discord.Colour(random.randint(0, 0xFFFFFF)))
    @prescence_default.before_loop
    async def before_prescence_default(self):
        print('Bot setting up... Adding presence...')
        await self.bot.wait_until_ready()
    @ctfu_rgblighting.before_loop
    async def before_ctfu_rgblighting(self):
        print('Bot setting up... Adding RGB Lighting for CTFU...')
        await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(Prescence(bot))
|
the-stack_0_450 | import random
import string
from time import time
from settings import URL, CHATS_COLLECTION_NAME
from .base import CommandBase
class CommandStart(CommandBase):
async def __call__(self, payload):
self.set_bot(payload)
registered_chat = self.sdk.db.find_one(CHATS_COLLECTION_NAME, {'chat': payload['chat'], 'bot': self.bot})
if registered_chat:
user_token = registered_chat['user']
else:
user_token = self.generate_user_token()
new_chat = {
'chat': payload['chat'],
'user': user_token,
'dt_register': time(),
'bot': self.bot
}
self.sdk.db.insert(CHATS_COLLECTION_NAME, new_chat)
self.sdk.log("New user registered with token {}".format(user_token))
message = "Use this webhook for sending notifications to the chat:\n" \
"\n" \
"<code>{}/u/{}</code>\n" \
"\n" \
"Make a POST request with text in «message» param."
await self.send(
payload["chat"],
message.format(URL, user_token),
"HTML"
)
@staticmethod
def generate_user_token():
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8))
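# Client-side sketch (illustrative, not part of the bot): how a CI job could
# call the webhook printed by CommandStart. The `requests` package and the
# token value are assumptions used only for this example.
def example_notification_request(user_token, message="Build finished"):
    import requests
    return requests.post("{}/u/{}".format(URL, user_token),
                         data={"message": message})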
|
the-stack_0_451 | import unittest
from .framework import selenium_test, SeleniumTestCase
class ToolDescribingToursTestCase(SeleniumTestCase):
def setUp(self):
super().setUp()
self.home()
@selenium_test
def test_generate_tour_no_data(self):
"""Ensure a tour without data is generated and pops up."""
self._ensure_tdt_available()
self.tool_open('environment_variables')
self.tool_form_generate_tour()
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "environment_variables Tour", title
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(1)
@selenium_test
def test_generate_tour_with_data(self):
"""Ensure a tour with data populates history."""
self._ensure_tdt_available()
self.tool_open('md5sum')
self.tool_form_generate_tour()
self.history_panel_wait_for_hid_ok(1)
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "md5sum Tour", title
self.screenshot("tool_describing_tour_0_start")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
text = popover_component.content.wait_for_visible().text
assert "Select dataset" in text, text
self.screenshot("tool_describing_tour_1_select")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
title = popover_component.title.wait_for_visible().text
assert title == "Execute tool"
self.screenshot("tool_describing_tour_2_execute")
popover_component.end.wait_for_and_click()
popover_component.wait_for_absent_or_hidden()
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(2)
self.screenshot("tool_describing_tour_3_after_execute")
def _ensure_tdt_available(self):
""" Skip a test if the webhook TDT doesn't appear. """
response = self.api_get('webhooks', raw=True)
self.assertEqual(response.status_code, 200)
data = response.json()
webhooks = [x['id'] for x in data]
if 'tour_generator' not in webhooks:
raise unittest.SkipTest('Skipping test, webhook "Tool-Describing-Tours" doesn\'t appear to be configured.')
|
the-stack_0_452 | #!/usr/bin/env python
import fileinput
jumps = [int(jump) for jump in fileinput.input()]
clock, pc, max_pc = 0, 0, 0
while pc < len(jumps):
jump = jumps[pc]
jumps[pc] += 1
pc += jump
clock += 1
if pc > max_pc:
max_pc = pc
print("%09d: %04d" % (clock, pc))
print(clock)
|
the-stack_0_454 | # vim:ts=4:sts=4:sw=4:expandtab
"""Matching Clients with event queues.
"""
import collections
from satori.objects import Object
from satori.events.misc import Namespace
class Dispatcher(Object):
"""Abstract. Dispatches Events to Clients.
"""
def __init__(self):
self.queues = dict()
self.clients = dict()
def _qdata(self, queue_id):
if queue_id not in self.queues:
qdata = Namespace()
qdata.references = 0
qdata.events = collections.deque()
qdata.clients = collections.deque()
self.queues[queue_id] = qdata
return self.queues[queue_id]
def _cdata(self, client):
if client not in self.clients:
cdata = Namespace()
cdata.queue_ids = set()
cdata.active = False
self.clients[client] = cdata
return self.clients[client]
def attach(self, client, queue_id):
"""Declare Client's interest in events from a given queue.
"""
qdata = self._qdata(queue_id)
cdata = self._cdata(client)
if queue_id not in cdata.queue_ids:
cdata.queue_ids.add(queue_id)
qdata.references += 1
def detach(self, client, queue_id):
"""Revoke Client's interest in events from a given queue.
"""
qdata = self._qdata(queue_id)
cdata = self._cdata(client)
        if queue_id in cdata.queue_ids:
cdata.queue_ids.remove(queue_id)
qdata.references -= 1
if qdata.references == 0:
yield queue_id
del self.queues[queue_id]
def activate(self, client):
"""Mark a Client as ready to receive a (single) event.
"""
cdata = self._cdata(client)
best = None
for queue_id in cdata.queue_ids:
qdata = self._qdata(queue_id)
if len(qdata.events) > 0:
event = qdata.events[0]
if best is None or best[1] > event.serial:
best = (queue_id, event.serial)
if best is not None:
qdata = self._qdata(best[0])
client.sendResponse((best[0], qdata.events.popleft()))
return
for queue_id in cdata.queue_ids:
qdata = self._qdata(queue_id)
qdata.clients.append(client)
cdata.active = True
def enqueue(self, queue_id, event):
"""Add a new event to a given queue.
"""
qdata = self._qdata(queue_id)
qdata.events.append(event)
while len(qdata.clients) > 0:
client = qdata.clients.popleft()
cdata = self._cdata(client)
if not cdata.active:
continue
if queue_id not in cdata.queue_ids:
continue
cdata.active = False
client.sendResponse((queue_id, qdata.events.popleft()))
return
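# Minimal usage sketch for Dispatcher (illustrative only). Clients only need a
# sendResponse() method and events only need a `serial` attribute, mirroring
# how Dispatcher uses them above.
class _PrintingClient(object):
    def sendResponse(self, response):
        print(response)
def _example_dispatch():
    dispatcher = Dispatcher()
    client = _PrintingClient()
    dispatcher.attach(client, queue_id='queue-1')
    dispatcher.activate(client)  # no events yet, so the client is queued
    event = Namespace()
    event.serial = 1
    dispatcher.enqueue('queue-1', event)  # delivered to the waiting client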
|
the-stack_0_456 | import logging
log = logging.getLogger('onegov.form') # noqa
log.addHandler(logging.NullHandler()) # noqa
from translationstring import TranslationStringFactory
_ = TranslationStringFactory('onegov.form') # noqa
from onegov.form.collection import (
FormCollection,
FormSubmissionCollection,
FormDefinitionCollection
)
from onegov.form.core import (
FieldDependency,
Form,
merge_forms,
move_fields,
)
from onegov.form.display import render_field
from onegov.form.extensions import FormExtension, Extendable
from onegov.form.integration import FormApp
from onegov.form.models import (
FormDefinition,
FormFile,
FormSubmission,
FormRegistrationWindow,
PendingFormSubmission,
CompleteFormSubmission
)
from onegov.form.parser import find_field
from onegov.form.parser import flatten_fieldsets
from onegov.form.parser import parse_form
from onegov.form.parser import parse_formcode
from onegov.form.parser import WTFormsClassBuilder
from onegov.form.utils import decimal_range, as_internal_id, with_options
__all__ = [
'as_internal_id',
'CompleteFormSubmission',
'decimal_range',
'find_field',
'flatten_fieldsets',
'Extendable',
'FieldDependency',
'Form',
'FormApp',
'FormCollection',
'FormDefinition',
'FormDefinitionCollection',
'FormExtension',
'FormFile',
'FormRegistrationWindow',
'FormSubmission',
'FormSubmissionCollection',
'merge_forms',
'move_fields',
'parse_form',
'parse_formcode',
'PendingFormSubmission',
'render_field',
'with_options',
'WTFormsClassBuilder',
]
|
the-stack_0_459 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import os
import random
import re
import string
import time
import pytest
from conftest import get_engine
from mock import patch
from parameters import CONNECTION_PARAMETERS
from snowflake.connector import ProgrammingError, connect
from snowflake.sqlalchemy import URL, MergeInto, dialect
from sqlalchemy import (
REAL,
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
LargeBinary,
MetaData,
Numeric,
Sequence,
String,
Table,
create_engine,
dialects,
inspect,
text,
)
from sqlalchemy.sql import and_, not_, or_, select
try:
from parameters import (CONNECTION_PARAMETERS2)
except ImportError:
CONNECTION_PARAMETERS2 = CONNECTION_PARAMETERS
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def _create_users_addresses_tables(engine_testaccount, metadata, fk=None):
users = Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id', None,
ForeignKey('users.id', name=fk)),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def _create_users_addresses_tables_without_sequence(engine_testaccount,
metadata):
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def test_connect_args():
"""
Tests connect string
Snowflake connect string supports account name as a replacement of
host:port
"""
from sqlalchemy import create_engine
engine = create_engine(
'snowflake://{user}:{password}@{account}/{database}/{schema}'.format(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
database=CONNECTION_PARAMETERS2['database'],
schema=CONNECTION_PARAMETERS2['schema'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(
'snowflake://{user}:{password}@{account}/'.format(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(URL(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(URL(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
warehouse='testwh'
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
def test_simple_sql(engine_testaccount):
"""
Simple SQL by SQLAlchemy
"""
result = engine_testaccount.execute('show databases')
rows = [row for row in result]
assert len(rows) >= 0, 'show database results'
def test_create_drop_tables(engine_testaccount):
"""
Creates and Drops tables
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount, metadata)
try:
# validate the tables exists
results = engine_testaccount.execute('desc table users')
assert len([row for row in results]) > 0, "users table doesn't exist"
# validate the tables exists
results = engine_testaccount.execute('desc table addresses')
assert len([row for row in results]) > 0, \
"addresses table doesn't exist"
finally:
# drop tables
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_insert_tables(engine_testaccount):
"""
Inserts data into tables
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables(
engine_testaccount, metadata)
conn = engine_testaccount.connect()
try:
# inserts data with an implicitly generated id
ins = users.insert().values(name='jack', fullname='Jack Jones')
results = engine_testaccount.execute(ins)
assert results.inserted_primary_key == [1], 'sequence value'
results.close()
# inserts data with the given id
ins = users.insert()
conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
# verify the results
s = select([users])
results = conn.execute(s)
assert len([row for row in results]) == 2, \
'number of rows from users table'
results.close()
# fetchone
s = select([users]).order_by('id')
results = conn.execute(s)
row = results.fetchone()
results.close()
assert row[2] == 'Jack Jones', 'user name'
assert row['fullname'] == 'Jack Jones', "user name by dict"
assert row[users.c.fullname] == 'Jack Jones', \
'user name by Column object'
conn.execute(addresses.insert(), [
{'user_id': 1, 'email_address': '[email protected]'},
{'user_id': 1, 'email_address': '[email protected]'},
{'user_id': 2, 'email_address': '[email protected]'},
{'user_id': 2, 'email_address': '[email protected]'},
])
# more records
s = select([addresses])
results = conn.execute(s)
assert len([row for row in results]) == 4, \
'number of rows from addresses table'
results.close()
# select specified column names
s = select([users.c.name, users.c.fullname]).order_by('name')
results = conn.execute(s)
results.fetchone()
row = results.fetchone()
assert row['name'] == 'wendy', 'name'
# join
s = select([users, addresses]).where(users.c.id == addresses.c.user_id)
results = conn.execute(s)
results.fetchone()
results.fetchone()
results.fetchone()
row = results.fetchone()
assert row['email_address'] == '[email protected]', 'email address'
# Operator
assert str(users.c.id == addresses.c.user_id) == \
'users.id = addresses.user_id', 'equal operator'
assert str(users.c.id == 7) == 'users.id = :id_1', \
'equal to a static number'
assert str(users.c.name == None) # NOQA
assert str(users.c.id + addresses.c.id) == 'users.id + addresses.id', \
'number + number'
assert str(users.c.name + users.c.fullname) == \
'users.name || users.fullname', 'str + str'
# Conjunctions
# example 1
obj = and_(
users.c.name.like('j%'),
users.c.id == addresses.c.user_id,
or_(
addresses.c.email_address == '[email protected]',
addresses.c.email_address == '[email protected]'
),
not_(users.c.id > 5)
)
expected_sql = """users.name LIKE :name_1
AND users.id = addresses.user_id
AND (addresses.email_address = :email_address_1
OR addresses.email_address = :email_address_2)
AND users.id <= :id_1"""
assert str(obj) == ''.join(expected_sql.split('\n')), \
"complex condition"
# example 2
obj = users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \
(
(addresses.c.email_address == '[email protected]') |
(addresses.c.email_address == '[email protected]')
) \
& ~(users.c.id > 5)
assert str(obj) == ''.join(expected_sql.split('\n')), \
"complex condition using python operators"
# example 3
s = select([(users.c.fullname +
", " + addresses.c.email_address).
label('title')]). \
where(
and_(
users.c.id == addresses.c.user_id,
users.c.name.between('m', 'z'),
or_(
addresses.c.email_address.like('%@aol.com'),
addresses.c.email_address.like('%@msn.com')
)
)
)
results = engine_testaccount.execute(s).fetchall()
assert results[0][0] == 'Wendy Williams, [email protected]'
# Aliases
a1 = addresses.alias()
a2 = addresses.alias()
s = select([users]).where(and_(
users.c.id == a1.c.user_id,
users.c.id == a2.c.user_id,
a1.c.email_address == '[email protected]',
a2.c.email_address == '[email protected]'))
results = engine_testaccount.execute(s).fetchone()
assert results == (1, 'jack', 'Jack Jones')
# Joins
assert str(users.join(addresses)) == 'users JOIN addresses ON ' \
'users.id = addresses.user_id'
assert str(users.join(addresses,
addresses.c.email_address.like(
users.c.name + '%'))) == \
'users JOIN addresses ' \
'ON addresses.email_address LIKE users.name || :name_1'
s = select([users.c.fullname]).select_from(
users.join(addresses,
addresses.c.email_address.like(users.c.name + '%')))
results = engine_testaccount.execute(s).fetchall()
assert results[1] == ('Jack Jones',)
s = select([users.c.fullname]).select_from(users.outerjoin(
addresses)).order_by(users.c.fullname)
results = engine_testaccount.execute(s).fetchall()
assert results[-1] == ('Wendy Williams',)
finally:
conn.close()
# drop tables
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
@pytest.mark.skip("""
Reflection is not implemented yet.
""")
def test_reflection(engine_testaccount):
"""
Tests Reflection
"""
engine_testaccount.execute("""
CREATE OR REPLACE TABLE user (
id Integer primary key,
name String,
fullname String
)
""")
try:
meta = MetaData()
user_reflected = Table('user', meta, autoload=True,
autoload_with=engine_testaccount)
assert user_reflected.c == ['user.id', 'user.name', 'user.fullname']
finally:
engine_testaccount.execute("""
DROP TABLE IF EXISTS user
""")
def test_inspect_column(engine_testaccount):
"""
Tests Inspect
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
all_table_names = inspector.get_table_names()
assert 'users' in all_table_names
assert 'addresses' in all_table_names
columns_in_users = inspector.get_columns('users')
assert columns_in_users[0]['autoincrement'], 'autoincrement'
assert columns_in_users[0]['default'] is None, 'default'
assert columns_in_users[0]['name'] == 'id', 'name'
assert columns_in_users[0]['primary_key'], 'primary key'
assert not columns_in_users[1]['autoincrement'], 'autoincrement'
assert columns_in_users[1]['default'] is None, 'default'
assert columns_in_users[1]['name'] == 'name', 'name'
assert not columns_in_users[1]['primary_key'], 'primary key'
assert not columns_in_users[2]['autoincrement'], 'autoincrement'
assert columns_in_users[2]['default'] is None, 'default'
assert columns_in_users[2]['name'] == 'fullname', 'name'
assert not columns_in_users[2]['primary_key'], 'primary key'
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_indexes(engine_testaccount):
"""
Tests get indexes
NOTE: Snowflake doesn't support indexes
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
assert inspector.get_indexes("users") == []
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_primary_keys(engine_testaccount):
"""
Tests get primary keys
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
primary_keys = inspector.get_pk_constraint('users')
assert primary_keys['constrained_columns'] == ['id']
primary_keys = inspector.get_pk_constraint('addresses')
assert primary_keys['constrained_columns'] == ['id']
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_foreign_keys(engine_testaccount):
"""
Tests foreign keys
"""
metadata = MetaData()
fk_name = 'fk_users_id_from_addresses'
users, addresses = _create_users_addresses_tables(
engine_testaccount,
metadata, fk=fk_name)
try:
inspector = inspect(engine_testaccount)
foreign_keys = inspector.get_foreign_keys('addresses')
assert foreign_keys[0]['name'] == fk_name
assert foreign_keys[0]['constrained_columns'] == ['user_id']
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_multiple_column_primary_key(engine_testaccount):
"""
Tests multicolumn primary key with and without autoincrement
"""
metadata = MetaData()
mytable = Table('mytable', metadata,
Column('gid',
Integer,
primary_key=True,
autoincrement=False),
Column('id',
Integer,
primary_key=True,
autoincrement=True))
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_mytable = inspector.get_columns('mytable')
assert not columns_in_mytable[0]['autoincrement'], 'autoincrement'
assert columns_in_mytable[0]['default'] is None, 'default'
assert columns_in_mytable[0]['name'] == 'gid', 'name'
assert columns_in_mytable[0]['primary_key'], 'primary key'
assert columns_in_mytable[1]['autoincrement'], 'autoincrement'
assert columns_in_mytable[1]['default'] is None, 'default'
assert columns_in_mytable[1]['name'] == 'id', 'name'
assert columns_in_mytable[1]['primary_key'], 'primary key'
primary_keys = inspector.get_pk_constraint('mytable')
assert primary_keys['constrained_columns'] == ['gid', 'id']
finally:
mytable.drop(engine_testaccount)
def test_create_table_with_cluster_by(engine_testaccount):
# Test case for https://github.com/snowflakedb/snowflake-sqlalchemy/pull/14
metadata = MetaData()
user = Table('clustered_user', metadata,
Column('Id', Integer, primary_key=True),
Column('name', String),
snowflake_clusterby=['Id', 'name'])
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_table = inspector.get_columns('clustered_user')
assert columns_in_table[0]['name'] == 'Id', 'name'
finally:
user.drop(engine_testaccount)
def test_view_names(engine_testaccount):
"""
Tests all views
"""
inspector = inspect(engine_testaccount)
information_schema_views = inspector.get_view_names(
schema='information_schema')
assert 'columns' in information_schema_views
assert 'table_constraints' in information_schema_views
def test_view_definition(engine_testaccount, db_parameters):
"""
Tests view definition
"""
test_table_name = "test_table_sqlalchemy"
test_view_name = "testview_sqlalchemy"
engine_testaccount.execute("""
CREATE OR REPLACE TABLE {0} (
id INTEGER,
name STRING
)
""".format(test_table_name))
sql = """
CREATE OR REPLACE VIEW {0} AS
SELECT * FROM {1} WHERE id > 10""".format(
test_view_name, test_table_name)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
try:
inspector = inspect(engine_testaccount)
assert inspector.get_view_definition(test_view_name) == sql.strip()
assert inspector.get_view_definition(test_view_name,
db_parameters['schema']) == \
sql.strip()
assert inspector.get_view_names() == [test_view_name]
finally:
engine_testaccount.execute(text(
"DROP TABLE IF EXISTS {0}".format(test_table_name)))
engine_testaccount.execute(text(
"DROP VIEW IF EXISTS {0}".format(test_view_name)))
def test_view_comment_reading(engine_testaccount, db_parameters):
"""
Tests reading a comment from a view once it's defined
"""
test_table_name = "test_table_sqlalchemy"
test_view_name = "testview_sqlalchemy"
engine_testaccount.execute("""
CREATE OR REPLACE TABLE {} (
id INTEGER,
name STRING
)
""".format(test_table_name))
sql = """
CREATE OR REPLACE VIEW {} AS
SELECT * FROM {} WHERE id > 10""".format(
test_view_name, test_table_name)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
comment_text = "hello my viewing friends"
sql = "COMMENT ON VIEW {} IS '{}';".format(
test_view_name, comment_text)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
try:
inspector = inspect(engine_testaccount)
# NOTE: sqlalchemy doesn't have a way to get view comments specifically,
# but the code to get table comments should work for views too
assert inspector.get_table_comment(test_view_name) == {'text': comment_text}
assert inspector.get_table_comment(test_table_name) == {'text': None}
assert str(inspector.get_columns(test_table_name)) == str(inspector.get_columns(test_view_name))
finally:
engine_testaccount.execute(text(
"DROP TABLE IF EXISTS {0}".format(test_table_name)))
engine_testaccount.execute(text(
"DROP VIEW IF EXISTS {0}".format(test_view_name)))
@pytest.mark.skip("Temp table cannot be viewed for some reason")
def test_get_temp_table_names(engine_testaccount):
num_of_temp_tables = 2
temp_table_name = "temp_table"
for idx in range(num_of_temp_tables):
engine_testaccount.execute(text("""
CREATE TEMPORARY TABLE {0} (col1 integer, col2 string)
""".format(temp_table_name + str(idx))).execution_options(
autocommit=True))
for row in engine_testaccount.execute("SHOW TABLES"):
print(row)
try:
inspector = inspect(engine_testaccount)
temp_table_names = inspector.get_temp_table_names()
assert len(temp_table_names) == num_of_temp_tables
finally:
pass
def test_create_table_with_schema(engine_testaccount, db_parameters):
metadata = MetaData()
new_schema = db_parameters['schema'] + "_NEW"
engine_testaccount.execute(text(
"CREATE OR REPLACE SCHEMA \"{0}\"".format(new_schema)))
Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
schema=new_schema
)
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_users = inspector.get_columns('users', schema=new_schema)
assert columns_in_users is not None
finally:
metadata.drop_all(engine_testaccount)
engine_testaccount.execute(
text("DROP SCHEMA IF EXISTS \"{0}\"".format(new_schema)))
@pytest.mark.skipif(os.getenv("SNOWFLAKE_GCP") is not None, reason="PUT and GET is not supported for GCP yet")
def test_copy(engine_testaccount):
"""
COPY must be in a transaction
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
engine_testaccount.execute(
"PUT file://{file_name} @%users".format(
file_name=os.path.join(THIS_DIR, "data", "users.txt")))
engine_testaccount.execute("COPY INTO users")
results = engine_testaccount.execute("SELECT * FROM USERS").fetchall()
assert results is not None and len(results) > 0
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
@pytest.mark.skip("""
No transaction works yet in the core API. Use orm API or Python Connector
directly if needed at the moment.
Note Snowflake DB supports DML transaction natively, but we have not figured out
how to integrate with SQLAlchemy core API yet.
""")
def test_transaction(engine_testaccount, db_parameters):
engine_testaccount.execute(text("""
CREATE TABLE {0} (c1 number)""".format(db_parameters['name'])))
trans = engine_testaccount.connect().begin()
try:
engine_testaccount.execute(text("""
INSERT INTO {0} VALUES(123)
""".format(db_parameters['name'])))
trans.commit()
engine_testaccount.execute(text("""
INSERT INTO {0} VALUES(456)
""".format(db_parameters['name'])))
trans.rollback()
results = engine_testaccount.execute("""
SELECT * FROM {0}
""".format(db_parameters['name'])).fetchall()
assert results == [(123,)]
finally:
engine_testaccount.execute(text("""
DROP TABLE IF EXISTS {0}
""".format(db_parameters['name'])))
def test_get_schemas(engine_testaccount):
"""
Tests get schemas from inspect.
Although the method get_schema_names is not part of DefaultDialect,
inspect() may call the method if exists.
"""
inspector = inspect(engine_testaccount)
schemas = inspector.get_schema_names()
assert 'information_schema' in schemas
def test_column_metadata(engine_testaccount):
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Appointment(Base):
__tablename__ = 'appointment'
id = Column(Numeric(38, 3), primary_key=True)
string_with_len = Column(String(100))
binary_data = Column(LargeBinary)
real_data = Column(REAL)
Base.metadata.create_all(engine_testaccount)
metadata = Base.metadata
t = Table('appointment', metadata)
inspector = inspect(engine_testaccount)
inspector.reflecttable(t, None)
assert str(t.columns['id'].type) == 'DECIMAL(38, 3)'
assert str(t.columns['string_with_len'].type) == 'VARCHAR(100)'
assert str(t.columns['binary_data'].type) == 'BINARY'
assert str(t.columns['real_data'].type) == 'FLOAT'
def _get_engine_with_column_metadata_cache(
db_parameters, user=None, password=None, account=None):
"""
Creates a connection with column metadata cache
"""
if user is not None:
db_parameters['user'] = user
if password is not None:
db_parameters['password'] = password
if account is not None:
db_parameters['account'] = account
from sqlalchemy.pool import NullPool
from sqlalchemy import create_engine
from snowflake.sqlalchemy import URL
engine = create_engine(URL(
user=db_parameters['user'],
password=db_parameters['password'],
host=db_parameters['host'],
port=db_parameters['port'],
database=db_parameters['database'],
schema=db_parameters['schema'],
account=db_parameters['account'],
protocol=db_parameters['protocol'],
cache_column_metadata=True,
), poolclass=NullPool)
return engine
def test_many_table_column_metadata(db_parameters):
"""
Get dozens of table metadata with column metadata cache.
cache_column_metadata=True will cache all column metadata for all tables
in the schema.
"""
    engine = _get_engine_with_column_metadata_cache(db_parameters)
RE_SUFFIX_NUM = re.compile(r'.*(\d+)$')
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainusers' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddresses' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine)
inspector = inspect(engine)
cnt = 0
schema = inspector.default_schema_name
for table_name in inspector.get_table_names(schema):
m = RE_SUFFIX_NUM.match(table_name)
if m:
suffix = m.group(1)
cs = inspector.get_columns(table_name, schema)
if table_name.startswith("mainusers"):
assert len(cs) == 4
assert cs[1]['name'] == 'name' + suffix
cnt += 1
elif table_name.startswith("mainaddresses"):
assert len(cs) == 3
assert cs[2]['name'] == 'email_address' + suffix
cnt += 1
ps = inspector.get_pk_constraint(table_name, schema)
if table_name.startswith("mainusers"):
assert ps['constrained_columns'] == ['id' + suffix]
elif table_name.startswith("mainaddresses"):
assert ps['constrained_columns'] == ['id' + suffix]
fs = inspector.get_foreign_keys(table_name, schema)
if table_name.startswith("mainusers"):
assert len(fs) == 0
elif table_name.startswith("mainaddresses"):
assert len(fs) == 1
assert fs[0]['constrained_columns'] == ['user_id' + suffix]
assert fs[0]['referred_table'] == 'mainusers' + suffix
assert cnt == total_objects * 2, 'total number of test objects'
def test_cache_time(engine_testaccount, db_parameters):
"""Check whether Inspector cache is working"""
# Set up necessary tables
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainusers' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddresses' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
schema = db_parameters['schema']
def harass_inspector():
for table_name in inspector.get_table_names(schema):
inspector.get_columns(table_name, schema)
inspector.get_pk_constraint(table_name, schema)
inspector.get_foreign_keys(table_name, schema)
outcome = False
# Allow up to 5 times for the speed test to pass to avoid flaky test
for _ in range(5):
# Python 2.7 has no timeit.timeit with globals and locals parameters
s_time = time.time()
harass_inspector()
m_time = time.time()
harass_inspector()
time2 = time.time() - m_time
time1 = m_time - s_time
print("Ran inspector through tables twice, times:\n\tfirst: {0}\n\tsecond: {1}".format(time1, time2))
if time2 < time1 * 0.01:
outcome = True
break
else:
# Reset inspector to reset cache
inspector = inspect(engine_testaccount)
metadata.drop_all(engine_testaccount)
assert outcome
@pytest.mark.timeout(15)
def test_region():
from sqlalchemy import create_engine
engine = create_engine(URL(
user='testuser',
password='testpassword',
account='testaccount',
region='eu-central-1',
login_timeout=5
))
try:
engine.execute('select current_version()').fetchone()
pytest.fail('should not run')
except Exception as ex:
assert ex.orig.errno == 250001
assert 'Failed to connect to DB' in ex.orig.msg
assert 'testaccount.eu-central-1.snowflakecomputing.com' in ex.orig.msg
@pytest.mark.timeout(15)
def test_azure():
from sqlalchemy import create_engine
engine = create_engine(URL(
user='testuser',
password='testpassword',
account='testaccount',
region='east-us-2.azure',
login_timeout=5
))
try:
engine.execute('select current_version()').fetchone()
pytest.fail('should not run')
except Exception as ex:
assert ex.orig.errno == 250001
assert 'Failed to connect to DB' in ex.orig.msg
assert 'testaccount.east-us-2.azure.snowflakecomputing.com' in \
ex.orig.msg
def test_load_dialect():
"""
Test loading Snowflake SQLAlchemy dialect class
"""
assert isinstance(dialects.registry.load('snowflake')(), dialect)
@pytest.mark.parametrize('conditional_flag', [True, False])
@pytest.mark.parametrize('update_flag,insert_flag,delete_flag', [
(True, False, False),
(False, True, False),
(False, False, True),
(False, True, True),
(True, True, False)])
def test_upsert(engine_testaccount, update_flag, insert_flag, delete_flag, conditional_flag):
meta = MetaData()
users = Table('users', meta,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String))
onboarding_users = Table('onboarding_users', meta,
Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String),
Column('delete', Boolean))
meta.create_all(engine_testaccount)
conn = engine_testaccount.connect()
try:
conn.execute(users.insert(), [
{'id': 1, 'name': 'mark', 'fullname': 'Mark Keller'},
{'id': 4, 'name': 'luke', 'fullname': 'Luke Lorimer'},
{'id': 2, 'name': 'amanda', 'fullname': 'Amanda Harris'}])
conn.execute(onboarding_users.insert(), [
{'id': 2, 'name': 'amanda', 'fullname': 'Amanda Charlotte Harris', 'delete': True},
{'id': 3, 'name': 'jim', 'fullname': 'Jim Wang', 'delete': False},
{'id': 4, 'name': 'lukas', 'fullname': 'Lukas Lorimer', 'delete': False},
{'id': 5, 'name': 'andras', 'fullname': None, 'delete': False}
])
merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)
if update_flag:
clause = merge.when_matched_then_update().values(name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname)
if conditional_flag:
clause.where(onboarding_users.c.name != 'amanda')
if insert_flag:
clause = merge.when_not_matched_then_insert().values(
id=onboarding_users.c.id,
name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname,
)
if conditional_flag:
clause.where(onboarding_users.c.fullname != None) # NOQA
if delete_flag:
clause = merge.when_matched_then_delete()
if conditional_flag:
clause.where(onboarding_users.c.delete == True) # NOQA
conn.execute(merge)
users_tuples = {tuple(row) for row in conn.execute(select([users]))}
onboarding_users_tuples = {tuple(row) for row in conn.execute(select([onboarding_users]))}
expected_users = {
(1, 'mark', 'Mark Keller'),
(2, 'amanda', 'Amanda Harris'),
(4, 'luke', 'Luke Lorimer')
}
if update_flag:
if not conditional_flag:
expected_users.remove((2, 'amanda', 'Amanda Harris'))
expected_users.add((2, 'amanda', 'Amanda Charlotte Harris'))
expected_users.remove((4, 'luke', 'Luke Lorimer'))
expected_users.add((4, 'lukas', 'Lukas Lorimer'))
elif delete_flag:
if not conditional_flag:
expected_users.remove((4, 'luke', 'Luke Lorimer'))
expected_users.remove((2, 'amanda', 'Amanda Harris'))
if insert_flag:
if not conditional_flag:
expected_users.add((5, 'andras', None))
expected_users.add((3, 'jim', 'Jim Wang'))
expected_onboarding_users = {
(2, 'amanda', 'Amanda Charlotte Harris', True),
(3, 'jim', 'Jim Wang', False),
(4, 'lukas', 'Lukas Lorimer', False),
(5, 'andras', None, False)
}
assert users_tuples == expected_users
assert onboarding_users_tuples == expected_onboarding_users
finally:
conn.close()
users.drop(engine_testaccount)
onboarding_users.drop(engine_testaccount)
def test_deterministic_merge_into(sql_compiler):
meta = MetaData()
users = Table('users', meta,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String))
onboarding_users = Table('onboarding_users', meta,
Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String),
Column('delete', Boolean))
merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)
merge.when_matched_then_update().values(name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname)
merge.when_not_matched_then_insert().values(
id=onboarding_users.c.id,
name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname,
).where(onboarding_users.c.fullname != None) # NOQA
assert sql_compiler(merge) == "MERGE INTO users USING onboarding_users ON users.id = onboarding_users.id " \
"WHEN MATCHED THEN UPDATE SET fullname = onboarding_users.fullname, " \
"name = onboarding_users.name WHEN NOT MATCHED AND onboarding_users.fullname " \
"IS NOT NULL THEN INSERT (fullname, id, name) VALUES (onboarding_users.fullname, " \
"onboarding_users.id, onboarding_users.name)"
def test_comments(engine_testaccount):
"""Tests strictly reading column comment through SQLAlchemy"""
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
try:
engine_testaccount.execute("create table public.{} (\"col1\" text);".format(table_name))
engine_testaccount.execute("alter table public.{} alter \"col1\" comment 'this is my comment'".format(table_name))
engine_testaccount.execute("select comment from information_schema.columns where table_name='{}'".format(table_name)).fetchall()
inspector = inspect(engine_testaccount)
columns = inspector.get_columns(table_name, schema='PUBLIC')
assert columns[0].get('comment') == u'this is my comment'
finally:
engine_testaccount.execute("drop table public.{}".format(table_name))
def test_comment_sqlalchemy(db_parameters, engine_testaccount, on_public_ci):
"""Testing adding/reading column and table comments through SQLAlchemy"""
new_schema = db_parameters['schema'] + '2'
# Use same table name in 2 different schemas to make sure comment retrieval works properly
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
table_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
column_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
table_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
column_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
engine2, _ = get_engine(schema=new_schema)
con2 = None
if not on_public_ci:
con2 = engine2.connect()
con2.execute("CREATE SCHEMA IF NOT EXISTS {0}".format(new_schema))
inspector = inspect(engine_testaccount)
metadata1 = MetaData()
metadata2 = MetaData()
mytable1 = Table(table_name,
metadata1,
Column("tstamp", DateTime, comment=column_comment1),
comment=table_comment1)
mytable2 = Table(table_name,
metadata2,
Column("tstamp", DateTime, comment=column_comment2),
comment=table_comment2)
metadata1.create_all(engine_testaccount, tables=[mytable1])
if not on_public_ci:
metadata2.create_all(engine2, tables=[mytable2])
try:
assert inspector.get_columns(table_name)[0]['comment'] == column_comment1
assert inspector.get_table_comment(table_name)['text'] == table_comment1
if not on_public_ci:
assert inspector.get_columns(table_name, schema=new_schema)[0]['comment'] == column_comment2
assert inspector.get_table_comment(
table_name,
schema=new_schema.upper() # Note: since did not quote schema name it was uppercase'd
)['text'] == table_comment2
finally:
mytable1.drop(engine_testaccount)
if not on_public_ci:
mytable2.drop(engine2)
con2.execute("DROP SCHEMA IF EXISTS {0}".format(new_schema))
con2.close()
engine2.dispose()
def test_special_schema_character(db_parameters, on_public_ci):
"""Make sure we decode special characters correctly"""
if on_public_ci:
pytest.skip("Public CIs cannot create Schemas and Databases")
# Constants
database = "a/b/c" # "'/'.join([choice(ascii_lowercase) for _ in range(3)])
schema = "d/e/f" # '/'.join([choice(ascii_lowercase) for _ in range(3)])
# Setup
options = dict(**db_parameters)
conn = connect(**options)
conn.cursor().execute("CREATE OR REPLACE DATABASE \"{0}\"".format(database))
conn.cursor().execute("CREATE OR REPLACE SCHEMA \"{0}\"".format(schema))
conn.close()
# Test
options.update({'database': '"' + database + '"',
'schema': '"' + schema + '"'})
sf_conn = connect(**options)
sf_connection = [res for res in sf_conn.cursor().execute("select current_database(), "
"current_schema();")]
sa_conn = create_engine(URL(**options)).connect()
sa_connection = [res for res in sa_conn.execute("select current_database(), "
"current_schema();")]
sa_conn.close()
sf_conn.close()
# Teardown
conn = connect(**options)
conn.cursor().execute("DROP DATABASE IF EXISTS \"{0}\"".format(database))
conn.close()
assert [(database, schema)] == sf_connection == sa_connection
def test_autoincrement(engine_testaccount):
metadata = MetaData()
users = Table('users', metadata,
Column('uid', Integer, Sequence('id_seq'), primary_key=True),
Column('name', String(39)))
try:
users.create(engine_testaccount)
connection = engine_testaccount.connect()
connection.execute(users.insert(), [{'name': 'sf1'}])
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1')
]
connection.execute(users.insert(), {'name': 'sf2'}, {'name': 'sf3'})
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3')
]
connection.execute(users.insert(), {'name': 'sf4'})
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3'),
(4, 'sf4')
]
seq = Sequence('id_seq')
nextid = connection.execute(seq)
connection.execute(users.insert(), [{'uid': nextid, 'name': 'sf5'}])
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3'),
(4, 'sf4'),
(5, 'sf5')
]
finally:
users.drop(engine_testaccount)
def test_get_too_many_columns(engine_testaccount, db_parameters):
"""Check whether Inspector cache is working, when there are too many column to cache whole schema's columns"""
# Set up necessary tables
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainuserss' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddressess' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainuserss' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
schema = db_parameters['schema']
# Emulate error
with patch.object(inspector.dialect, '_get_schema_columns', return_value=None) as mock_method:
def harass_inspector():
for table_name in inspector.get_table_names(schema):
column_metadata = inspector.get_columns(table_name, schema)
inspector.get_pk_constraint(table_name, schema)
inspector.get_foreign_keys(table_name, schema)
                assert 3 <= len(column_metadata) <= 4  # Each table should have either 3 or 4 columns
outcome = False
# Allow up to 5 times for the speed test to pass to avoid flaky test
for _ in range(5):
# Python 2.7 has no timeit.timeit with globals and locals parameters
s_time = time.time()
harass_inspector()
m_time = time.time()
harass_inspector()
time2 = time.time() - m_time
time1 = m_time - s_time
print("Ran inspector through tables twice, times:\n\tfirst: {0}\n\tsecond: {1}".format(time1, time2))
if time2 < time1 * 0.01:
outcome = True
break
else:
# Reset inspector to reset cache
inspector = inspect(engine_testaccount)
metadata.drop_all(engine_testaccount)
assert mock_method.call_count > 0 # Make sure we actually mocked the issue happening
assert outcome
def test_too_many_columns_detection(engine_testaccount, db_parameters):
"""This tests whether a too many column error actually triggers the more granular table version"""
# Set up a single table
metadata = MetaData()
Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
Column('password', String)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
# Do test
original_execute = inspector.bind.execute
def mock_helper(command, *args, **kwargs):
if '_get_schema_columns' in command:
raise ProgrammingError("Information schema query returned too much data. Please repeat query with more "
"selective predicates.", 90030)
else:
return original_execute(command, *args, **kwargs)
with patch.object(inspector.bind, 'execute', side_effect=mock_helper):
column_metadata = inspector.get_columns('users', db_parameters['schema'])
assert len(column_metadata) == 4
# Clean up
metadata.drop_all(engine_testaccount)
def test_empty_comments(engine_testaccount):
"""Test that no comment returns None"""
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
try:
engine_testaccount.execute("create table public.{} (\"col1\" text);".format(table_name))
engine_testaccount.execute("select comment from information_schema.columns where table_name='{}'".format(table_name)).fetchall()
inspector = inspect(engine_testaccount)
columns = inspector.get_columns(table_name, schema='PUBLIC')
assert inspector.get_table_comment(table_name, schema='PUBLIC') == {'text': None}
assert all([c['comment'] is None for c in columns])
finally:
engine_testaccount.execute("drop table public.{}".format(table_name))
|
the-stack_0_460 | """
ZetCode PyQt5 tutorial
This example shows an icon
in the titlebar of the window.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) |
the-stack_0_461 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import freezegun
import pytest
import update_ext_version
TEST_DATETIME = "2022-03-14 01:23:45"
# The build ID is calculated via:
# "1" + datetime.datetime.strptime(TEST_DATETIME,"%Y-%m-%d %H:%M:%S").strftime('%j%H%M')
EXPECTED_BUILD_ID = "10730123"
def create_package_json(directory, version):
"""Create `package.json` in `directory` with a specified version of `version`."""
package_json = directory / "package.json"
package_json.write_text(json.dumps({"version": version}), encoding="utf-8")
return package_json
def run_test(tmp_path, version, args, expected):
package_json = create_package_json(tmp_path, version)
update_ext_version.main(package_json, args)
package = json.loads(package_json.read_text(encoding="utf-8"))
assert expected == update_ext_version.parse_version(package["version"])
@pytest.mark.parametrize(
"version, args",
[
("1.0.0-rc", []),
("1.1.0-rc", ["--release"]),
("1.0.0-rc", ["--release", "--build-id", "-1"]),
("1.0.0-rc", ["--release", "--for-publishing", "--build-id", "-1"]),
("1.0.0-rc", ["--release", "--for-publishing", "--build-id", "999999999999"]),
("1.1.0-rc", ["--build-id", "-1"]),
("1.1.0-rc", ["--for-publishing", "--build-id", "-1"]),
("1.1.0-rc", ["--for-publishing", "--build-id", "999999999999"]),
],
)
def test_invalid_args(tmp_path, version, args):
with pytest.raises(ValueError):
run_test(tmp_path, version, args, None)
@pytest.mark.parametrize(
"version, args, expected",
[
("1.1.0-rc", ["--build-id", "12345"], ("1", "1", "12345", "rc")),
("1.0.0-rc", ["--release", "--build-id", "12345"], ("1", "0", "12345", "")),
(
"1.1.0-rc",
["--for-publishing", "--build-id", "12345"],
("1", "1", "12345", ""),
),
(
"1.0.0-rc",
["--release", "--for-publishing", "--build-id", "12345"],
("1", "0", "12345", ""),
),
(
"1.0.0-rc",
["--release", "--build-id", "999999999999"],
("1", "0", "999999999999", ""),
),
(
"1.1.0-rc",
["--build-id", "999999999999"],
("1", "1", "999999999999", "rc"),
),
("1.1.0-rc", [], ("1", "1", EXPECTED_BUILD_ID, "rc")),
(
"1.0.0-rc",
["--release"],
("1", "0", "0", ""),
),
(
"1.1.0-rc",
["--for-publishing"],
("1", "1", EXPECTED_BUILD_ID, ""),
),
(
"1.0.0-rc",
["--release", "--for-publishing"],
("1", "0", "0", ""),
),
(
"1.0.0-rc",
["--release"],
("1", "0", "0", ""),
),
(
"1.1.0-rc",
[],
("1", "1", EXPECTED_BUILD_ID, "rc"),
),
],
)
@freezegun.freeze_time("2022-03-14 01:23:45")
def test_update_ext_version(tmp_path, version, args, expected):
run_test(tmp_path, version, args, expected)
|
the-stack_0_466 | def imc (peso,altura):
valor = peso / altura **2
if valor <18:
return "Delgadez"
elif valor <25:
return "Normal"
elif valor <29:
return "Sobrepeso"
else:
return "Obesidad"
valor_imc = imc (58,1.55)
print (valor_imc)
|
the-stack_0_468 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import base64
import collections
import datetime
import hashlib
import imghdr
import json
import os
import random
import re
import string
import sys
import time
import unicodedata
import zlib
from constants import constants
import feconf
import python_utils
_YAML_PATH = os.path.join(os.getcwd(), '..', 'oppia_tools', 'pyyaml-5.1.2')
sys.path.insert(0, _YAML_PATH)
import yaml # isort:skip #pylint: disable=wrong-import-position
DATETIME_FORMAT = '%m/%d/%Y, %H:%M:%S:%f'
PNG_DATA_URL_PREFIX = 'data:image/png;base64,'
SECONDS_IN_HOUR = 60 * 60
SECONDS_IN_MINUTE = 60
class InvalidInputException(Exception):
"""Error class for invalid input."""
pass
class ValidationError(Exception):
"""Error class for when a domain object fails validation."""
pass
class ExplorationConversionError(Exception):
"""Error class for when an exploration fails to convert from a certain
version to a certain version.
"""
pass
def create_enum(*sequential, **names):
"""Creates a enumerated constant.
Args:
*sequential: *. Sequence List to generate the enumerations.
        **names: *. Names of the enumeration.
Returns:
dict. Dictionary containing the enumerated constants.
"""
enums = dict(python_utils.ZIP(sequential, sequential), **names)
return type(b'Enum', (), enums)
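# Illustrative usage (not part of the original module): create_enum builds a
# simple class whose attributes equal their own names, e.g.
#
#     Modes = create_enum('LEARNER', 'EDITOR')
#     Modes.LEARNER == 'LEARNER'  # True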
def get_file_contents(filepath, raw_bytes=False, mode='r'):
"""Gets the contents of a file, given a relative filepath from oppia/.
Args:
filepath: str. A full path to the file.
raw_bytes: bool. Flag for the raw_bytes output.
mode: str. File opening mode, default is in read mode.
Returns:
*. Either the raw_bytes stream if the flag is set or the
decoded stream in utf-8 format.
"""
if raw_bytes:
mode = 'rb'
encoding = None
else:
encoding = 'utf-8'
with python_utils.open_file(filepath, mode, encoding=encoding) as f:
return f.read()
def get_exploration_components_from_dir(dir_path):
"""Gets the (yaml, assets) from the contents of an exploration data dir.
Args:
dir_path: str. A full path to the exploration root directory.
Returns:
*. A 2-tuple, the first element of which is a yaml string, and the
second element of which is a list of (filepath, content) 2-tuples.
The filepath does not include the assets/ prefix.
Raises:
Exception. If the following condition doesn't hold: "There
is exactly one file not in assets/, and this file has a
.yaml suffix".
"""
yaml_content = None
assets_list = []
dir_path_array = dir_path.split('/')
while dir_path_array[-1] == '':
dir_path_array = dir_path_array[:-1]
dir_path_length = len(dir_path_array)
for root, directories, files in os.walk(dir_path):
for directory in directories:
if root == dir_path and directory != 'assets':
raise Exception(
'The only directory in %s should be assets/' % dir_path)
for filename in files:
filepath = os.path.join(root, filename)
if root == dir_path:
                # These files are added automatically by Mac OS X systems.
# We ignore them.
if not filepath.endswith('.DS_Store'):
if yaml_content is not None:
raise Exception(
'More than one non-asset file specified '
'for %s' % dir_path)
elif not filepath.endswith('.yaml'):
raise Exception(
'Found invalid non-asset file %s. There '
'should only be a single non-asset file, '
'and it should have a .yaml suffix.' % filepath)
else:
yaml_content = get_file_contents(filepath)
else:
filepath_array = filepath.split('/')
# The additional offset is to remove the 'assets/' prefix.
filename = '/'.join(filepath_array[dir_path_length + 1:])
assets_list.append((filename, get_file_contents(
filepath, raw_bytes=True)))
if yaml_content is None:
        raise Exception('No yaml file specified for %s' % dir_path)
return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
"""Turns a list of items into a comma-separated string.
Args:
items: list(str). List of the items.
Returns:
str. String containing the items in the list separated by commas.
"""
if not items:
return ''
if len(items) == 1:
return items[0]
return '%s and %s' % (', '.join(items[:-1]), items[-1])
def to_ascii(input_string):
"""Change unicode characters in a string to ascii if possible.
Args:
input_string: str. String to convert.
Returns:
str. String containing the ascii representation of the input string.
"""
return unicodedata.normalize(
'NFKD', python_utils.UNICODE(input_string)).encode('ascii', 'ignore')
def dict_from_yaml(yaml_str):
"""Gets the dict representation of a YAML string.
Args:
yaml_str: str. Yaml string for conversion into dict.
Returns:
dict. Parsed dict representation of the yaml string.
Raises:
        InvalidInputException. If the yaml string sent as the
            parameter cannot be parsed, then this error is raised.
"""
try:
retrieved_dict = yaml.safe_load(yaml_str)
assert isinstance(retrieved_dict, dict)
return retrieved_dict
except (AssertionError, yaml.YAMLError) as e:
raise InvalidInputException(e)
def recursively_remove_key(obj, key_to_remove):
"""Recursively removes keys from a list or dict.
Args:
obj: *. List or dict passed for which the keys has to
be removed.
key_to_remove: str. Key value that has to be removed.
Returns:
*. Dict or list with a particular key value removed.
"""
if isinstance(obj, list):
for item in obj:
recursively_remove_key(item, key_to_remove)
elif isinstance(obj, dict):
if key_to_remove in obj:
del obj[key_to_remove]
for key, unused_value in obj.items():
recursively_remove_key(obj[key], key_to_remove)
def get_random_int(upper_bound):
"""Returns a random integer in [0, upper_bound).
Args:
upper_bound: int. Upper limit for generation of random
integer.
Returns:
int. Randomly generated integer less than the upper_bound.
"""
assert upper_bound >= 0 and isinstance(upper_bound, int)
generator = random.SystemRandom()
return generator.randrange(0, stop=upper_bound)
def get_random_choice(alist):
"""Gets a random element from a list.
Args:
alist: list(*). Input to get a random choice.
Returns:
*. Random element choosen from the passed input list.
"""
assert isinstance(alist, list) and len(alist) > 0
index = get_random_int(len(alist))
return alist[index]
def convert_png_data_url_to_binary(image_data_url):
"""Converts a PNG base64 data URL to a PNG binary data.
Args:
image_data_url: str. A string that is to be interpreted as a PNG
data URL.
Returns:
str. Binary content of the PNG created from the data URL.
Raises:
Exception. The given string does not represent a PNG data URL.
"""
if image_data_url.startswith(PNG_DATA_URL_PREFIX):
return base64.b64decode(
python_utils.urllib_unquote(
image_data_url[len(PNG_DATA_URL_PREFIX):]))
else:
raise Exception('The given string does not represent a PNG data URL.')
def convert_png_binary_to_data_url(content):
"""Converts a PNG image string (represented by 'content') to a data URL.
Args:
content: str. PNG binary file content.
Returns:
str. Data URL created from the binary content of the PNG.
Raises:
Exception. The given binary string does not represent a PNG image.
"""
if imghdr.what(None, h=content) == 'png':
return '%s%s' % (
PNG_DATA_URL_PREFIX,
python_utils.url_quote(base64.b64encode(content))
)
else:
raise Exception('The given string does not represent a PNG image.')
def convert_png_to_data_url(filepath):
"""Converts the png file at filepath to a data URL.
Args:
filepath: str. A full path to the file.
Returns:
str. Data url created from the filepath of the PNG.
"""
file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')
return convert_png_binary_to_data_url(file_contents)
def camelcase_to_hyphenated(camelcase_str):
"""Camelcase to hyhpenated conversion of the passed string.
Args:
camelcase_str: str. Camelcase string representation.
Returns:
        str. Hyphenated string representation of the camelcase string.
"""
intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', intermediate_str).lower()
def camelcase_to_snakecase(camelcase_str):
"""Camelcase to snake case conversion of the passed string.
Args:
camelcase_str: str. Camelcase string representation.
Returns:
str. Snakecase representation of the passed camelcase string.
"""
intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camelcase_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', intermediate_str).lower()
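# Illustrative behaviour (examples added for clarity, not in the original
# module): both converters lowercase the result, e.g.
#
#     camelcase_to_hyphenated('CamelCaseString')  # -> 'camel-case-string'
#     camelcase_to_snakecase('CamelCaseString')   # -> 'camel_case_string'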
def set_url_query_parameter(url, param_name, param_value):
"""Set or replace a query parameter, and return the modified URL.
Args:
url: str. URL string which contains the query parameter.
param_name: str. Parameter name to be removed.
param_value: str. Set the parameter value, if it exists.
Returns:
        str. Formatted URL that has the query parameter set or replaced.
Raises:
        Exception. If the query parameter name sent is not of string type,
            this exception is raised.
"""
if not isinstance(param_name, python_utils.BASESTRING):
raise Exception(
'URL query parameter name must be a string, received %s'
% param_name)
scheme, netloc, path, query_string, fragment = python_utils.url_split(url)
query_params = python_utils.parse_query_string(query_string)
query_params[param_name] = [param_value]
new_query_string = python_utils.url_encode(query_params, doseq=True)
return python_utils.url_unsplit(
(scheme, netloc, path, new_query_string, fragment))
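# Illustrative behaviour (example added for clarity, not in the original
# module):
#
#     set_url_query_parameter('http://test.com/a?x=1', 'x', '2')
#     # -> 'http://test.com/a?x=2'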
class JSONEncoderForHTML(json.JSONEncoder):
"""Encodes JSON that is safe to embed in HTML."""
def encode(self, o):
chunks = self.iterencode(o, True)
return ''.join(chunks) if self.ensure_ascii else u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(
JSONEncoderForHTML, self).iterencode(o, _one_shot=_one_shot)
for chunk in chunks:
yield chunk.replace('&', '\\u0026').replace(
'<', '\\u003c').replace('>', '\\u003e')
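# Illustrative behaviour (example added for clarity, not in the original
# module): characters that could break out of an HTML context are escaped,
# e.g.
#
#     JSONEncoderForHTML().encode({'tag': '<b>'})
#     # -> '{"tag": "\u003cb\u003e"}'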
def convert_to_hash(input_string, max_length):
"""Convert a string to a SHA1 hash.
Args:
input_string: str. Input string for conversion to hash.
max_length: int. Maximum Length of the generated hash.
Returns:
str. Hash Value generated from the input_String of the
specified length.
Raises:
        Exception. If the input string is not an instance of str,
            this exception is raised.
"""
if not isinstance(input_string, python_utils.BASESTRING):
raise Exception(
'Expected string, received %s of type %s' %
(input_string, type(input_string)))
# Encodes strings using the character set [A-Za-z0-9].
# Prefixing altchars with b' to ensure that all characters in encoded_string
# remain encoded (otherwise encoded_string would be of type unicode).
encoded_string = base64.b64encode(
hashlib.sha1(python_utils.convert_to_bytes(input_string)).digest(),
altchars=b'ab'
).replace('=', 'c')
return encoded_string[:max_length]
def base64_from_int(value):
"""Converts the number into base64 representation.
Args:
value: int. Integer value for conversion into base64.
Returns:
*. Returns the base64 representation of the number passed.
"""
return base64.b64encode(bytes([value]))
def get_time_in_millisecs(datetime_obj):
"""Returns time in milliseconds since the Epoch.
Args:
datetime_obj: datetime. An object of type datetime.datetime.
Returns:
float. The time in milliseconds since the Epoch.
"""
msecs = time.mktime(datetime_obj.timetuple()) * 1000.0
return msecs + python_utils.divide(datetime_obj.microsecond, 1000.0)
def convert_naive_datetime_to_string(datetime_obj):
"""Returns a human-readable string representing the naive datetime object.
Args:
datetime_obj: datetime. An object of type datetime.datetime. Must be a
naive datetime object.
Returns:
str. The string representing the naive datetime object.
"""
return datetime_obj.strftime(DATETIME_FORMAT)
def convert_string_to_naive_datetime_object(date_time_string):
"""Returns the naive datetime object equivalent of the date string.
Args:
date_time_string: str. The string format representing the datetime
object in the format: Month/Day/Year,
Hour:Minute:Second:MicroSecond.
Returns:
datetime. An object of type naive datetime.datetime corresponding to
that string.
"""
return datetime.datetime.strptime(date_time_string, DATETIME_FORMAT)
def get_current_time_in_millisecs():
"""Returns time in milliseconds since the Epoch.
Returns:
float. The time in milliseconds since the Epoch.
"""
return get_time_in_millisecs(datetime.datetime.utcnow())
def get_human_readable_time_string(time_msec):
"""Given a time in milliseconds since the epoch, get a human-readable
time string for the admin dashboard.
Args:
time_msec: float. Time in milliseconds since the Epoch.
Returns:
str. A string representing the time.
"""
return time.strftime(
'%B %d %H:%M:%S', time.gmtime(python_utils.divide(time_msec, 1000.0)))
def create_string_from_largest_unit_in_timedelta(timedelta_obj):
"""Given the timedelta object, find the largest nonzero time unit and
return that value, along with the time unit, as a human readable string.
The returned string is not localized.
Args:
timedelta_obj: datetime.timedelta. A datetime timedelta object. Datetime
timedelta objects are created when you subtract two datetime
objects.
Returns:
str. A human readable string representing the value of the largest
nonzero time unit, along with the time units. If the largest time unit
is seconds, 1 minute is returned. The value is represented as an integer
in the string.
Raises:
Exception. If the provided timedelta is not positive.
"""
total_seconds = timedelta_obj.total_seconds()
if total_seconds <= 0:
raise Exception(
'Expected a positive timedelta, received: %s.' % total_seconds)
elif timedelta_obj.days != 0:
return '%s day%s' % (
int(timedelta_obj.days), 's' if timedelta_obj.days > 1 else '')
else:
number_of_hours, remainder = divmod(total_seconds, SECONDS_IN_HOUR)
number_of_minutes, _ = divmod(remainder, SECONDS_IN_MINUTE)
if number_of_hours != 0:
return '%s hour%s' % (
int(number_of_hours), 's' if number_of_hours > 1 else '')
elif number_of_minutes > 1:
return '%s minutes' % int(number_of_minutes)
# Round any seconds up to one minute.
else:
return '1 minute'
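# Illustrative behaviour (examples added for clarity, not in the original
# module):
#
#     create_string_from_largest_unit_in_timedelta(
#         datetime.timedelta(days=4, hours=3))   # -> '4 days'
#     create_string_from_largest_unit_in_timedelta(
#         datetime.timedelta(minutes=30))        # -> '30 minutes'
#     create_string_from_largest_unit_in_timedelta(
#         datetime.timedelta(seconds=45))        # -> '1 minute'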
def are_datetimes_close(later_datetime, earlier_datetime):
"""Given two datetimes, determines whether they are separated by less than
feconf.PROXIMAL_TIMEDELTA_SECS seconds.
Args:
later_datetime: datetime. The later datetime.
earlier_datetime: datetime. The earlier datetime.
Returns:
bool. True if difference between two datetimes is less than
feconf.PROXIMAL_TIMEDELTA_SECS seconds otherwise false.
"""
difference_in_secs = (later_datetime - earlier_datetime).total_seconds()
return difference_in_secs < feconf.PROXIMAL_TIMEDELTA_SECS
def generate_random_string(length):
"""Generates a random string of the specified length.
Args:
length: int. Length of the string to be generated.
Returns:
str. Random string of specified length.
"""
return base64.urlsafe_b64encode(os.urandom(length))[:length]
def generate_new_session_id():
"""Generates a new session id.
Returns:
str. Random string of length 24.
"""
return generate_random_string(24)
def vfs_construct_path(base_path, *path_components):
"""Mimics behavior of os.path.join on Posix machines.
Args:
base_path: str. The initial path upon which components would be added.
*path_components: list(str). Components that would be added to the path.
Returns:
str. The path that is obtained after adding the components.
"""
return os.path.join(base_path, *path_components)
def vfs_normpath(path):
"""Normalize path from posixpath.py, eliminating double slashes, etc.
Args:
path: str. Path that is to be normalized.
Returns:
str. Path if it is not null else a dot string.
"""
return os.path.normpath(path)
def require_valid_name(name, name_type, allow_empty=False):
"""Generic name validation.
Args:
name: str. The name to validate.
name_type: str. A human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
allow_empty: bool. If True, empty strings are allowed.
Raises:
Exception. Name isn't a string.
        Exception. The length of the name isn't between 1 and 50
            characters.
        Exception. Name starts or ends with whitespace.
        Exception. Adjacent whitespace in the name isn't collapsed.
Exception. Invalid character is present in name.
"""
if not isinstance(name, python_utils.BASESTRING):
raise ValidationError('%s must be a string.' % name)
if allow_empty and name == '':
return
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise ValidationError(
'Names should not start or end with whitespace.')
if re.search(r'\s\s+', name):
raise ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for character in constants.INVALID_NAME_CHARS:
if character in name:
raise ValidationError(
'Invalid character %s in %s: %s' %
(character, name_type, name))
def require_valid_url_fragment(name, name_type, allowed_length):
"""Generic URL fragment validation.
Args:
name: str. The name to validate.
name_type: str. A human-readable string, like 'topic url fragment'.
This will be shown in error messages.
allowed_length: int. Allowed length for the name.
Raises:
Exception. Name is not a string.
Exception. Name is empty.
Exception. The length of the name_type is not correct.
Exception. Invalid character is present in the name.
"""
if not isinstance(name, python_utils.BASESTRING):
raise ValidationError(
'%s field must be a string. Received %s.' % (name_type, name))
if name == '':
raise ValidationError(
'%s field should not be empty.' % name_type)
if len(name) > allowed_length:
raise ValidationError(
'%s field should not exceed %d characters, '
'received %s.' % (name_type, allowed_length, name))
if not re.match(constants.VALID_URL_FRAGMENT_REGEX, name):
raise ValidationError(
'%s field contains invalid characters. Only lowercase words'
' separated by hyphens are allowed. Received %s.' % (
name_type, name))
def require_valid_thumbnail_filename(thumbnail_filename):
"""Generic thumbnail filename validation.
Args:
thumbnail_filename: str. The thumbnail filename to validate.
"""
if thumbnail_filename is not None:
if not isinstance(thumbnail_filename, python_utils.BASESTRING):
raise ValidationError(
'Expected thumbnail filename to be a string, received %s'
% thumbnail_filename)
if thumbnail_filename.rfind('.') == 0:
raise ValidationError(
'Thumbnail filename should not start with a dot.')
if '/' in thumbnail_filename or '..' in thumbnail_filename:
raise ValidationError(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.')
if '.' not in thumbnail_filename:
raise ValidationError(
'Thumbnail filename should include an extension.')
dot_index = thumbnail_filename.rfind('.')
extension = thumbnail_filename[dot_index + 1:].lower()
if extension != 'svg':
raise ValidationError(
'Expected a filename ending in svg, received %s' %
thumbnail_filename)
def require_valid_meta_tag_content(meta_tag_content):
"""Generic meta tag content validation.
Args:
meta_tag_content: str. The meta tag content to validate.
"""
if not isinstance(meta_tag_content, python_utils.BASESTRING):
raise ValidationError(
'Expected meta tag content to be a string, received %s'
% meta_tag_content)
if len(meta_tag_content) > constants.MAX_CHARS_IN_META_TAG_CONTENT:
raise ValidationError(
'Meta tag content should not be longer than %s characters.'
% constants.MAX_CHARS_IN_META_TAG_CONTENT)
def require_valid_page_title_fragment_for_web(page_title_fragment_for_web):
"""Generic page title fragment validation.
Args:
page_title_fragment_for_web: str. The page title fragment to validate.
Raises:
Exception. Page title fragment is not a string.
Exception. Page title fragment is too lengthy.
"""
max_chars_in_page_title_frag_for_web = (
constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)
if not isinstance(page_title_fragment_for_web, python_utils.BASESTRING):
raise ValidationError(
'Expected page title fragment to be a string, received %s'
% page_title_fragment_for_web)
if len(page_title_fragment_for_web) > max_chars_in_page_title_frag_for_web:
raise ValidationError(
'Page title fragment should not be longer than %s characters.'
% constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)
def capitalize_string(input_string):
"""Converts the first character of a string to its uppercase equivalent (if
it's a letter), and returns the result.
Args:
input_string: str. String to process (to capitalize).
Returns:
str. Capitalizes the string.
"""
# This guards against empty strings.
if input_string:
return input_string[0].upper() + input_string[1:]
else:
return input_string
def get_hex_color_for_category(category):
"""Returns the category, it returns the color associated with the category,
if the category is present in the app constants else given a default color.
Args:
category: str. Category to get color.
Returns:
str. Color assigned to that category.
"""
return (
constants.CATEGORIES_TO_COLORS[category]
if category in constants.CATEGORIES_TO_COLORS
else constants.DEFAULT_COLOR)
def get_thumbnail_icon_url_for_category(category):
"""Returns the category, it returns the associated thumbnail icon, if the
category is present in the app constants else given a default thumbnail.
Args:
category: str. Category to get Thumbnail icon.
Returns:
str. Path to the Thumbnail Icon assigned to that category.
"""
icon_name = (
category if category in constants.CATEGORIES_TO_COLORS
else constants.DEFAULT_THUMBNAIL_ICON)
# Remove all spaces from the string.
return '/subjects/%s.svg' % (icon_name.replace(' ', ''))
def is_supported_audio_language_code(language_code):
"""Checks if the given language code is a supported audio language code.
Args:
language_code: str. The language code.
Returns:
        bool. Whether the language code is a supported audio language code.
"""
language_codes = [lc['id'] for lc in constants.SUPPORTED_AUDIO_LANGUAGES]
return language_code in language_codes
def is_valid_language_code(language_code):
"""Checks if the given language code is a valid language code.
Args:
language_code: str. The language code.
Returns:
bool. Whether the language code is valid or not.
"""
language_codes = [
lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES]
return language_code in language_codes
def get_supported_audio_language_description(language_code):
"""Returns the language description for the given language code.
Args:
language_code: str. The language code for which the description is
required.
Returns:
str. The language description for the given language code.
Raises:
Exception. If the given language code is unsupported.
"""
for language in constants.SUPPORTED_AUDIO_LANGUAGES:
if language['id'] == language_code:
return language['description']
raise Exception('Unsupported audio language code: %s' % language_code)
def is_user_id_valid(
user_id, allow_system_user_id=False, allow_pseudonymous_id=False):
"""Verify that the user ID is in a correct format or that it belongs to
a system user.
Args:
user_id: str. The user ID to be checked.
allow_system_user_id: bool. Whether to allow system user ID.
allow_pseudonymous_id: bool. Whether to allow pseudonymized ID.
Returns:
bool. True when the ID is in a correct format or if the ID belongs to
a system user, False otherwise.
"""
if allow_system_user_id and user_id in feconf.SYSTEM_USERS.keys():
return True
if allow_pseudonymous_id and is_pseudonymous_id(user_id):
return True
return bool(re.match(feconf.USER_ID_REGEX, user_id))
def is_pseudonymous_id(user_id):
"""Check that the ID is a pseudonymous one.
Args:
user_id: str. The ID to be checked.
Returns:
bool. Whether the ID represents a pseudonymous user.
"""
return bool(re.match(feconf.PSEUDONYMOUS_ID_REGEX, user_id))
def unescape_encoded_uri_component(escaped_string):
"""Unescape a string that is encoded with encodeURIComponent.
Args:
escaped_string: str. String that is encoded with encodeURIComponent.
Returns:
str. Decoded string that was initially encoded with encodeURIComponent.
"""
return python_utils.urllib_unquote(escaped_string).decode('utf-8')
def snake_case_to_camel_case(snake_str):
"""Converts a string in snake_case to camelCase.
Args:
snake_str: str. String that is in snake_case.
Returns:
str. Converted string that is in camelCase.
"""
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + ''.join(x.title() for x in components[1:])
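# Illustrative behaviour (example added for clarity, not in the original
# module):
#
#     snake_case_to_camel_case('snake_case_to_camel_case')
#     # -> 'snakeCaseToCamelCase'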
def get_asset_dir_prefix():
"""Returns prefix for asset directory depending whether dev or prod.
It is used as a prefix in urls for images, css and script files.
Returns:
        str. Prefix '/build' if constants.DEV_MODE is false, otherwise an
        empty string.
"""
asset_dir_prefix = ''
if not constants.DEV_MODE:
asset_dir_prefix = '/build'
return asset_dir_prefix
def get_hashable_value(value):
"""This function returns a hashable version of the input JSON-like value.
It converts the built-in sequences into their hashable counterparts
{list: tuple, dict: (sorted tuple of pairs)}. Additionally, their
elements are converted to hashable values through recursive calls. All
other value types are assumed to already be hashable.
Args:
value: *. Some JSON-like object, that is, an object made-up of only:
lists, dicts, strings, ints, bools, None. Types can be nested in
each other.
Returns:
*. A new object that will always have the same hash for "equivalent"
values.
"""
if isinstance(value, list):
return tuple(get_hashable_value(e) for e in value)
elif isinstance(value, dict):
return tuple(sorted(
# Dict keys are already hashable, only values need converting.
(k, get_hashable_value(v)) for k, v in value.items()))
else:
return value
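# Illustrative behaviour (example added for clarity, not in the original
# module): nested lists become tuples and dicts become sorted tuples of pairs,
# e.g.
#
#     get_hashable_value({'b': [1, 2], 'a': 3})
#     # -> (('a', 3), ('b', (1, 2)))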
def compress_to_zlib(data):
"""Compress the data to zlib format for efficient storage and communication.
Args:
data: str. Data to be compressed.
Returns:
str. Compressed data string.
"""
return zlib.compress(data)
def decompress_from_zlib(data):
"""Decompress the zlib compressed data.
Args:
data: str. Data to be decompressed.
Returns:
str. Decompressed data string.
"""
return zlib.decompress(data)
def compute_list_difference(list_a, list_b):
"""Returns the set difference of two lists.
Args:
list_a: list. The first list.
list_b: list. The second list.
Returns:
list. List of the set difference of list_a - list_b.
"""
return list(set(list_a) - set(list_b))
class OrderedCounter(collections.Counter, collections.OrderedDict):
"""Counter that remembers the order elements are first encountered."""
pass
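# Illustrative behaviour (example added for clarity, not in the original
# module): iteration follows first-encounter order rather than count order,
# e.g.
#
#     list(OrderedCounter('abracadabra'))  # -> ['a', 'b', 'r', 'c', 'd']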
|
the-stack_0_470 | """Provides the 'OffshoreSubstationDesign` class."""
__author__ = "Jake Nunemaker"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "Jake Nunemaker"
__email__ = "[email protected]"
import numpy as np
from ORBIT.phases.design import DesignPhase
class OffshoreSubstationDesign(DesignPhase):
"""Offshore Substation Design Class."""
expected_config = {
"site": {"depth": "m"},
"plant": {"num_turbines": "int"},
"turbine": {"turbine_rating": "MW"},
"substation_design": {
"mpt_cost_rate": "USD/MW (optional)",
"topside_fab_cost_rate": "USD/t (optional)",
"topside_design_cost": "USD (optional)",
"shunt_cost_rate": "USD/MW (optional)",
"switchgear_cost": "USD (optional)",
"backup_gen_cost": "USD (optional)",
"workspace_cost": "USD (optional)",
"other_ancillary_cost": "USD (optional)",
"topside_assembly_factor": "float (optional)",
"oss_substructure_cost_rate": "USD/t (optional)",
"oss_pile_cost_rate": "USD/t (optional)",
"num_substations": "int (optional)",
},
}
output_config = {
"num_substations": "int",
"offshore_substation_topside": "dict",
"offshore_substation_substructure": "dict",
}
def __init__(self, config, **kwargs):
"""
Creates an instance of OffshoreSubstationDesign.
Parameters
----------
config : dict
"""
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self._outputs = {}
def run(self):
"""Main run function."""
self.calc_substructure_length()
self.calc_substructure_deck_space()
self.calc_topside_deck_space()
self.calc_num_mpt_and_rating()
self.calc_mpt_cost()
self.calc_topside_mass_and_cost()
self.calc_shunt_reactor_cost()
self.calc_switchgear_cost()
self.calc_ancillary_system_cost()
self.calc_assembly_cost()
self.calc_substructure_mass_and_cost()
self._outputs["offshore_substation_substructure"] = {
"type": "Monopile", # Substation install only supports monopiles
"deck_space": self.substructure_deck_space,
"mass": self.substructure_mass,
"length": self.substructure_length,
"unit_cost": self.substructure_cost,
}
self._outputs["offshore_substation_topside"] = {
"deck_space": self.topside_deck_space,
"mass": self.topside_mass,
"unit_cost": self.substation_cost,
}
self._outputs["num_substations"] = self.num_substations
@property
def substation_cost(self):
"""Returns total procuremet cost of the topside."""
return (
self.mpt_cost
+ self.topside_cost
+ self.shunt_reactor_cost
+ self.switchgear_costs
+ self.ancillary_system_costs
+ self.land_assembly_cost
)
@property
def total_cost(self):
"""Returns total procurement cost of the substation(s)."""
if not self._outputs:
raise Exception("Has OffshoreSubstationDesign been ran yet?")
return (
self.substructure_cost + self.substation_cost
) * self.num_substations
def calc_substructure_length(self):
"""
Calculates substructure length as the site depth + 10m
"""
self.substructure_length = self.config["site"]["depth"] + 10
def calc_substructure_deck_space(self):
"""
Calculates required deck space for the substation substructure.
Coming soon!
"""
self.substructure_deck_space = 1
def calc_topside_deck_space(self):
"""
Calculates required deck space for the substation topside.
Coming soon!
"""
self.topside_deck_space = 1
def calc_num_mpt_and_rating(self):
"""
Calculates the number of main power transformers (MPTs) and their rating.
Parameters
----------
num_turbines : int
turbine_rating : float
"""
_design = self.config.get("substation_design", {})
num_turbines = self.config["plant"]["num_turbines"]
turbine_rating = self.config["turbine"]["turbine_rating"]
capacity = num_turbines * turbine_rating
self.num_substations = _design.get(
"num_substations", int(np.ceil(capacity / 500))
)
self.num_mpt = np.ceil(
num_turbines * turbine_rating / (250 * self.num_substations)
)
self.mpt_rating = (
round(
(
(num_turbines * turbine_rating * 1.15)
/ (self.num_mpt * self.num_substations)
)
/ 10.0
)
* 10.0
)
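        # Illustrative worked example (not from the original source): with 80
        # turbines of 8 MW, capacity = 640 MW, so num_substations defaults to
        # ceil(640 / 500) = 2, num_mpt = ceil(640 / (250 * 2)) = 2 and
        # mpt_rating = round((640 * 1.15 / (2 * 2)) / 10) * 10 = 180 MW.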
def calc_mpt_cost(self):
"""
Calculates the total cost for all MPTs.
Parameters
----------
mpt_cost_rate : float
"""
_design = self.config.get("substation_design", {})
mpt_cost_rate = _design.get("mpt_cost_rate", 12500)
self.mpt_cost = self.mpt_rating * self.num_mpt * mpt_cost_rate
def calc_topside_mass_and_cost(self):
"""
Calculates the mass and cost of the substation topsides.
Parameters
----------
topside_fab_cost_rate : int | float
topside_design_cost: int | float
"""
_design = self.config.get("substation_design", {})
topside_fab_cost_rate = _design.get("topside_fab_cost_rate", 14500)
topside_design_cost = _design.get("topside_design_cost", 4.5e6)
self.topside_mass = 3.85 * self.mpt_rating * self.num_mpt + 285
self.topside_cost = (
self.topside_mass * topside_fab_cost_rate + topside_design_cost
)
def calc_shunt_reactor_cost(self):
"""
Calculates the cost of the shunt reactor.
Parameters
----------
shunt_cost_rate : int | float
"""
_design = self.config.get("substation_design", {})
shunt_cost_rate = _design.get("shunt_cost_rate", 35000)
self.shunt_reactor_cost = (
self.mpt_rating * self.num_mpt * shunt_cost_rate * 0.5
)
def calc_switchgear_cost(self):
"""
Calculates the cost of the switchgear.
Parameters
----------
switchgear_cost : int | float
"""
_design = self.config.get("substation_design", {})
switchgear_cost = _design.get("switchgear_cost", 14.5e5)
self.switchgear_costs = self.num_mpt * switchgear_cost
def calc_ancillary_system_cost(self):
"""
Calculates cost of ancillary systems.
Parameters
----------
backup_gen_cost : int | float
workspace_cost : int | float
other_ancillary_cost : int | float
"""
_design = self.config.get("substation_design", {})
backup_gen_cost = _design.get("backup_gen_cost", 1e6)
workspace_cost = _design.get("workspace_cost", 2e6)
other_ancillary_cost = _design.get("other_ancillary_cost", 3e6)
self.ancillary_system_costs = (
backup_gen_cost + workspace_cost + other_ancillary_cost
)
def calc_assembly_cost(self):
"""
Calculates the cost of assembly on land.
Parameters
----------
topside_assembly_factor : int | float
"""
_design = self.config.get("substation_design", {})
topside_assembly_factor = _design.get("topside_assembly_factor", 0.075)
self.land_assembly_cost = (
self.switchgear_costs
+ self.shunt_reactor_cost
+ self.ancillary_system_costs
) * topside_assembly_factor
def calc_substructure_mass_and_cost(self):
"""
Calculates the mass and associated cost of the substation substructure.
Parameters
----------
oss_substructure_cost_rate : int | float
oss_pile_cost_rate : int | float
"""
_design = self.config.get("substation_design", {})
oss_substructure_cost_rate = _design.get(
"oss_substructure_cost_rate", 3000
)
oss_pile_cost_rate = _design.get("oss_pile_cost_rate", 0)
substructure_mass = 0.4 * self.topside_mass
substructure_pile_mass = 8 * substructure_mass ** 0.5574
self.substructure_cost = (
substructure_mass * oss_substructure_cost_rate
+ substructure_pile_mass * oss_pile_cost_rate
)
self.substructure_mass = substructure_mass + substructure_pile_mass
@property
def design_result(self):
"""
Returns the results of self.run().
"""
if not self._outputs:
raise Exception("Has OffshoreSubstationDesign been ran yet?")
return self._outputs
@property
def detailed_output(self):
"""Returns detailed phase information."""
_outputs = {
"num_substations": self.num_substations,
"substation_mpt_rating": self.mpt_rating,
"substation_topside_mass": self.topside_mass,
"substation_topside_cost": self.topside_cost,
"substation_substructure_mass": self.substructure_mass,
"substation_substructure_cost": self.substructure_cost,
}
return _outputs
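# Illustrative usage sketch (added for clarity; not part of the original
# module, and the numbers below are made up):
#
#     config = {
#         "site": {"depth": 30},
#         "plant": {"num_turbines": 80},
#         "turbine": {"turbine_rating": 8},
#     }
#     ossd = OffshoreSubstationDesign(config)
#     ossd.run()
#     ossd.design_result  # topside/substructure dicts plus num_substations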
|
the-stack_0_473 | from __future__ import absolute_import
from mock import MagicMock, patch
from sentry.testutils.cases import RuleTestCase
from sentry.rules.actions.notify_event_service import NotifyEventServiceAction
from sentry.tasks.sentry_apps import notify_sentry_app
class NotifyEventServiceActionTest(RuleTestCase):
rule_cls = NotifyEventServiceAction
def test_applies_correctly_for_plugins(self):
event = self.get_event()
plugin = MagicMock()
plugin.is_enabled.return_value = True
plugin.should_notify.return_value = True
rule = self.get_rule(data={"service": "mail"})
with patch("sentry.plugins.plugins.get") as get_plugin:
get_plugin.return_value = plugin
results = list(rule.after(event=event, state=self.get_state()))
        assert len(results) == 1
        assert plugin.should_notify.call_count == 1
assert results[0].callback is plugin.rule_notify
def test_applies_correctly_for_sentry_apps(self):
event = self.get_event()
self.create_sentry_app(
organization=event.organization, name="Test Application", is_alertable=True
)
rule = self.get_rule(data={"service": "test-application"})
results = list(rule.after(event=event, state=self.get_state()))
        assert len(results) == 1
assert results[0].callback is notify_sentry_app
|
the-stack_0_474 | import pytest
import torch as to
import torch.nn as nn
from functools import partial
from tqdm import tqdm
from pyrado.sampling.utils import gen_batches, gen_ordered_batches
from pyrado.utils.data_types import *
from pyrado.utils.functions import noisy_nonlin_fcn
from pyrado.utils.math import cosine_similarity, cov
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
from pyrado.policies.dummy import DummyPolicy
from pyrado.sampling.rollout import rollout
from pyrado.sampling.step_sequence import StepSequence
from pyrado.utils.nn_layers import IndiNonlinLayer
from pyrado.utils.optimizers import GSS
from pyrado.utils.averaging import RunningExpDecayingAverage, RunningMemoryAverage
from pyrado.utils.standardizing import RunningStandardizer, Standardizer
from pyrado.utils.normalizing import RunningNormalizer, normalize
@pytest.mark.parametrize(
'x, data_along_rows', [
(np.random.rand(100, 4), True),
(np.random.rand(4, 100), False)
], ids=['100_4', '4_100']
)
def test_cov(x, data_along_rows):
rowvar = not data_along_rows
cov_np = np.cov(x, rowvar=rowvar)
cov_pyrado = cov(to.from_numpy(x), data_along_rows=data_along_rows).numpy()
assert cov_pyrado.shape[0] == cov_pyrado.shape[1]
if data_along_rows:
assert cov_np.shape[0] == x.shape[1]
assert cov_pyrado.shape[0] == x.shape[1]
else:
assert cov_np.shape[0] == x.shape[0]
assert cov_pyrado.shape[0] == x.shape[0]
assert np.allclose(cov_np, cov_pyrado)
@pytest.mark.parametrize(
'env, expl_strat', [
(BallOnBeamSim(dt=0.02, max_steps=100),
DummyPolicy(BallOnBeamSim(dt=0.02, max_steps=100).spec)),
], ids=['bob_dummy']
)
def test_concat_rollouts(env, expl_strat):
ro1 = rollout(env, expl_strat)
ro2 = rollout(env, expl_strat)
ro_cat = StepSequence.concat([ro1, ro2])
assert isinstance(ro_cat, StepSequence)
assert ro_cat.length == ro1.length + ro2.length
@pytest.mark.parametrize(
'x, y', [
(to.tensor([1., 2., 3.]), to.tensor([1., 2., 3.])),
(to.tensor([1., 0., 1.]), to.tensor([1., 1e12, 1.])),
(to.tensor([0., 0., 0.]), to.tensor([1., 2, 3.])),
(to.tensor([1., 2., 3.]), to.tensor([2., 4., 6.])),
(to.tensor([1., 2., 3.]), to.tensor([-1., -2., -3.])),
], ids=['same', 'similarity_1', 'similarity_0', 'colinear_scaled', 'colinear_opposite']
)
def test_cosine_similarity(x, y):
# Only tested for vector inputs
d_cos = cosine_similarity(x, y)
assert isinstance(d_cos, to.Tensor)
# The examples are chosen to result in 0, 1, or -1
assert to.isclose(d_cos, to.tensor(0.)) or to.isclose(d_cos, to.tensor(1.)) or to.isclose(d_cos, to.tensor(-1.))
@pytest.mark.parametrize(
'x, y', [
({'a': 1, 'b': 2}, {'c': 1, 'd': 4}),
({'a': 1, 'b': 2}, {'b': 3, 'd': 4}),
], ids=['disjoint', 'overlapping']
)
def test_merge_lod_var_dtype(x, y):
z = merge_lod_var_dtype([x, y])
assert z['a'] == 1
if z['b'] == 2: # disjoint
assert z['c'] == 1
elif z['b'] == 3: # overlapping
assert len(z) == 3
else:
assert False
assert z['d'] == 4
@pytest.mark.parametrize(
'batch_size, data_size', [
(3, 30),
(3, 29),
(3, 28),
(2, 2)
], ids=['division_mod0', 'division_mod1', 'division_mod2', 'edge_case']
)
def test_gen_ordered_batches(batch_size, data_size):
from math import ceil
generator = gen_batches(batch_size, data_size)
unordered_batches = list(generator)
assert len(unordered_batches) == ceil(data_size/batch_size)
assert all(len(uob) <= batch_size for uob in unordered_batches)
generator = gen_ordered_batches(batch_size, data_size)
ordered_batches = list(generator)
assert len(ordered_batches) == ceil(data_size/batch_size)
assert all(len(ob) <= batch_size for ob in ordered_batches)
# Check if each mini-batch is sorted
assert all(all(ob[i] <= ob[i + 1] for i in range(len(ob) - 1)) for ob in ordered_batches)
@pytest.mark.parametrize('dtype', ['torch', 'numpy'], ids=['to', 'np'])
@pytest.mark.parametrize('axis', [0, 1], ids=['ax_0', 'ax_1'])
def test_normalize(dtype, axis):
for _ in range(10):
x = to.rand(5, 3) if dtype == 'torch' else np.random.rand(5, 3)
x_norm = normalize(x, axis=axis, order=1)
if isinstance(x_norm, to.Tensor):
x_norm = x_norm.numpy() # for easier checking with pytest.approx
assert np.sum(x_norm, axis=axis) == pytest.approx(1.)
@pytest.mark.parametrize(
'data_seq, axis', [
([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], 0),
([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], None),
([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], 0),
([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], None),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),
to.tensor([10., -20., 20.])],
0),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),
to.tensor([10., -20., 20.])],
-1),
(
[to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),
to.tensor([10., 10, -20, 20])],
0),
(
[to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),
to.tensor([10., 10, -20, 20])],
-1),
], ids=['np_same_length_0', 'np_same_length_None', 'np_mixed_length_0', 'np_mixed_length_None',
'to_same_length_0', 'to_same_length_-1', 'to_mixed_length_0', 'to_mixed_length_-1']
)
def test_running_standardizer(data_seq, axis):
rs = RunningStandardizer()
for data in data_seq:
z = rs(data, axis)
assert z is not None
rs.reset()
assert rs._mean is None and rs._sum_sq_diffs is None and rs._iter == 0
@pytest.mark.parametrize(
'data_seq, alpha', [
(
[np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])],
0.9
),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]), to.tensor([10., -20., 20.])],
0.1
),
], ids=['np', 'to']
)
def test_running_expdecay_average(data_seq, alpha):
reda = RunningExpDecayingAverage(alpha)
for data in data_seq:
z = reda(data)
assert z is not None
reda.reset(alpha=0.5)
assert reda._alpha == 0.5 and reda._prev_est is None
@pytest.mark.parametrize(
'data_seq, capacity', [
(
[np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([-2., -2, -4])],
3
),
(
[to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([-2., -2, -4])],
3
),
], ids=['np', 'to']
)
def test_running_mem_average(data_seq, capacity):
rma = RunningMemoryAverage(capacity)
for i, data in enumerate(data_seq):
z = rma(data)
if i <= 2:
to.testing.assert_allclose(z, to.tensor([1., 1, 2])) # works with PyTorch Tensors and numpy arrays
elif i == 3:
to.testing.assert_allclose(z, to.tensor([0., 0, 0])) # works with PyTorch Tensors and numpy arrays
rma.reset(capacity=5)
assert rma.capacity == 5 and rma.memory is None
@pytest.mark.parametrize(
'data_seq', [
[5*np.random.rand(25, 3), 0.1*np.random.rand(5, 3), 20*np.random.rand(70, 3)],
[5*to.rand(25, 3), 0.1*to.rand(5, 3), 20*to.rand(70, 3)]
], ids=['np', 'to']
)
def test_running_normalizer(data_seq):
rn = RunningNormalizer()
for data in data_seq:
data_norm = rn(data)
assert (-1 <= data_norm).all()
assert (data_norm <= 1).all()
@pytest.mark.parametrize(
'x', [
to.rand(1000, 1),
to.rand(1, 1000),
to.rand(1000, 1000),
np.random.rand(1, 1000),
np.random.rand(1000, 1),
np.random.rand(1000, 1000)
], ids=['to_1x1000', 'to_1000x1', 'to_1000x1000', 'np_1x1000', 'np_1000x1', 'np_1000x1000']
)
def test_stateful_standardizer(x):
ss = Standardizer()
if isinstance(x, to.Tensor):
x_stdized = ss.standardize(x)
assert x_stdized.shape == x.shape
assert to.allclose(x_stdized.mean(), to.zeros(1))
assert to.allclose(x_stdized.std(), to.ones(1))
x_restrd = ss.unstandardize(x_stdized)
assert x_restrd.shape == x.shape
assert to.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)
elif isinstance(x, np.ndarray):
x_stdized = ss.standardize(x)
assert x_stdized.shape == x.shape
assert np.allclose(x_stdized.mean(), np.zeros(1))
assert np.allclose(x_stdized.std(), np.ones(1))
x_restrd = ss.unstandardize(x_stdized)
assert x_restrd.shape == x.shape
assert np.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)
@pytest.mark.parametrize(
'g, ed', [
(1., 2.),
(np.array([-1., 2.]), np.eye(2))
], ids=['scalar', 'array']
)
def test_ds_spec(g, ed):
# Base class
dss = DSSpec(function='name', goal=g)
assert isinstance(dss, dict)
assert dss['function'] == 'name'
if isinstance(g, np.ndarray):
assert np.all(dss['goal'] == g)
else:
assert dss['goal'] == g
# Linear first order subclass
lds = LinDSSpec(function='lin', goal=g, errorDynamics=ed)
assert isinstance(dss, dict)
assert lds['function'] == 'lin'
if isinstance(g, np.ndarray):
assert np.all(lds['goal'] == g)
assert np.all(lds['errorDynamics'] == ed)
else:
assert lds['goal'] == g
assert lds['errorDynamics'] == ed
# Mass-Spring-Damper subclass
msds = MSDDSSpec(function='msd', goal=g, damping=2., attractorStiffness=3., mass=4.)
assert isinstance(dss, dict)
assert msds['function'] == 'msd'
if isinstance(g, np.ndarray):
assert np.all(msds['goal'] == g)
else:
assert msds['goal'] == g
assert msds['damping'] == 2.
assert msds['attractorStiffness'] == 3.
assert msds['mass'] == 4.
@pytest.mark.optim
@pytest.mark.visualization
@pytest.mark.parametrize(
'identical_bounds', [
True, False
], ids=['identical', 'separate']
)
def test_gss_optimizer_identical_bounds(identical_bounds):
class Dummy:
def loss_fcn(self):
# Some function to minimize
return (self.x + self.y + 4)**2
def __init__(self):
# Test with different lower and upper bounds
self.x, self.y = to.tensor([0.]), to.tensor([4.])
x_min, x_max = to.tensor([-10.]), to.tensor([5.])
if identical_bounds:
self.optim = GSS([{'params': self.x}, {'params': self.y}], x_min, x_max)
else:
x_min_override = to.tensor([-6.])
self.optim = GSS([{'params': self.x, 'param_min': x_min_override}, {'params': self.y}], x_min, x_max)
print(self.optim)
dummy = Dummy()
for i in range(2):
dummy.optim.step(dummy.loss_fcn)
assert dummy.x != dummy.y
print(f'x = {dummy.x.item()} \t y = {dummy.y.item()}')
@pytest.mark.optim
def test_gss_optimizer_functional():
class Dummy:
def loss_fcn(self):
# Some function to minimize
return (self.x + 4)**2
def __init__(self):
# Test with different lower and upper bounds
self.x = to.tensor([0.])
x_min, x_max = to.tensor([-10.]), to.tensor([10.])
self.optim = GSS([{'params': self.x}], x_min, x_max)
dummy = Dummy()
for i in range(100):
dummy.optim.step(dummy.loss_fcn)
assert to.norm(dummy.x + 4) < 1e-4
@pytest.mark.optim
@pytest.mark.visualization
def test_gss_optimizer_nlin_fcn():
from matplotlib import pyplot as plt
# Parameters
x_grid = to.linspace(-2., 3., 200)
f = 1.
noise_std = 0.1
# Init param and optimizer
x_init = to.rand(1)*(x_grid.max() - x_grid.min())/2 + x_grid.min() + (x_grid.max() - x_grid.min())/4 # [.25, .75]
x = nn.Parameter(to.tensor([x_init]), requires_grad=False)
optim = GSS([x], param_min=x_grid.min().unsqueeze(0), param_max=x_grid.max().unsqueeze(0))
obj_fcn = partial(noisy_nonlin_fcn, x=x, f=f, noise_std=noise_std)
num_epochs = 10
# Init plotting
fig = plt.figure()
plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f), label='noise free fcn')
plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=40, marker='x', color='k', label='init guess')
colors = plt.get_cmap('inferno')(np.linspace(0, 1, num_epochs))
for e in tqdm(range(num_epochs), total=num_epochs):
# Evaluate at a the current point
optim.step(obj_fcn)
# Plot current evaluation
plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f, noise_std=noise_std), alpha=0.2)
plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=16, color=colors[e])
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.legend()
plt.show()
assert noisy_nonlin_fcn(x, f=f, noise_std=noise_std) < noisy_nonlin_fcn(x_init, f=f, noise_std=noise_std)
@pytest.mark.parametrize('in_features', [1, 3], ids=['1dim', '3dim'])
@pytest.mark.parametrize('same_nonlin', [True, False], ids=['same_nonlin', 'different_nonlin'])
@pytest.mark.parametrize('bias', [True, False], ids=['bias', 'no_bias'])
@pytest.mark.parametrize('weight', [True, False], ids=['weight', 'no_weight'])
def test_indi_nonlin_layer(in_features, same_nonlin, bias, weight):
if not same_nonlin and in_features > 1:
nonlin = in_features*[to.tanh]
else:
nonlin = to.sigmoid
layer = IndiNonlinLayer(in_features, nonlin, bias, weight)
assert isinstance(layer, nn.Module)
i = to.randn(in_features)
o = layer(i)
assert isinstance(o, to.Tensor)
assert i.shape == o.shape
|
the-stack_0_475 | # --------------
# import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
# Code starts here
# load data
news = pd.read_csv(path)
# subset data
news = news[['TITLE','CATEGORY']]
# distribution of classes
dist = news['CATEGORY'].value_counts()
# display class distribution
print(dist.head())
# display data
print(news.head())
# Code ends here
# --------------
# Code starts here
# stopwords
stop = (set(stopwords.words('english')))
# retain only alphabets
news['TITLE'] = news['TITLE'].apply(lambda x : re.sub("[^a-zA-Z]", " ",x) )
# convert to lowercase and tokenize
news['TITLE'] = news['TITLE'].apply(lambda x : x.lower().split())
# remove stopwords
news['TITLE'] = news['TITLE'].apply(lambda x : [i for i in x if i not in stop])
# join list elements
print(news['TITLE'].head(2))
news['TITLE'] = news['TITLE'].apply(lambda x : ' '.join(x))
print(news['TITLE'].head(2))
# split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(news['TITLE'], news['CATEGORY'], test_size=0.2, random_state=3)
# Code ends here
# --------------
# Code starts here
# initialize count vectorizer
count_vectorizer = CountVectorizer()
# initialize tfidf vectorizer
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
# fit and transform with count vectorizer
X_train_count= count_vectorizer.fit_transform(X_train)
X_test_count = count_vectorizer.transform(X_test)
# fit and transform with tfidf vectorizer
X_train_tfidf= tfidf_vectorizer.fit_transform(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
# Code ends here
# --------------
# Code starts here
# initialize multinomial naive bayes
nb_1 = MultinomialNB()
nb_2 = MultinomialNB()
# fit on count vectorizer training data
nb_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
nb_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_nb = accuracy_score(nb_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), y_test)
# display accuracies
print('Count Vectorizer accuracy is', acc_count_nb)
print('TFIDF accuracy is', acc_tfidf_nb)
# Code ends here
# --------------
import warnings
warnings.filterwarnings('ignore')
# initialize logistic regression
logreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))
logreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))
# fit on count vectorizer training data
logreg_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
logreg_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)
# display accuracies
print('Count vectorizer accuracy is', acc_count_logreg)
print('TFIDF accuracy is', acc_tfidf_logreg)
# Code ends here
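
# --------------
# Illustrative addition (not part of the original exercise): confusion_matrix
# is imported above but never used. A minimal sketch, assuming logreg_2,
# X_test_tfidf and y_test from the previous steps are still in scope.
cm_tfidf_logreg = confusion_matrix(y_test, logreg_2.predict(X_test_tfidf))
print('Confusion matrix (TF-IDF + logistic regression):')
print(cm_tfidf_logreg)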
|
the-stack_0_476 | from bisect import bisect_right
from itertools import accumulate
from math import inf, sqrt
from numbers import Number
class ApproximateHistogram:
"""
Streaming, approximate histogram
Based on http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf
Performance of adding a point is about 5x faster than
https://github.com/carsonfarmer/streamhist (unmaintained).
The output of quantile() will match numpy.quantile() exactly until
the number of points reaches max_bins, and then gracefully transition
to an approximation.
"""
def __init__(self, max_bins):
self._max_bins = max_bins
self._bins = [] # (point, count)
self._costs = [] # item i is _bins[i+1].point - _bins[i].point
self._count = 0
# TODO: maintain min/max as bin entries with infinite merge cost
self._min = inf
self._max = -inf
@staticmethod
def _update_costs(costs, l, i, val):
"""update costs array to reflect l.insert(i, val)"""
if i > 0:
new_cost = val[0] - l[i - 1][0]
costs.insert(i - 1, new_cost)
if i < len(costs):
costs[i] = l[i + 1][0] - val[0]
elif len(l) > 1:
costs.insert(0, l[1][0] - val[0])
# assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)
@staticmethod
def _update_costs_for_merge(costs, l, i, val):
"""update costs array to reflect l[i:i+2] = (val, )"""
# TODO: combine with update_costs()
if 0 < i < len(costs) - 1:
costs[i - 1:i + 2] = val[0] - l[i - 1][0], l[i + 1][0] - val[0]
elif i > 0:
costs[i - 1:i + 1] = (val[0] - l[i - 1][0], )
else:
costs[i:i + 2] = (l[i + 1][0] - val[0], )
# assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)
@classmethod
def _insert_with_cost(cls, costs, l, val):
i = bisect_right(l, val)
l.insert(i, val)
cls._update_costs(costs, l, i, val)
def add(self, point):
"""Add point to histogram"""
# optimization: maintain cost array
self._count += 1
self._min = min(self._min, point)
self._max = max(self._max, point)
bins = self._bins
costs = self._costs
self._insert_with_cost(costs, bins, (point, 1))
if len(bins) > self._max_bins:
i = costs.index(min(costs))
            (q0, k0), (q1, k1) = bins[i:i+2]
            _count = k0 + k1
            # merge the two closest bins into their count-weighted mean centroid
            merged = (q0 * k0 + q1 * k1) / _count
            bins[i:i+2] = ((merged, _count), )
            self._update_costs_for_merge(costs, bins, i, (merged, _count))
@property
def count(self):
"""Return number of points represented by this histogram."""
return self._count
@property
def min(self):
"""Return minimum point represented by this histogram"""
return self._min
@property
def max(self):
"""Return maximum point represented by this histogram"""
return self._max
def mean(self):
"""Return mean; O(max_bins) complexity."""
return sum(p * count for p, count in self._bins) / self._count
def std(self):
"""Return standard deviation; O(max_bins) complexity."""
mean = self.mean()
sum_squares = sum((p - mean) ** 2 * count for p, count in self._bins)
return sqrt(sum_squares / self._count)
def _quantile(self, sums, q):
if q <= 0:
return self._min
if q >= 1:
return self._max
bins = self._bins
target_sum = q * (self._count - 1) + 1
i = bisect_right(sums, target_sum) - 1
left = bins[i] if i >= 0 else (self._min, 0)
right = bins[i+1] if i+1 < len(bins) else (self._max, 0)
l0, r0 = left[0], right[0]
l1, r1 = left[1], right[1]
s = target_sum - (sums[i] if i >= 0 else 1)
if l1 <= 1 and r1 <= 1:
# We have exact info at this quantile. Match linear interpolation
# strategy of numpy.quantile().
b = l0 + (r0 - l0) * s / r1 if r1 > 0 else l0
else:
if r1 == 1:
# For exact bin on RHS, compensate for trapezoid interpolation using
# only half of count.
r1 = 2
if l1 == r1:
bp_ratio = s / l1
else:
bp_ratio = (l1 - (l1 ** 2 - 2 * s * (l1 - r1)) ** .5) / (l1 - r1)
assert bp_ratio.imag == 0
b = bp_ratio * (r0 - l0) + l0
return b
def sum(self):
"""Return sum of points; O(max_bins) complexity."""
return sum(x * count for x, count in self._bins)
def quantile(self, q):
"""Return list of values at given quantile fraction(s); O(max_bins) complexity."""
# Deviation from Ben-Haim sum strategy:
# * treat count 1 bins as "exact" rather than dividing the count at the point
# * for neighboring exact bins, use simple linear interpolation matching
# numpy.quantile()
if isinstance(q, Number):
q = (q, )
bins = self._bins
sums = [x - (y/2 if y > 1 else 0) for x, (_, y) in \
zip(accumulate(bin[1] for bin in bins), bins)]
return list(self._quantile(sums, q_item) for q_item in q)
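

if __name__ == '__main__':
    # Usage sketch (not part of the original module): stream points into the
    # histogram and compare its quantiles with numpy.quantile. Per the class
    # docstring, the two match exactly until the point count exceeds max_bins,
    # after which the histogram becomes an approximation.
    import random

    import numpy as np

    random.seed(0)
    data = [random.gauss(0.0, 1.0) for _ in range(10000)]

    hist = ApproximateHistogram(max_bins=64)
    for point in data:
        hist.add(point)

    qs = (0.05, 0.5, 0.95)
    approx = hist.quantile(qs)
    exact = np.quantile(data, qs)
    for q, a, e in zip(qs, approx, exact):
        print(f'q={q:.2f}  approx={a:+.4f}  exact={e:+.4f}')
    print(f'count={hist.count}  mean={hist.mean():+.4f}  std={hist.std():.4f}')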
|
the-stack_0_477 | import numpy as np
def scroll(clip, h=None, w=None, x_speed=0, y_speed=0,
x_start=0, y_start=0, apply_to="mask"):
""" Scrolls horizontally or vertically a clip, e.g. to make fin
credits """
if h is None: h = clip.h
if w is None: w = clip.w
xmax = clip.w-w-1
ymax = clip.h-h-1
def f(gf,t):
        # cast to int so the values can be used as array slice indices
        x = int(max(0, min(xmax, x_start + np.round(x_speed*t))))
        y = int(max(0, min(ymax, y_start + np.round(y_speed*t))))
return gf(t)[y:y+h, x:x+w]
return clip.fl(f, apply_to = apply_to)
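

if __name__ == '__main__':
    # Usage sketch (not part of the original module): slide a 300-pixel-tall
    # window down a clip at 20 px/s, which makes the content appear to scroll
    # upwards like end credits. Assumes a moviepy 1.x style clip exposing .fl()
    # and a hypothetical input file 'credits.mp4'.
    from moviepy.editor import VideoFileClip

    clip = VideoFileClip('credits.mp4')
    scrolled = scroll(clip, h=300, y_speed=20, y_start=0, apply_to='mask')
    scrolled.write_videofile('credits_scrolled.mp4')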
|
the-stack_0_478 | """Non-Maximum Suppression module."""
import numpy as np
import torch
def nms(detections, threshold):
"""Apply Non-Maximum Suppression over the detections.
The detections must be a tensor with two dimensions: (number of detections, 5).
Why 5? Because a detection has x1, y1, x2, y2 and score.
Heavily inspired by Adrian Rosebrock at:
https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Why not the version of GPU? Because I couldn't make it work in my GPU.
Args:
detections (torch.Tensor): A tensor with all the detections. The shape must be
(number of detections, 5) with the score as the last value of the second
dimension.
threshold (float): The threshold for the IoU (intersection over union) to take
two detections as detecting the same object.
Returns:
torch.Tensor: A tensor with the indexes of the detections to keep.
"""
# If there aren't detections return empty
if detections.shape[0] == 0:
return torch.zeros((0))
# Get the numpy version
was_cuda = detections.is_cuda
detections = detections.cpu().numpy()
# Start the picked indexes list empty
picked = []
# Get the coordinates
x1 = detections[:, 0]
y1 = detections[:, 1]
x2 = detections[:, 2]
y2 = detections[:, 3]
scores = detections[:, 4]
# Compute the area of the bounding boxes
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# Get the indexes of the detections sorted by score
indexes = np.argsort(scores)
while len(indexes) > 0:
# Take the last index (highest score) and add it to the picked
last = len(indexes) - 1
actual = indexes[last]
picked.append(actual)
# We need to find the overlap of the bounding boxes with the actual picked bounding box
# Find the largest (more to the bottom-right) (x,y) coordinates for the start
# (top-left) of the bounding box
xx1 = np.maximum(x1[actual], x1[indexes[:last]])
yy1 = np.maximum(y1[actual], y1[indexes[:last]])
# Find the smallest (more to the top-left) (x,y) coordinates for the end (bottom-right)
# of the bounding box
xx2 = np.minimum(x2[actual], x2[indexes[:last]])
yy2 = np.minimum(y2[actual], y2[indexes[:last]])
# Compute width and height to compute the intersection over union
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
intersection = (w * h)
union = areas[actual] + areas[indexes[:last]] - intersection
iou = intersection / union
# Delete the last index and all that overlap is bigger than threshold
indexes = np.delete(indexes, np.concatenate(([last], np.where(iou > threshold)[0])))
# Return the picked indexes
picked = torch.Tensor(picked).long()
if was_cuda:
picked = picked.cuda()
return picked
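

if __name__ == '__main__':
    # Usage sketch (not part of the original module): three detections where
    # two boxes overlap heavily. With an IoU threshold of 0.5 the lower-scoring
    # box of the overlapping pair is suppressed, so two indexes are kept.
    detections = torch.tensor([
        [10., 10., 50., 50., 0.9],      # box A, highest score
        [12., 12., 52., 52., 0.8],      # box B, overlaps A -> suppressed
        [100., 100., 140., 140., 0.7],  # box C, far from A -> kept
    ])
    keep = nms(detections, threshold=0.5)
    print(keep)             # expected: indexes of boxes A and C
    print(detections[keep])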
|
the-stack_0_479 | import os
from functools import partial
import numpy as np
import pandas as pd
import tables
import matplotlib
import warnings
from PyQt5.QtCore import Qt, QPointF
from PyQt5.QtGui import QPixmap, QPainter, QFont, QPen, QPolygonF, QColor, QKeySequence, QBrush
from PyQt5.QtWidgets import QApplication, QMessageBox
from tierpsy.analysis.ske_create.helperIterROI import getWormROI
from tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter
from tierpsy.gui.MWTrackerViewer_ui import Ui_MWTrackerViewer
from tierpsy.gui.TrackerViewerAux import TrackerViewerAuxGUI
from tierpsy.gui.PlotFeatures import PlotFeatures
from tierpsy.helper.misc import WLAB, save_modified_table
from tierpsy.analysis.split_fov.helper import get_well_color
class WellsDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the wells division drawer functions
'''
def __init__(self, ui):
super().__init__(ui)
# colour
self.fovsplitter_mask = None
self.fovsplitter_feat = None
self.fovsplitter = None
self.is_fov_tosplit = None
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
# check if /fov_wells exists in masked video
if self.fid is not None:
if '/fov_wells' not in self.fid:
self.is_fov_tosplit = False
else:
self.is_fov_tosplit = True
# if it exists, read it
if self.is_fov_tosplit:
# self.wells_in_mask = pd.DataFrame(
# self.fid.get_node('/fov_wells').read())
self.fovsplitter_mask = FOVMultiWellsSplitter(self.vfilename)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
# if no skeletons, skip
if not self.skeletons_file:
return
# check if /fov_wells exists in features video
with tables.File(self.skeletons_file, 'r') as fid:
if '/fov_wells' not in fid:
self.is_fov_tosplit = False
# print("didn't find fov wells though")
else:
self.is_fov_tosplit = True
# print("found fov wells in featuresN")
# if it exists, read it
if self.is_fov_tosplit:
# print('reading fov_wells from featuresN')
# print('pre-reading:')
# print(self.wells)
# self.wells_in_feat = pd.DataFrame(
# fid.get_node('/fov_wells').read())
self.fovsplitter_feat = FOVMultiWellsSplitter(self.skeletons_file)
def draw_wells(self, image):
'''
Draw wells.
'''
if self.is_fov_tosplit:
if self.fovsplitter_feat is not None:
self.fovsplitter = self.fovsplitter_feat
else: # fall back to mask ones
print('falling back')
self.fovsplitter = self.fovsplitter_mask
# prepare constants for drawing
self.fontsize = max(1, max(image.height(), image.width()) // 60)
penwidth = max(1, max(image.height(), image.width()) // 400)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
# self.wellsC = QColor(250, 140, 0)
if 'is_good_well' in self.fovsplitter.wells.columns:
is_color_by_well = True
else:
is_color_by_well = False
# Qt drawing code
painter = QPainter()
painter.begin(image)
pen = QPen()
pen.setWidth(self.penwidth)
painter.setFont(QFont('Decorative', self.fontsize))
# loop on wells
for _, well in self.fovsplitter.wells.iterrows():
# update color every time
if is_color_by_well:
wellC = get_well_color(well['is_good_well'], forCV=True)
wellC = QColor(*wellC)
else:
wellC = QColor(250, 140, 0)
pen.setColor(wellC)
painter.setPen(pen)
# draw well name
painter.drawText(well['x_min'] + self.fontsize*0.4,
well['y_min'] + self.fontsize*1.2,
well['well_name'])
# draw rectangle
painter.drawRect(well['x_min'],
well['y_min'],
well['x_max'] - well['x_min'],
well['y_max'] - well['y_min'])
if well['is_good_well'] == False:
painter.drawLine(well['x_min'],
well['y_min'],
well['x_max'],
well['y_max'])
painter.end()
# super().keyPressEvent(event)
class ContourDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the contour functions
'''
def __init__(self, ui):
super().__init__(ui)
self.food_coordinates = None
self.wlabC = {
WLAB['U']: Qt.white,
WLAB['WORM']: Qt.green,
WLAB['WORMS']: Qt.blue,
WLAB['BAD']: Qt.darkRed,
WLAB['GOOD_SKE']: Qt.darkCyan
}
self.ui.checkBox_showFood.stateChanged.connect(self.updateImage)
self.ui.checkBox_showFood.setEnabled(False)
self.ui.checkBox_showFood.setChecked(True)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if not self.skeletons_file or self.trajectories_data is None:
self.food_coordinates = None
return
with tables.File(self.skeletons_file, 'r') as fid:
if not '/food_cnt_coord' in fid:
self.food_coordinates = None
self.ui.checkBox_showFood.setEnabled(False)
else:
#change from microns to pixels
self.food_coordinates = fid.get_node('/food_cnt_coord')[:]
self.food_coordinates /= self.microns_per_pixel
self.ui.checkBox_showFood.setEnabled(True)
def draw_food_contour(self, image):
if self.food_coordinates is None or not self.ui.checkBox_showFood.isChecked():
return
painter = QPainter()
painter.begin(image)
penwidth = max(1, max(image.height(), image.width()) // 800)
col = Qt.darkMagenta
p = QPolygonF()
for x,y in self.food_coordinates:
p.append(QPointF(x,y))
pen = QPen()
pen.setWidth(penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(p)
painter.end()
class IntensityLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.mean_intensity = None
        self.ui.intensity_label.setStyleSheet('') # avoid displaying a color at the start of the program
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
if self.fid is not None:
#get mean intensity information.
#Useful for the optogenetic experiments.
try:
mean_int = self.fid.get_node('/mean_intensity')[:]
#calculate the intensity range and normalize the data.
#I am ignoring any value less than 1. The viewer only works with uint8 data.
dd = mean_int[mean_int>=1]
if dd.size == 0:
raise ValueError
bot = np.min(dd)
top = np.max(dd)
rr = top-bot
                # if the range of the mean intensity is less than 1 (likely a continuous image), do nothing
if rr <= 1:
raise ValueError
self.mean_intensity = (mean_int-bot)/(rr)
except (tables.exceptions.NoSuchNodeError, ValueError):
self.mean_intensity = None
self.ui.intensity_label.setStyleSheet('')
def display_intensity(self):
if self.mean_intensity is not None and self.frame_number < self.mean_intensity.size:
d = int(self.mean_intensity[self.frame_number]*255)
self.ui.intensity_label.setStyleSheet('QLabel {background-color: rgb(%i, %i, %i);}' % (0, 0, d))
class BlobLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.wlab = WLAB
self.label_type = 'worm_label'
self.ui.pushButton_U.clicked.connect(
partial(self._h_tag_worm, self.wlab['U']))
self.ui.pushButton_W.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORM']))
self.ui.pushButton_WS.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORMS']))
self.ui.pushButton_B.clicked.connect(
partial(self._h_tag_worm, self.wlab['BAD']))
self.ui.pushButton_W.setShortcut(QKeySequence(Qt.Key_W))
self.ui.pushButton_U.setShortcut(QKeySequence(Qt.Key_U))
self.ui.pushButton_WS.setShortcut(QKeySequence(Qt.Key_C))
self.ui.pushButton_B.setShortcut(QKeySequence(Qt.Key_B))
def enable_label_buttons(self, value):
self.ui.pushButton_U.setEnabled(value)
self.ui.pushButton_W.setEnabled(value)
self.ui.pushButton_WS.setEnabled(value)
self.ui.pushButton_B.setEnabled(value)
def _h_tag_worm(self, label_ind):
if not self.worm_index_type == 'worm_index_manual':
return
worm_ind = self.current_worm_index
if self.frame_data is None:
return
if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'The selected worm is not in this frame.',
'Select a worm in the current frame to label.',
QMessageBox.Ok)
return
good = self.trajectories_data['worm_index_manual'] == worm_ind
self.trajectories_data.loc[good, 'worm_label'] = label_ind
self.updateImage()
class ROIWorm():
def __init__(self, wormCanvas, comboBox_ROI, checkBox_ROI):
self.worm_index = None
self.wormCanvas = wormCanvas
self.comboBox_ROI = comboBox_ROI
self.checkBox_ROI = checkBox_ROI
self.comboBox_ROI.activated.connect(self.selectROI)
def selectROI(self, index):
try:
self.worm_index = int(self.comboBox_ROI.itemText(index))
except ValueError:
self.worm_index = None
@property
def isDrawSkel(self):
return self.checkBox_ROI.isChecked()
class ROIManager(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.rois = [
ROIWorm(
self.ui.wormCanvas1,
self.ui.comboBox_ROI1,
self.ui.checkBox_ROI1
),
ROIWorm(
self.ui.wormCanvas2,
self.ui.comboBox_ROI2,
self.ui.checkBox_ROI2
)
]
self.ui.radioButton_ROI1.setShortcut(QKeySequence(Qt.Key_Up))
self.ui.radioButton_ROI2.setShortcut(QKeySequence(Qt.Key_Down))
self.ui.checkBox_ROI1.stateChanged.connect(partial(self._updateROI, self.rois[0]))
self.ui.checkBox_ROI2.stateChanged.connect(partial(self._updateROI, self.rois[1]))
self.ui.comboBox_ROI1.activated.connect(partial(self._updateROI, self.rois[0]))
self.ui.comboBox_ROI2.activated.connect(partial(self._updateROI, self.rois[1]))
# flags for RW and FF
self.RW, self.FF = 1, 2
self.ui.pushButton_ROI1_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[0]))
self.ui.pushButton_ROI1_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[0]))
self.ui.pushButton_ROI2_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[1]))
self.ui.pushButton_ROI2_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[1]))
@property
def current_roi(self):
if self.ui.radioButton_ROI1.isChecked():
return self.rois[0]
elif self.ui.radioButton_ROI2.isChecked():
return self.rois[1]
else:
raise ValueError("I shouldn't be here")
@property
def current_worm_index(self):
return self.current_roi.worm_index
def updateSkelFile(self, skeletons_file):
for roi in self.rois:
roi.worm_index = None
super().updateSkelFile(skeletons_file)
def keyPressEvent(self, event):
#MORE SHORTCUTS
# go the the start of end of a trajectory
if event.key() == Qt.Key_BracketLeft:
self.roiRWFF(self.RW, self.current_roi)
elif event.key() == Qt.Key_BracketRight:
self.roiRWFF(self.FF, self.current_roi)
super().keyPressEvent(event)
def updateROIcomboBox(self, roi):
# update valid index for the comboBox
roi.comboBox_ROI.clear()
if roi.worm_index is not None:
roi.comboBox_ROI.addItem(str(int(roi.worm_index)))
for ind in self.frame_data[self.worm_index_type]:
roi.comboBox_ROI.addItem(str(int(ind)))
if roi.worm_index is None:
w_ind = float(roi.comboBox_ROI.itemText(0))
roi.worm_index = int(w_ind)
# function that generalized the updating of the ROI
def _updateROI(self, roi):
if self.frame_data is None or not self.worm_index_type:
# no trajectories data presented, nothing to do here
roi.wormCanvas.clear()
return
self.updateROIcomboBox(roi)
# extract individual worm ROI
good = self.frame_data[self.worm_index_type] == roi.worm_index
row_data = self.frame_data.loc[good].squeeze()
if row_data.size == 0 or \
np.isnan(row_data['coord_x']) or \
np.isnan(row_data['coord_y']):
# invalid data nothing to do here
roi.wormCanvas.clear()
return
worm_img, roi_corner = getWormROI(self.frame_img,
row_data['coord_x'],
row_data['coord_y'],
row_data['roi_size']
)
roi_ori_size = worm_img.shape
worm_img = np.ascontiguousarray(worm_img)
worm_qimg = self._convert2Qimg(worm_img)
canvas_size = min(roi.wormCanvas.height(), roi.wormCanvas.width())
worm_qimg = worm_qimg.scaled(
canvas_size, canvas_size, Qt.KeepAspectRatio)
worm_qimg = self.drawSkelResult(worm_img, worm_qimg, row_data, roi.isDrawSkel, roi_corner, read_center=False)
pixmap = QPixmap.fromImage(worm_qimg)
roi.wormCanvas.setPixmap(pixmap)
def updateROIs(self):
for roi in self.rois:
self._updateROI(roi)
def clearROIs(self):
for roi in self.rois:
roi.wormCanvas.clear()
# move to the first or the last frames of a trajectory
def roiRWFF(self, rwff, roi):
if self.frame_data is None:
return
# use 1 for rewind RW or 2 of fast forward
good = self.trajectories_data[self.worm_index_type] == roi.worm_index
frames = self.trajectories_data.loc[good, 'frame_number']
if frames.size == 0:
return
if rwff == self.RW:
self.frame_number = frames.min()
elif rwff == self.FF:
self.frame_number = frames.max()
else:
raise ValueError('Invalid rwff value : {} '.format(rwff))
self.ui.spinBox_frame.setValue(self.frame_number)
class TrajectoryEditor(ROIManager):
def __init__(self, ui):
super().__init__(ui)
self.ui.pushButton_join.clicked.connect(self.joinTraj)
self.ui.pushButton_split.clicked.connect(self.splitTraj)
#SHORTCUTS
self.ui.pushButton_join.setShortcut(QKeySequence(Qt.Key_J))
self.ui.pushButton_split.setShortcut(QKeySequence(Qt.Key_S))
def enable_trajectories_buttons(self, value):
self.ui.pushButton_join.setEnabled(value)
self.ui.pushButton_split.setEnabled(value)
def joinTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind1 = self.rois[0].worm_index
worm_ind2 = self.rois[1].worm_index
if worm_ind1 == worm_ind2:
QMessageBox.critical(
self,
'Cannot join the same trajectory with itself',
'Cannot join the same trajectory with itself.',
QMessageBox.Ok)
return
index1 = (self.trajectories_data[
'worm_index_manual'] == worm_ind1).values
index2 = (self.trajectories_data[
'worm_index_manual'] == worm_ind2).values
        # if the trajectories do not overlap they should not have frame_number
        # indexes in common
frame_number = self.trajectories_data.loc[
index1 | index2, 'frame_number']
if frame_number.size != np.unique(frame_number).size:
QMessageBox.critical(
self,
                'Cannot join overlapping trajectories',
                'Cannot join overlapping trajectories.',
QMessageBox.Ok)
return
if not (worm_ind1 in self.frame_data[
'worm_index_manual'].values or worm_ind2 in self.frame_data['worm_index_manual'].values):
reply = QMessageBox.question(
self,
'Message',
"The none of the selected worms to join is not in this frame. Are you sure to continue?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return
# get the first row for each segment to extract some data
first_row1 = self.trajectories_data.loc[index1, :].iloc[0]
first_row2 = self.trajectories_data.loc[index2, :].iloc[0]
# join trajectories
self.trajectories_data.loc[
index2, 'worm_label'] = first_row1['worm_label']
self.trajectories_data.loc[index2, 'worm_index_manual'] = worm_ind1
self.rois[0].worm_index = worm_ind1
self.rois[1].worm_index = worm_ind1
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
def splitTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind = self.current_worm_index
        if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'Worm index is not in the current frame.',
'Worm index is not in the current frame. Select a valid index.',
QMessageBox.Ok)
return
last_index = self.trajectories_data['worm_index_manual'].max()
new_ind1 = last_index + 1
new_ind2 = last_index + 2
good = self.trajectories_data['worm_index_manual'] == worm_ind
frames = self.trajectories_data.loc[good, 'frame_number']
frames = frames.sort_values(inplace=False)
good = frames < self.frame_number
index1 = frames[good].index
index2 = frames[~good].index
        self.trajectories_data.loc[index1, 'worm_index_manual'] = new_ind1
        self.trajectories_data.loc[index2, 'worm_index_manual'] = new_ind2
        self.rois[0].worm_index = new_ind1
        self.rois[1].worm_index = new_ind2
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
class FeatureReaderBase(TrackerViewerAuxGUI):
index_cols = ['worm_index', 'timestamp', 'motion_modes', 'skeleton_id', 'well_name']
valid_fields = ['/timeseries_data', '/features_timeseries']
def __init__(self, ui):
self.timeseries_data = None
self.feat_column = ''
super().__init__(ui)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
try:
self.traj_colors = {}
with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:
for field in self.valid_fields:
if field in ske_file_id:
self.timeseries_data = ske_file_id[field]
if field == '/timeseries_data':
blob_features = ske_file_id['/blob_features']
blob_features.columns = ['blob_' + x for x in blob_features.columns]
self.timeseries_data = pd.concat((self.timeseries_data, blob_features), axis=1)
break
else:
raise KeyError
            if len(self.timeseries_data) != len(self.trajectories_data):
                raise ValueError('timeseries_data and trajectories_data do not match. You might be using an old version of featuresN.hdf5')
self.valid_features = [x for x in self.timeseries_data.columns if x not in self.index_cols]
except (TypeError, AttributeError, IOError, KeyError, tables.exceptions.HDF5ExtError):
self.valid_features = None
self.timeseries_data = None
class MarkersDrawer(FeatureReaderBase):
def __init__(self, ui):
super().__init__(ui)
self.traj_colors = {}
self.n_points_traj = 250
self.n_colors = 256
cmap = matplotlib.cm.get_cmap("bwr")
palette = [cmap(x) for x in np.linspace(0, 1, self.n_colors)]
#palette = sns.color_palette("RdBu_r", self.n_colors)
        palette = np.round(np.array(palette)*255).astype(int)
self.palette = [QColor(*x) for x in palette]
self.drawT = {x: self.ui.comboBox_drawType.findText(x , flags=Qt.MatchContains)
for x in ['boxes', 'traj']}
self.showT = {x: self.ui.comboBox_showLabels.findText(x , flags=Qt.MatchContains)
for x in ['hide', 'all', 'filter']}
self.ui.comboBox_showLabels.setCurrentIndex(self.showT['all'])
self.ui.comboBox_showLabels.currentIndexChanged.connect(self.updateImage)
self.ui.comboBox_drawType.currentIndexChanged.connect(self.updateImage)
self.ui.feature_column.currentIndexChanged.connect(self.change_feature)
self.ui.feat_max_value.valueChanged.connect(self.updateImage)
self.ui.feat_min_value.valueChanged.connect(self.updateImage)
self.ui.is_color_features.stateChanged.connect(self.updateImage)
self.enable_color_feats(False)
self.ui.spinBox_step.valueChanged.connect(self.updateImage)
def updateSkelFile(self, skeletons_file):
self.ui.is_color_features.setChecked(False)
super().updateSkelFile(skeletons_file)
self.ui.feature_column.clear()
if self.timeseries_data is None:
#no feature data
self.enable_color_feats(False)
else:
self.enable_color_feats(True)
self.ui.feature_column.addItems(self.valid_features)
self._h_find_feat_limits()
def change_feature(self):
self._h_find_feat_limits()
self.updateImage()
def _h_find_feat_limits(self):
self.feat_column = str(self.ui.feature_column.currentText())
print(self.feat_column)
if self.feat_column and self.timeseries_data is not None:
f_max = self.timeseries_data[self.feat_column].max()
f_min = self.timeseries_data[self.feat_column].min()
q1, q2 = self.timeseries_data[self.feat_column].quantile([0.02, 0.98])
else:
f_min, f_max, q1, q2 = 0,0,0,0
self.ui.feat_max_value.setRange(f_min, f_max)
self.ui.feat_min_value.setRange(f_min, f_max)
self.ui.feat_min_value.setValue(q1)
self.ui.feat_max_value.setValue(q2)
def enable_color_feats(self, value):
self.ui.feature_column.setEnabled(value)
self.ui.feat_min_value.setEnabled(value)
self.ui.feat_max_value.setEnabled(value)
self.ui.is_color_features.setEnabled(value)
def _h_assign_feat_color(self, irow):
feat_val = self.timeseries_data.loc[irow, self.feat_column]
        if feat_val != feat_val:  # NaN check
return Qt.black
#this function can and should be optimized
f_min = self.ui.feat_min_value.value()
f_max = self.ui.feat_max_value.value()
if f_min == f_max: #dummy range in case all the values are the same
f_min, f_max = -1, 1
elif f_min > f_max:
return Qt.black
nn = np.clip((feat_val - f_min)/(f_max - f_min), 0, 1)
ind = int(np.round(nn*(self.n_colors-1)))
col = self.palette[ind]
return col
def draw_worm_markers(self, image):
'''
Draw traj worm trajectory.
'''
if not self.worm_index_type in self.frame_data or \
self.ui.comboBox_showLabels.currentIndex() == self.showT['hide']:
return
if hasattr(self, 'current_worm_index'):
current_index = self.current_worm_index
else:
current_index = -1
painter = QPainter()
painter.begin(image)
self.fontsize = max(1, max(image.height(), image.width()) // 120)
penwidth = max(1, max(image.height(), image.width()) // 800)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
if not self.label_type in self.frame_data:
self.frame_data[self.label_type] = self.wlab['U']
for row_id, row_data in self.frame_data.iterrows():
# check if the coordinates are nan
if np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):
continue
            # skip drawing when only the filtered indexes are selected
if self.ui.comboBox_showLabels.currentIndex() == self.showT['filter']:
continue
is_current_index = current_index == int(row_data[self.worm_index_type])
cb_ind = self.ui.comboBox_drawType.currentIndex()
if cb_ind == self.drawT['boxes']:
self.draw_boxes(painter, row_id, row_data, is_current_index)
elif cb_ind == self.drawT['traj']:
self.draw_trajectories(painter, row_data, is_current_index)
painter.end()
def _h_get_trajectory(self, worm_index, current_frame):
worm_data = self.traj_worm_index_grouped.get_group(worm_index)
valid_index = worm_data.index[worm_data['frame_number']<= current_frame]
ini = max(0, valid_index.size - self.frame_step*self.n_points_traj)
traj_ind = valid_index.values[ini::self.frame_step]
traj_data = worm_data.loc[traj_ind]
return traj_data
def draw_trajectories(self, painter, row_data, is_current_index):
if self.traj_worm_index_grouped is None:
return
worm_index = int(row_data[self.worm_index_type])
current_frame = row_data['frame_number']
traj_data = self._h_get_trajectory(worm_index, current_frame)
traj_data = traj_data.dropna(subset=['coord_x', 'coord_y'])
x_v = traj_data['coord_x'].round()
y_v = traj_data['coord_y'].round()
points = [QPointF(*map(int, c)) for c in zip(x_v, y_v)]
if self.ui.is_color_features.isChecked():
vec_color = [self._h_assign_feat_color(x) for x in traj_data.index]
pen = QPen()
pen.setWidth(self.penwidth)
for p1, p2, c in zip(points[1:], points[:-1], vec_color):
pen.setColor(c)
painter.setPen(pen)
painter.drawLine(p1, p2)
else:
pol = QPolygonF()
for p in points:
pol.append(p)
if not worm_index in self.traj_colors:
self.traj_colors[worm_index] = QColor(*np.random.randint(50, 230, 3))
col = self.traj_colors[worm_index]
pen = QPen()
pen.setWidth(self.penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(pol)
def draw_boxes(self, painter, row_id, row_data, is_current_index):
'''
Draw traj worm trajectory.
'''
worm_index = int(row_data[self.worm_index_type])
x = int(round(row_data['coord_x']))
y = int(round(row_data['coord_y']))
        if not self.ui.is_color_features.isChecked():
            label_color = self.wlabC[int(row_data[self.label_type])]
        else:
            label_color = self._h_assign_feat_color(row_id)
pen = QPen()
pen.setColor(label_color)
pen.setWidth(self.penwidth)
painter.setPen(pen)
painter.setFont(QFont('Decorative', self.fontsize))
painter.drawText(x, y, str(worm_index))
bb = row_data['roi_size']
painter.drawRect(x - bb / 2, y - bb / 2, bb, bb)
if is_current_index:
b_size = bb//5
offset = bb/2 - b_size
painter.fillRect(x + offset, y + offset, b_size, b_size, QBrush(label_color))
class PlotCommunicator(FeatureReaderBase, ROIManager):
def __init__(self, ui=''):
super().__init__(ui)
self.ui.pushButton_plot.setEnabled(False)
self.ui.pushButton_plot.clicked.connect(self.show_plot)
self.plotter = None
def closePrev(self):
if self.plotter is not None:
self.plotter.close()
self.plotter = None
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
self.closePrev()
if self.timeseries_data is None:
self.ui.pushButton_plot.setEnabled(False)
else:
self.ui.pushButton_plot.setEnabled(True)
def show_plot(self):
self.closePrev()
self.plotter = PlotFeatures(self.skeletons_file,
self.timeseries_data,
self.traj_worm_index_grouped,
self.time_units,
self.xy_units,
self.fps,
parent = self)
self.plotter.setWindowFlags(self.plotter.windowFlags() | Qt.WindowStaysOnTopHint)
self.plotter.show()
self.update_plot()
def update_plot(self):
if self.plotter:
self.plotter.plot(self.current_worm_index, self.feat_column)
class MWTrackerViewer_GUI( MarkersDrawer, PlotCommunicator,
ContourDrawer, BlobLabeler, IntensityLabeler, TrajectoryEditor, WellsDrawer):
def __init__(self, ui='', argv=''):
if not ui:
super().__init__(Ui_MWTrackerViewer())
else:
super().__init__(ui)
self.setWindowTitle("Multi-Worm Viewer")
self.vfilename = '' if len(argv) <= 1 else argv[1]
self.videos_dir = r"/Volumes/behavgenom$/GeckoVideo/MaskedVideos/"
self.results_dir = ''
self.skeletons_file = ''
self.worm_index_type = 'worm_index_manual'
self.frame_data = None
self.ui.comboBox_labelType.currentIndexChanged.connect(self.selectWormIndexType)
self.ui.pushButton_save.clicked.connect(self.saveData)
# select worm ROI when doubleclick a worm
self.mainImage._canvas.mouseDoubleClickEvent = self.selectWorm
self.mainImage._canvas.mouseRightClickEvent = self.toggleWellStatus
self.ui.comboBox_ROI1.activated.connect(self.update_plot)
self.ui.comboBox_ROI2.activated.connect(self.update_plot)
def saveData(self):
'''save data from manual labelling. pytables saving format is more convenient than pandas'''
if os.name == 'nt':
            # In Windows the paths returned by QFileDialog use / as the file
            # separator. We need to correct it.
for field_name in ['vfilename', 'skeletons_file']:
setattr(
self, field_name, getattr(
self, field_name).replace(
'/', os.sep))
has_skeletons_file = ((self.skeletons_file is not None)
and (self.skeletons_file != ''))
if has_skeletons_file:
save_modified_table(self.skeletons_file,
self.trajectories_data,
'trajectories_data')
if self.is_fov_tosplit:
if has_skeletons_file:
self.fovsplitter.write_fov_wells_to_file(self.skeletons_file)
else:
warnings.warn('No skeletons file. Saving wells info in masked video')
self.fid.close()
self.fovsplitter.write_fov_wells_to_file(self.vfilename)
# self.fid = tables.File(self.vfilename, 'r')
self.updateVideoFile(self.vfilename)
if has_skeletons_file:
self.updateSkelFile(self.skeletons_file)
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
self.updateImage()
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if self.trajectories_data is None:
#empty file nothing to do here
self.updateImage()
return
#correct the `worm_index_N` to the actual name `worm_index_manual`
if 'worm_index_N' in self.trajectories_data:
self.trajectories_data = self.trajectories_data.rename(
columns={'worm_index_N': 'worm_index_manual'})
        #if this is really a trajectories_data table (not a _features.hdf5 one) add `worm_index_manual` if it does not exist
if not 'worm_index_manual' in self.trajectories_data and not self.is_estimated_trajectories_data:
self.trajectories_data['worm_label'] = self.wlab['U']
self.trajectories_data['worm_index_manual'] = self.trajectories_data['worm_index_joined']
        #deactivate the save option if we are dealing with estimated data...
self.ui.pushButton_save.setEnabled(not self.is_estimated_trajectories_data)
#add this column if it does not exist
if not 'has_skeleton' in self.trajectories_data:
self.trajectories_data['has_skeleton'] = self.trajectories_data['skeleton_id'] >= 0
self.updateWormIndexTypeMenu()
self.updateImage()
def updateWormIndexTypeMenu(self):
possible_indexes = [x.replace('worm_index_', '') for x in self.trajectories_data.columns if x.startswith('worm_index_')]
assert len(set(possible_indexes)) == len(possible_indexes) #all indexes ending must be different
menu_names = sorted([x + ' index' for x in possible_indexes])
self.ui.comboBox_labelType.clear()
self.ui.comboBox_labelType.addItems(menu_names)
if 'manual' in possible_indexes:
dd = self.ui.comboBox_labelType.findText('manual index')
            self.ui.comboBox_labelType.setCurrentIndex(dd)
self.selectWormIndexType()
def selectWormIndexType(self):
index_option = self.ui.comboBox_labelType.currentText()
if not index_option:
return
assert index_option.endswith(' index')
self.worm_index_type = 'worm_index_' + index_option.replace(' index', '')
# select between automatic and manual worm indexing and label
if self.worm_index_type == 'worm_index_manual':
self.label_type = 'worm_label'
self.enable_trajectories_buttons(True)
self.enable_label_buttons(True)
else:
self.label_type = 'auto_label'
self.enable_trajectories_buttons(False)
self.enable_label_buttons(False)
#recalculate the grouped indexes
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
# update image
def updateImage(self):
if (self.image_group is None) and (self.isimgstore is False):
return
super(TrackerViewerAuxGUI, self).readCurrentFrame()
# read the data of the particles that exists in the frame
self.frame_data = self.getFrameData(self.frame_number)
#draw extra info only if the worm_index_type is valid
if self.frame_data is not None and \
self.worm_index_type in self.frame_data:
#filter any -1 index
self.frame_data = self.frame_data[self.frame_data[self.worm_index_type]>=0]
if self.frame_data.size > 0:
self.draw_worm_markers(self.frame_qimg)
self.draw_food_contour(self.frame_qimg)
self.updateROIs()
else:
self.clearROIs()
# plot wells
self.draw_wells(self.frame_qimg)
# create the pixmap for the label
self.mainImage.setPixmap(self.frame_qimg)
self.display_intensity()
def selectWorm(self, event):
x = event.pos().x()
y = event.pos().y()
print(x,y)
if self.frame_data is None or self.frame_data.size == 0:
return
R = (x - self.frame_data['coord_x'])**2 + \
(y - self.frame_data['coord_y'])**2
ind = R.idxmin()
good_row = self.frame_data.loc[ind]
if np.sqrt(R.loc[ind]) < good_row['roi_size']:
self.current_roi.worm_index = int(good_row[self.worm_index_type])
self.update_plot()
self.updateImage()
def toggleWellStatus(self, event):
# abort if not multifov
if self.is_fov_tosplit != True:
return
# event is for sure a right click or this does not get called
x = event.pos().x()
y = event.pos().y()
# this will always return something. n/a if clicking outside a well
well_name = self.fovsplitter.find_well_of_xy(x, y)[0].decode('utf-8')
idx = self.fovsplitter.wells['well_name'] == str(well_name)
self.fovsplitter.wells.loc[idx, 'is_good_well'] = \
np.mod(self.fovsplitter.wells.loc[idx, 'is_good_well']+1, 2)
# print(self.fovsplitter.wells)
self.updateImage()
def joinTraj(self):
super().joinTraj()
self.update_plot()
def splitTraj(self):
super().splitTraj()
self.update_plot()
def change_feature(self):
super().change_feature()
self.update_plot()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
main = MWTrackerViewer_GUI(argv=sys.argv)
#mask_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709.hdf5'
#mask_file = '/Volumes/rescomp1/data/WormData/screenings/Pratheeban/First_Set/MaskedVideos/Old_Adult/16_07_22/W3_ELA_1.0_Ch1_22072016_131149.hdf5'
#mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/AVI_VIDEOS/MaskedVideos/AVI_VIDEOS_1.hdf5'
# mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/WT2/MaskedVideos/WT2.hdf5'
mask_file = '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY_forGUI/MaskedVideos/20191205/syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'
main.updateVideoFile(mask_file)
main.show()
sys.exit(app.exec_())
|
the-stack_0_480 | from typing import List
import numpy as np
class DNNLayer:
def __init__(self, out_shape, depends_on: List["DNNLayer"] = tuple(), param_count=0):
assert out_shape is not None # get around varargs restriction
self.extra_repr_params = {}
self.unique_idx = "{}{:02d}".format(self.__class__.__name__, id(self) % 100)
self.out_shape = out_shape
self.depends_on = depends_on
self.param_count = param_count
def __repr__(self):
args = self.extra_repr_params
args["out_shape"] = self.out_shape
args["param_count"] = self.param_count
args["depends_on"] = "[{}]".format(", ".join([x.unique_idx for x in self.depends_on]))
return "{}({})".format(self.unique_idx, ",".join(["{}={}".format(k, v) for k, v in args.items()]))
class QueryKeyValueMatrix(DNNLayer):
# Fusing Query, Key, And Value into 1
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(3 * SEQ_LEN,I,ATTN_HEADS), # [seq_lean X intermediate_vector_dim] for 12 heads
depends_on=[input] if input is not None else [],
param_count=3 * HIDDEN_DIM*I*ATTN_HEADS)
self.flop = 3 * SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS
class QKTMatrix(DNNLayer):
# Fusing Masking and Dropout
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I,ATTN_HEADS),
depends_on=[input] if input is not None else [], # Different to accept a list
param_count=0)
self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + np.prod(self.out_shape) + np.prod(self.out_shape) # QKT + mask + dropout
class Mask(DNNLayer):
def __init__(self, input: DNNLayer):
super().__init__(
out_shape=input.out_shape,
depends_on=[input] if input is not None else [],
param_count=0)
self.flop = np.prod(self.out_shape)
class QKTVMatrix(DNNLayer):
# QKTV + Concat
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I * ATTN_HEADS),
depends_on=[input] if input is not None else [],
param_count=0)
self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS # QKTVMatrix + Concat
class Concat(DNNLayer):
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I * ATTN_HEADS),
depends_on=[input] if input is not None else [],
param_count=HIDDEN_DIM*I*ATTN_HEADS)
# self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS
self.flop = 0
class LinearLayerReLU(DNNLayer):
def __init__(self, in_features: int, out_features: int, input: DNNLayer):
super().__init__(
self.find_outshape(in_features, out_features, input),
[input] if input is not None else [],
param_count=((in_features + 1) * out_features),
)
self.extra_repr_params["in_features"] = in_features
self.extra_repr_params["out_features"] = out_features
self.in_features = in_features
self.out_features = out_features
self.flop = 2 * self.param_count + self.out_features + np.prod(self.out_shape) # (Linear) + ReLU
def find_outshape(self, in_features, out_features, input):
assert len(input.out_shape) == 2 and input.out_shape[1] == in_features, f"{input.out_shape}, {in_features}"
return (input.out_shape[0], out_features)
def selfattn_flop(B, H, K, Tc, Tg, cache_length=0):
assert cache_length >= 0, "cache_length should be non-negative"
x = DNNLayer(out_shape=(B, Tc, H))
qkt = QKTMatrix(SEQ_LEN=Tc, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
mask = Mask(input=x)
flops = qkt.flop + mask.flop
for i in range(1, Tg):
x = DNNLayer(out_shape=(B, Tc + i, H))
if i <= cache_length:
qkt = QKTMatrix(SEQ_LEN=1, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
else:
qkt = QKTMatrix(SEQ_LEN=Tc + i, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
flops += qkt.flop
print(f"selfattn_flop: {flops}")
return flops
if __name__ == "__main__":
hparams = {"117M": (12, 768), "345M": (24, 1024), "762M": (36, 1280), "1542M": (48, 1600)}
K = 4
B, H = hparams["117M"]
Tc = 128
Tg = 128
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0)
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=64)
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=128)
|
the-stack_0_481 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TradeFundBillDetail(object):
def __init__(self):
self._amount = None
self._asset_type_code = None
self._asset_user_id = None
self._biz_pay_type = None
self._create_time = None
self._payment_no = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def asset_type_code(self):
return self._asset_type_code
@asset_type_code.setter
def asset_type_code(self, value):
self._asset_type_code = value
@property
def asset_user_id(self):
return self._asset_user_id
@asset_user_id.setter
def asset_user_id(self, value):
self._asset_user_id = value
@property
def biz_pay_type(self):
return self._biz_pay_type
@biz_pay_type.setter
def biz_pay_type(self, value):
self._biz_pay_type = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def payment_no(self):
return self._payment_no
@payment_no.setter
def payment_no(self, value):
self._payment_no = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.asset_type_code:
if hasattr(self.asset_type_code, 'to_alipay_dict'):
params['asset_type_code'] = self.asset_type_code.to_alipay_dict()
else:
params['asset_type_code'] = self.asset_type_code
if self.asset_user_id:
if hasattr(self.asset_user_id, 'to_alipay_dict'):
params['asset_user_id'] = self.asset_user_id.to_alipay_dict()
else:
params['asset_user_id'] = self.asset_user_id
if self.biz_pay_type:
if hasattr(self.biz_pay_type, 'to_alipay_dict'):
params['biz_pay_type'] = self.biz_pay_type.to_alipay_dict()
else:
params['biz_pay_type'] = self.biz_pay_type
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.payment_no:
if hasattr(self.payment_no, 'to_alipay_dict'):
params['payment_no'] = self.payment_no.to_alipay_dict()
else:
params['payment_no'] = self.payment_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TradeFundBillDetail()
if 'amount' in d:
o.amount = d['amount']
if 'asset_type_code' in d:
o.asset_type_code = d['asset_type_code']
if 'asset_user_id' in d:
o.asset_user_id = d['asset_user_id']
if 'biz_pay_type' in d:
o.biz_pay_type = d['biz_pay_type']
if 'create_time' in d:
o.create_time = d['create_time']
if 'payment_no' in d:
o.payment_no = d['payment_no']
return o
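

if __name__ == '__main__':
    # Round-trip sketch (not part of the generated SDK code): build a detail
    # object from a plain dict and serialize it back. The field values are
    # purely illustrative placeholders.
    sample = {
        'amount': '12.50',
        'asset_type_code': 'ALIPAY_ACCOUNT',
        'asset_user_id': '2088000000000001',
        'biz_pay_type': 'EXPENSE',
        'create_time': '2020-01-01 10:00:00',
        'payment_no': '20200101000000000001',
    }
    detail = TradeFundBillDetail.from_alipay_dict(sample)
    assert detail.to_alipay_dict() == sample
    print(json.dumps(detail.to_alipay_dict(), indent=2))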
|
the-stack_0_483 | '''
There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
**Example 1**
`Input: numCourses = 2, prerequisites = [[1,0]]`
`Output: true`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
**Example 2**
`Input: numCourses = 2, prerequisites = [[1,0],[0,1]]`
`Output: false`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should
also have finished course 1. So it is impossible.
**Note**
You may assume that there are no duplicate edges in the input prerequisites.
'''
from collections import defaultdict
class Solution(object):
def __init__(self):
self.eligibleCourses = []
self.visited = []
def seedEligibleCourses(self, g):
for index, node in g.items():
if len(node) == 0 and index not in self.visited:
self.eligibleCourses.append(index)
def dfs(self, node, g):
if node in self.visited:
return
self.visited.append(node)
for _, n in g.items():
if node in n:
n.remove(node)
for successor in g[node]:
if successor not in self.visited:
self.eligibleCourses.append(successor)
def canFinish(self, numCourses, prerequisites):
if not prerequisites:
return True
graph = defaultdict(list)
for relation in prerequisites:
currentCourse, prerequisite = relation[0], relation[1]
graph[prerequisite].append(currentCourse) # post order!!
if currentCourse not in graph:
graph[currentCourse] = []
self.seedEligibleCourses(graph)
while self.eligibleCourses:
current = self.eligibleCourses.pop(0)
self.dfs(current, graph)
self.seedEligibleCourses(graph)
for _, n in graph.items():
if len(n) > 0:
return False
return True
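

if __name__ == '__main__':
    # Usage sketch (not part of the original solution): the two examples from
    # the problem statement above. The solver keeps state between calls, so a
    # fresh Solution instance is created for each query.
    print(Solution().canFinish(2, [[1, 0]]))          # True
    print(Solution().canFinish(2, [[1, 0], [0, 1]]))  # False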
|