max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---
test/tests.py | gzu300/Linear_Algebra | 0 | 8900 | <filename>test/tests.py
import unittest
from pkg import Linear_Algebra
import numpy as np
class TestLU(unittest.TestCase):
def setUp(self):
self.U_answer = np.around(np.array([[2,1,0],[0,3/2,1],[0,0,4/3]], dtype=float), decimals=2).tolist()
self.L_answer = np.around(np.array([[1,0,0],[1/2,1,0],[0,2/3,1]], dtype=float), decimals=2).tolist()
def test_perm(self):
answer = np.array([[0,1,0], [1,0,0], [0,0,1]], dtype=float).tolist()
result = Linear_Algebra.make_perm_mx(3, 0, 1).tolist()
self.assertEqual(result, answer)
def test_LU(self):
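        # Decompose a symmetric tridiagonal 3x3 matrix and check the L and U factors against the expected values, rounded to 2 decimals.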
L_result, U_result = np.around(Linear_Algebra.LU(np.array([[2,1,0],[1,2,1],[0,1,2]], dtype=float)), decimals=2).tolist()
self.assertEqual(U_result, self.U_answer)
self.assertEqual(L_result, self.L_answer)
class TestDet(unittest.TestCase):
def setUp(self):
self.input_mx = np.array([[2,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,2]], dtype=float)
def test_find_det(self):
result = np.around(Linear_Algebra.find_det(A = self.input_mx), decimals=2).tolist()
answer = np.around(5, decimals=2).tolist()
self.assertEqual(result, answer)
if __name__ == '__main__':
unittest.main() | <filename>test/tests.py
import unittest
from pkg import Linear_Algebra
import numpy as np
class TestLU(unittest.TestCase):
def setUp(self):
self.U_answer = np.around(np.array([[2,1,0],[0,3/2,1],[0,0,4/3]], dtype=float), decimals=2).tolist()
self.L_answer = np.around(np.array([[1,0,0],[1/2,1,0],[0,2/3,1]], dtype=float), decimals=2).tolist()
def test_perm(self):
answer = np.array([[0,1,0], [1,0,0], [0,0,1]], dtype=float).tolist()
result = Linear_Algebra.make_perm_mx(3, 0, 1).tolist()
self.assertEqual(result, answer)
def test_LU(self):
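        # Decompose a symmetric tridiagonal 3x3 matrix and check the L and U factors against the expected values, rounded to 2 decimals.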
L_result, U_result = np.around(Linear_Algebra.LU(np.array([[2,1,0],[1,2,1],[0,1,2]], dtype=float)), decimals=2).tolist()
self.assertEqual(U_result, self.U_answer)
self.assertEqual(L_result, self.L_answer)
class TestDet(unittest.TestCase):
def setUp(self):
self.input_mx = np.array([[2,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,2]], dtype=float)
def test_find_det(self):
result = np.around(Linear_Algebra.find_det(A = self.input_mx), decimals=2).tolist()
answer = np.around(5, decimals=2).tolist()
self.assertEqual(result, answer)
if __name__ == '__main__':
unittest.main() | none | 1 | 3.038956 | 3 |
|
src/backend/common/models/favorite.py | ofekashery/the-blue-alliance | 266 | 8901 | from backend.common.models.mytba import MyTBAModel
class Favorite(MyTBAModel):
"""
In order to make strongly consistent DB requests, instances of this class
should be created with a parent that is the associated Account key.
"""
def __init__(self, *args, **kwargs):
super(Favorite, self).__init__(*args, **kwargs)
| from backend.common.models.mytba import MyTBAModel
class Favorite(MyTBAModel):
"""
In order to make strongly consistent DB requests, instances of this class
should be created with a parent that is the associated Account key.
"""
def __init__(self, *args, **kwargs):
super(Favorite, self).__init__(*args, **kwargs)
| en | 0.956623 | In order to make strongly consistent DB requests, instances of this class should be created with a parent that is the associated Account key. | 2.327444 | 2 |
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py | MontyThibault/centre-of-mass-awareness | 27 | 8902 | #----------------------------------------------------------------------
# Name: wxPython.lib.filebrowsebutton
# Purpose: Composite controls that provide a Browse button next to
# either a wxTextCtrl or a wxComboBox. The Browse button
# launches a wxFileDialog and loads the result into the
# other control.
#
# Author: <NAME>
#
# RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - <NAME> (<EMAIL>)
#
# o 2.5 Compatibility changes
#
import os
import types
import wx
#----------------------------------------------------------------------
class FileBrowseButton(wx.Panel):
"""
A control to allow the user to type in a filename or browse with
the standard file dialog to select a file
"""
def __init__ (self, parent, id= -1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText= "File Entry:",
buttonText= "Browse",
toolTip= "Type filename or click browse to choose file",
# following are the values for a file dialog box
dialogTitle = "Choose a file",
startDirectory = ".",
initialValue = "",
fileMask = "*.*",
fileMode = wx.OPEN,
# callback for when value changes (optional)
changeCallback= lambda x:x,
labelWidth = 0,
name = 'fileBrowseButton',
):
"""
:param labelText: Text for label to left of text field
:param buttonText: Text for button which launches the file dialog
:param toolTip: Help text
:param dialogTitle: Title used in file dialog
:param startDirectory: Default directory for file dialog startup
:param fileMask: File mask (glob pattern, such as *.*) to use in file dialog
:param fileMode: wx.OPEN or wx.SAVE, indicates type of file dialog to use
:param changeCallback: Optional callback called for all changes in value of the control
:param labelWidth: Width of the label
"""
# store variables
self.labelText = labelText
self.buttonText = buttonText
self.toolTip = toolTip
self.dialogTitle = dialogTitle
self.startDirectory = startDirectory
self.initialValue = initialValue
self.fileMask = fileMask
self.fileMode = fileMode
self.changeCallback = changeCallback
self.callCallback = True
self.labelWidth = labelWidth
# create the dialog
self.createDialog(parent, id, pos, size, style, name )
# Setting a value causes the changeCallback to be called.
# In this case that would be before the return of the
# constructor. Not good. So a default value on
# SetValue is used to disable the callback
self.SetValue( initialValue, 0)
def createDialog( self, parent, id, pos, size, style, name ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name)
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
self.label = self.createLabel( )
box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.LEFT|wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
# add a border around the whole thing and resize the panel to fit
outsidebox = wx.BoxSizer(wx.VERTICAL)
outsidebox.Add(box, 1, wx.EXPAND|wx.ALL, 3)
outsidebox.Fit(self)
self.SetAutoLayout(True)
self.SetSizer( outsidebox )
self.Layout()
if type( size ) == types.TupleType:
size = apply( wx.Size, size)
self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
# if size.width != -1 or size.height != -1:
# self.SetSize(size)
def SetBackgroundColour(self,color):
wx.Panel.SetBackgroundColour(self,color)
self.label.SetBackgroundColour(color)
def createLabel( self ):
"""Create the label/caption"""
label = wx.StaticText(self, -1, self.labelText, style =wx.ALIGN_RIGHT )
font = label.GetFont()
w, h, d, e = self.GetFullTextExtent(self.labelText, font)
if self.labelWidth > 0:
label.SetSize((self.labelWidth+5, h))
else:
label.SetSize((w+5, h))
return label
def createTextControl( self):
"""Create the text control"""
textControl = wx.TextCtrl(self, -1)
textControl.SetToolTipString( self.toolTip )
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
return textControl
def OnChanged(self, evt):
if self.callCallback and self.changeCallback:
self.changeCallback(evt)
def createBrowseButton( self):
"""Create the browse-button control"""
button =wx.Button(self, -1, self.buttonText)
button.SetToolTipString( self.toolTip )
button.Bind(wx.EVT_BUTTON, self.OnBrowse)
return button
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
directory = os.path.split(current)
if os.path.isdir( current):
directory = current
current = ''
elif directory and os.path.isdir( directory[0] ):
current = directory[1]
directory = directory [0]
else:
directory = self.startDirectory
current = ''
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue(dlg.GetPath())
dlg.Destroy()
def GetValue (self):
"""
retrieve current value of text control
"""
return self.textControl.GetValue()
def SetValue (self, value, callBack=1):
"""set current value of text control"""
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
def Enable (self, value=True):
""" Convenient enabling/disabling of entire control """
self.label.Enable (value)
self.textControl.Enable (value)
return self.browseButton.Enable (value)
def Disable (self,):
""" Convenient disabling of entire control """
self.Enable(False)
def GetLabel( self ):
""" Retrieve the label's current text """
return self.label.GetLabel()
def SetLabel( self, value ):
""" Set the label's current text """
rvalue = self.label.SetLabel( value )
self.Refresh( True )
return rvalue
class FileBrowseButtonWithHistory( FileBrowseButton ):
"""
with following additions:
__init__(..., history=None)
history -- optional list of paths for initial history drop-down
(must be passed by name, not a positional argument)
If history is callable it must return a list used
for the history drop-down
changeCallback -- as for FileBrowseButton, but with a work-around
for win32 systems which don't appear to create wx.EVT_COMBOBOX
events properly. There is a (slight) chance that this work-around
will cause some systems to create two events for each Combobox
selection. If you discover this condition, please report it!
As for a FileBrowseButton.__init__ otherwise.
GetHistoryControl()
Return reference to the control which implements interfaces
required for manipulating the history list. See GetHistoryControl
documentation for description of what that interface is.
GetHistory()
Return current history list
SetHistory( value=(), selectionIndex = None )
Set current history list, if selectionIndex is not None, select that index
"""
def __init__( self, *arguments, **namedarguments):
self.history = namedarguments.get( "history" )
if self.history:
del namedarguments["history"]
self.historyCallBack=None
if callable(self.history):
self.historyCallBack=self.history
self.history=None
name = namedarguments.get('name', 'fileBrowseButtonWithHistory')
namedarguments['name'] = name
FileBrowseButton.__init__(self, *arguments, **namedarguments)
def createTextControl( self):
"""Create the text control"""
textControl = wx.ComboBox(self, -1, style = wx.CB_DROPDOWN )
textControl.SetToolTipString( self.toolTip )
textControl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
if self.history:
history=self.history
self.history=None
self.SetHistory( history, control=textControl)
return textControl
def GetHistoryControl( self ):
"""
Return a pointer to the control which provides (at least)
the following methods for manipulating the history list:
Append( item ) -- add item
Clear() -- clear all items
Delete( index ) -- 0-based index to delete from list
SetSelection( index ) -- 0-based index to select in list
Semantics of the methods follow those for the wxComboBox control
"""
return self.textControl
def SetHistory( self, value=(), selectionIndex = None, control=None ):
"""Set the current history list"""
if control is None:
control = self.GetHistoryControl()
if self.history == value:
return
self.history = value
# Clear history values not the selected one.
tempValue=control.GetValue()
# clear previous values
control.Clear()
control.SetValue(tempValue)
# walk through, appending new values
for path in value:
control.Append( path )
if selectionIndex is not None:
control.SetSelection( selectionIndex )
def GetHistory( self ):
"""Return the current history list"""
if self.historyCallBack != None:
return self.historyCallBack()
elif self.history:
return list( self.history )
else:
return []
def OnSetFocus(self, event):
"""When the history scroll is selected, update the history"""
if self.historyCallBack != None:
self.SetHistory( self.historyCallBack(), control=self.textControl)
event.Skip()
if wx.Platform == "__WXMSW__":
def SetValue (self, value, callBack=1):
""" Convenient setting of text control value, works
around limitation of wx.ComboBox """
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
# Hack to call an event handler
class LocalEvent:
def __init__(self, string):
self._string=string
def GetString(self):
return self._string
if callBack==1:
# The callback wasn't being called when SetValue was used ??
# So added this explicit call to it
self.changeCallback(LocalEvent(value))
class DirBrowseButton(FileBrowseButton):
def __init__(self, parent, id = -1,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText = 'Select a directory:',
buttonText = 'Browse',
toolTip = 'Type directory name or browse to select',
dialogTitle = '',
startDirectory = '.',
changeCallback = None,
dialogClass = wx.DirDialog,
newDirectory = False,
name = 'dirBrowseButton'):
FileBrowseButton.__init__(self, parent, id, pos, size, style,
labelText, buttonText, toolTip,
dialogTitle, startDirectory,
changeCallback = changeCallback,
name = name)
self.dialogClass = dialogClass
self.newDirectory = newDirectory
#
def OnBrowse(self, ev = None):
style=0
if not self.newDirectory:
style |= wx.DD_DIR_MUST_EXIST
dialog = self.dialogClass(self,
message = self.dialogTitle,
defaultPath = self.startDirectory,
style = style)
if dialog.ShowModal() == wx.ID_OK:
self.SetValue(dialog.GetPath())
dialog.Destroy()
#
#----------------------------------------------------------------------
if __name__ == "__main__":
#from skeletonbuilder import rulesfile
class SimpleCallback:
def __init__( self, tag ):
self.tag = tag
def __call__( self, event ):
print self.tag, event.GetString()
class DemoFrame( wx.Frame ):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "File entry with browse", size=(500,260))
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
panel = wx.Panel (self,-1)
innerbox = wx.BoxSizer(wx.VERTICAL)
control = FileBrowseButton(
panel,
initialValue = "z:\\temp",
)
innerbox.Add( control, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "With History",
initialValue = "d:\\temp",
history = ["c:\\temp", "c:\\tmp", "r:\\temp","z:\\temp"],
changeCallback= SimpleCallback( "With History" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "History callback",
initialValue = "d:\\temp",
history = self.historyCallBack,
changeCallback= SimpleCallback( "History callback" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
self.bottomcontrol = control = FileBrowseButton(
panel,
labelText = "With Callback",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN ,
changeCallback= SimpleCallback( "With Callback" ),
)
innerbox.Add( control, 0, wx.EXPAND)
self.bottommostcontrol = control = DirBrowseButton(
panel,
labelText = "Simple dir browse button",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN)
innerbox.Add( control, 0, wx.EXPAND)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Label", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeLabel , id=ID)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Value", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeValue, id=ID )
panel.SetAutoLayout(True)
panel.SetSizer( innerbox )
self.history={"c:\\temp":1, "c:\\tmp":1, "r:\\temp":1,"z:\\temp":1}
def historyCallBack(self):
keys=self.history.keys()
keys.sort()
return keys
def OnFileNameChangedHistory (self, event):
self.history[event.GetString ()]=1
def OnCloseMe(self, event):
self.Close(True)
def OnChangeLabel( self, event ):
self.bottomcontrol.SetLabel( "Label Updated" )
def OnChangeValue( self, event ):
self.bottomcontrol.SetValue( "r:\\somewhere\\over\\the\\rainbow.htm" )
def OnCloseWindow(self, event):
self.Destroy()
class DemoApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = DemoFrame(None)
frame.Show(True)
self.SetTopWindow(frame)
return True
def test( ):
app = DemoApp(0)
app.MainLoop()
print 'Creating dialog'
test( )
| #----------------------------------------------------------------------
# Name: wxPython.lib.filebrowsebutton
# Purpose: Composite controls that provide a Browse button next to
# either a wxTextCtrl or a wxComboBox. The Browse button
# launches a wxFileDialog and loads the result into the
# other control.
#
# Author: <NAME>
#
# RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - <NAME> (<EMAIL>)
#
# o 2.5 Compatibility changes
#
import os
import types
import wx
#----------------------------------------------------------------------
class FileBrowseButton(wx.Panel):
"""
A control to allow the user to type in a filename or browse with
the standard file dialog to select a file
"""
def __init__ (self, parent, id= -1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText= "File Entry:",
buttonText= "Browse",
toolTip= "Type filename or click browse to choose file",
# following are the values for a file dialog box
dialogTitle = "Choose a file",
startDirectory = ".",
initialValue = "",
fileMask = "*.*",
fileMode = wx.OPEN,
# callback for when value changes (optional)
changeCallback= lambda x:x,
labelWidth = 0,
name = 'fileBrowseButton',
):
"""
:param labelText: Text for label to left of text field
:param buttonText: Text for button which launches the file dialog
:param toolTip: Help text
:param dialogTitle: Title used in file dialog
:param startDirectory: Default directory for file dialog startup
:param fileMask: File mask (glob pattern, such as *.*) to use in file dialog
:param fileMode: wx.OPEN or wx.SAVE, indicates type of file dialog to use
:param changeCallback: Optional callback called for all changes in value of the control
:param labelWidth: Width of the label
"""
# store variables
self.labelText = labelText
self.buttonText = buttonText
self.toolTip = toolTip
self.dialogTitle = dialogTitle
self.startDirectory = startDirectory
self.initialValue = initialValue
self.fileMask = fileMask
self.fileMode = fileMode
self.changeCallback = changeCallback
self.callCallback = True
self.labelWidth = labelWidth
# create the dialog
self.createDialog(parent, id, pos, size, style, name )
# Setting a value causes the changeCallback to be called.
# In this case that would be before the return of the
# constructor. Not good. So a default value on
# SetValue is used to disable the callback
self.SetValue( initialValue, 0)
def createDialog( self, parent, id, pos, size, style, name ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name)
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
self.label = self.createLabel( )
box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.LEFT|wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
# add a border around the whole thing and resize the panel to fit
outsidebox = wx.BoxSizer(wx.VERTICAL)
outsidebox.Add(box, 1, wx.EXPAND|wx.ALL, 3)
outsidebox.Fit(self)
self.SetAutoLayout(True)
self.SetSizer( outsidebox )
self.Layout()
if type( size ) == types.TupleType:
size = apply( wx.Size, size)
self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
# if size.width != -1 or size.height != -1:
# self.SetSize(size)
def SetBackgroundColour(self,color):
wx.Panel.SetBackgroundColour(self,color)
self.label.SetBackgroundColour(color)
def createLabel( self ):
"""Create the label/caption"""
label = wx.StaticText(self, -1, self.labelText, style =wx.ALIGN_RIGHT )
font = label.GetFont()
w, h, d, e = self.GetFullTextExtent(self.labelText, font)
if self.labelWidth > 0:
label.SetSize((self.labelWidth+5, h))
else:
label.SetSize((w+5, h))
return label
def createTextControl( self):
"""Create the text control"""
textControl = wx.TextCtrl(self, -1)
textControl.SetToolTipString( self.toolTip )
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
return textControl
def OnChanged(self, evt):
if self.callCallback and self.changeCallback:
self.changeCallback(evt)
def createBrowseButton( self):
"""Create the browse-button control"""
button =wx.Button(self, -1, self.buttonText)
button.SetToolTipString( self.toolTip )
button.Bind(wx.EVT_BUTTON, self.OnBrowse)
return button
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
directory = os.path.split(current)
if os.path.isdir( current):
directory = current
current = ''
elif directory and os.path.isdir( directory[0] ):
current = directory[1]
directory = directory [0]
else:
directory = self.startDirectory
current = ''
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue(dlg.GetPath())
dlg.Destroy()
def GetValue (self):
"""
retrieve current value of text control
"""
return self.textControl.GetValue()
def SetValue (self, value, callBack=1):
"""set current value of text control"""
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
def Enable (self, value=True):
""" Convenient enabling/disabling of entire control """
self.label.Enable (value)
self.textControl.Enable (value)
return self.browseButton.Enable (value)
def Disable (self,):
""" Convenient disabling of entire control """
self.Enable(False)
def GetLabel( self ):
""" Retrieve the label's current text """
return self.label.GetLabel()
def SetLabel( self, value ):
""" Set the label's current text """
rvalue = self.label.SetLabel( value )
self.Refresh( True )
return rvalue
class FileBrowseButtonWithHistory( FileBrowseButton ):
"""
with following additions:
__init__(..., history=None)
history -- optional list of paths for initial history drop-down
(must be passed by name, not a positional argument)
If history is callable it must return a list used
for the history drop-down
changeCallback -- as for FileBrowseButton, but with a work-around
for win32 systems which don't appear to create wx.EVT_COMBOBOX
events properly. There is a (slight) chance that this work-around
will cause some systems to create two events for each Combobox
selection. If you discover this condition, please report it!
As for a FileBrowseButton.__init__ otherwise.
GetHistoryControl()
Return reference to the control which implements interfaces
required for manipulating the history list. See GetHistoryControl
documentation for description of what that interface is.
GetHistory()
Return current history list
SetHistory( value=(), selectionIndex = None )
Set current history list, if selectionIndex is not None, select that index
"""
def __init__( self, *arguments, **namedarguments):
self.history = namedarguments.get( "history" )
if self.history:
del namedarguments["history"]
self.historyCallBack=None
if callable(self.history):
self.historyCallBack=self.history
self.history=None
name = namedarguments.get('name', 'fileBrowseButtonWithHistory')
namedarguments['name'] = name
FileBrowseButton.__init__(self, *arguments, **namedarguments)
def createTextControl( self):
"""Create the text control"""
textControl = wx.ComboBox(self, -1, style = wx.CB_DROPDOWN )
textControl.SetToolTipString( self.toolTip )
textControl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
if self.history:
history=self.history
self.history=None
self.SetHistory( history, control=textControl)
return textControl
def GetHistoryControl( self ):
"""
Return a pointer to the control which provides (at least)
the following methods for manipulating the history list:
Append( item ) -- add item
Clear() -- clear all items
Delete( index ) -- 0-based index to delete from list
SetSelection( index ) -- 0-based index to select in list
Semantics of the methods follow those for the wxComboBox control
"""
return self.textControl
def SetHistory( self, value=(), selectionIndex = None, control=None ):
"""Set the current history list"""
if control is None:
control = self.GetHistoryControl()
if self.history == value:
return
self.history = value
# Clear history values not the selected one.
tempValue=control.GetValue()
# clear previous values
control.Clear()
control.SetValue(tempValue)
# walk through, appending new values
for path in value:
control.Append( path )
if selectionIndex is not None:
control.SetSelection( selectionIndex )
def GetHistory( self ):
"""Return the current history list"""
if self.historyCallBack != None:
return self.historyCallBack()
elif self.history:
return list( self.history )
else:
return []
def OnSetFocus(self, event):
"""When the history scroll is selected, update the history"""
if self.historyCallBack != None:
self.SetHistory( self.historyCallBack(), control=self.textControl)
event.Skip()
if wx.Platform == "__WXMSW__":
def SetValue (self, value, callBack=1):
""" Convenient setting of text control value, works
around limitation of wx.ComboBox """
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
# Hack to call an event handler
class LocalEvent:
def __init__(self, string):
self._string=string
def GetString(self):
return self._string
if callBack==1:
# The callback wasn't being called when SetValue was used ??
# So added this explicit call to it
self.changeCallback(LocalEvent(value))
class DirBrowseButton(FileBrowseButton):
def __init__(self, parent, id = -1,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText = 'Select a directory:',
buttonText = 'Browse',
toolTip = 'Type directory name or browse to select',
dialogTitle = '',
startDirectory = '.',
changeCallback = None,
dialogClass = wx.DirDialog,
newDirectory = False,
name = 'dirBrowseButton'):
FileBrowseButton.__init__(self, parent, id, pos, size, style,
labelText, buttonText, toolTip,
dialogTitle, startDirectory,
changeCallback = changeCallback,
name = name)
self.dialogClass = dialogClass
self.newDirectory = newDirectory
#
def OnBrowse(self, ev = None):
style=0
if not self.newDirectory:
style |= wx.DD_DIR_MUST_EXIST
dialog = self.dialogClass(self,
message = self.dialogTitle,
defaultPath = self.startDirectory,
style = style)
if dialog.ShowModal() == wx.ID_OK:
self.SetValue(dialog.GetPath())
dialog.Destroy()
#
#----------------------------------------------------------------------
if __name__ == "__main__":
#from skeletonbuilder import rulesfile
class SimpleCallback:
def __init__( self, tag ):
self.tag = tag
def __call__( self, event ):
print self.tag, event.GetString()
class DemoFrame( wx.Frame ):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "File entry with browse", size=(500,260))
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
panel = wx.Panel (self,-1)
innerbox = wx.BoxSizer(wx.VERTICAL)
control = FileBrowseButton(
panel,
initialValue = "z:\\temp",
)
innerbox.Add( control, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "With History",
initialValue = "d:\\temp",
history = ["c:\\temp", "c:\\tmp", "r:\\temp","z:\\temp"],
changeCallback= SimpleCallback( "With History" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "History callback",
initialValue = "d:\\temp",
history = self.historyCallBack,
changeCallback= SimpleCallback( "History callback" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
self.bottomcontrol = control = FileBrowseButton(
panel,
labelText = "With Callback",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN ,
changeCallback= SimpleCallback( "With Callback" ),
)
innerbox.Add( control, 0, wx.EXPAND)
self.bottommostcontrol = control = DirBrowseButton(
panel,
labelText = "Simple dir browse button",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN)
innerbox.Add( control, 0, wx.EXPAND)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Label", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeLabel , id=ID)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Value", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeValue, id=ID )
panel.SetAutoLayout(True)
panel.SetSizer( innerbox )
self.history={"c:\\temp":1, "c:\\tmp":1, "r:\\temp":1,"z:\\temp":1}
def historyCallBack(self):
keys=self.history.keys()
keys.sort()
return keys
def OnFileNameChangedHistory (self, event):
self.history[event.GetString ()]=1
def OnCloseMe(self, event):
self.Close(True)
def OnChangeLabel( self, event ):
self.bottomcontrol.SetLabel( "Label Updated" )
def OnChangeValue( self, event ):
self.bottomcontrol.SetValue( "r:\\somewhere\\over\\the\\rainbow.htm" )
def OnCloseWindow(self, event):
self.Destroy()
class DemoApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = DemoFrame(None)
frame.Show(True)
self.SetTopWindow(frame)
return True
def test( ):
app = DemoApp(0)
app.MainLoop()
print 'Creating dialog'
test( )
| en | 0.697016 | #---------------------------------------------------------------------- # Name: wxPython.lib.filebrowsebutton # Purpose: Composite controls that provide a Browse button next to # either a wxTextCtrl or a wxComboBox. The Browse button # launches a wxFileDialog and loads the result into the # other control. # # Author: <NAME> # # RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $ # Copyright: (c) 2000 by Total Control Software # Licence: wxWindows license #---------------------------------------------------------------------- # 12/02/2003 - <NAME> (<EMAIL>) # # o 2.5 Compatability changes # #---------------------------------------------------------------------- A control to allow the user to type in a filename or browse with the standard file dialog to select file # following are the values for a file dialog box # callback for when value changes (optional) :param labelText: Text for label to left of text field :param buttonText: Text for button which launches the file dialog :param toolTip: Help text :param dialogTitle: Title used in file dialog :param startDirectory: Default directory for file dialog startup :param fileMask: File mask (glob pattern, such as *.*) to use in file dialog :param fileMode: wx.OPEN or wx.SAVE, indicates type of file dialog to use :param changeCallback: Optional callback called for all changes in value of the control :param labelWidth: Width of the label # store variables # create the dialog # Setting a value causes the changeCallback to be called. # In this case that would be before the return of the # constructor. Not good. So a default value on # SetValue is used to disable the callback Setup the graphic representation of the dialog # play nice with sizers # add a border around the whole thing and resize the panel to fit # if size.width != -1 or size.height != -1: # self.SetSize(size) Create the label/caption Create the text control Create the browse-button control Going to browse for file... retrieve current value of text control set current value of text control Convenient enabling/disabling of entire control Convenient disabling of entire control Retrieve the label's current text Set the label's current text with following additions: __init__(..., history=None) history -- optional list of paths for initial history drop-down (must be passed by name, not a positional argument) If history is callable it will must return a list used for the history drop-down changeCallback -- as for FileBrowseButton, but with a work-around for win32 systems which don't appear to create wx.EVT_COMBOBOX events properly. There is a (slight) chance that this work-around will cause some systems to create two events for each Combobox selection. If you discover this condition, please report it! As for a FileBrowseButton.__init__ otherwise. GetHistoryControl() Return reference to the control which implements interfaces required for manipulating the history list. See GetHistoryControl documentation for description of what that interface is. 
GetHistory() Return current history list SetHistory( value=(), selectionIndex = None ) Set current history list, if selectionIndex is not None, select that index Create the text control Return a pointer to the control which provides (at least) the following methods for manipulating the history list: Append( item ) -- add item Clear() -- clear all items Delete( index ) -- 0-based index to delete from list SetSelection( index ) -- 0-based index to select in list Semantics of the methods follow those for the wxComboBox control Set the current history list # Clear history values not the selected one. # clear previous values # walk through, appending new values Return the current history list When the history scroll is selected, update the history Convenient setting of text control value, works around limitation of wx.ComboBox # Hack to call an event handler # The callback wasn't being called when SetValue was used ?? # So added this explicit call to it # # #---------------------------------------------------------------------- #from skeletonbuilder import rulesfile | 2.717795 | 3 |
modules/pygsm/devicewrapper.py | whanderley/eden | 205 | 8903 | <gh_stars>100-1000
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors
class DeviceWrapper(object):
def __init__(self, logger, *args, **kwargs):
self.device = serial.Serial(*args, **kwargs)
self.logger = logger
def isOpen(self):
return self.device.isOpen()
def close(self):
self.device.close()
def write(self, str):
self.device.write(str)
def _read(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) until _terminator_ is hit,
(defaults to \r\n, which reads a single "line"), and return."""
buffer = []
# if a different timeout was requested just
# for _this_ read, store and override the
# current device setting (not thread safe!)
if read_timeout is not None:
old_timeout = self.device.timeout
self.device.timeout = read_timeout
def __reset_timeout():
"""restore the device's previous timeout
setting, if we overrode it earlier."""
if read_timeout is not None:
self.device.timeout =\
old_timeout
# the default terminator reads
# until a newline is hit
if read_term is None:
read_term = "\r\n"
while(True):
buf = self.device.read()
buffer.append(buf)
# if a timeout was hit, raise an exception including the raw data that
# we've already read (in case the calling func was _expecting_ a timeout
# (wouldn't it be nice if serial.Serial.read returned None for this?)
if buf == '':
__reset_timeout()
raise(errors.GsmReadTimeoutError(buffer))
# if last n characters of the buffer match the read
# terminator, return what we've received so far
if ''.join(buffer[-len(read_term):]) == read_term:
buf_str = ''.join(buffer)
__reset_timeout()
self._log(repr(buf_str), 'read')
return buf_str
def read_lines(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) one line at a time until a response
terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
a list containing the lines."""
buffer = []
# keep on looping until a command terminator
# is encountered. these are NOT the same as the
# "read_term" argument - only OK or ERROR is valid
while(True):
buf = self._read(
read_term=read_term,
read_timeout=read_timeout)
buf = buf.strip()
buffer.append(buf)
# most commands return OK for success, but there
# are some exceptions. we're not checking those
# here (unlike RubyGSM), because they should be
# handled when they're _expected_
if buf == "OK":
return buffer
# some errors contain useful error codes, so raise a
# proper error with a description from pygsm/errors.py
m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
if m is not None:
type, code = m.groups()
raise(errors.GsmModemError(type, int(code)))
# ...some errors are not so useful
# (at+cmee=1 should enable error codes)
if buf == "ERROR":
raise(errors.GsmModemError)
def _log(self, str, type="debug"):
if hasattr(self, "logger"):
self.logger(self, str, type) | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors
class DeviceWrapper(object):
def __init__(self, logger, *args, **kwargs):
self.device = serial.Serial(*args, **kwargs)
self.logger = logger
def isOpen(self):
return self.device.isOpen()
def close(self):
self.device.close()
def write(self, str):
self.device.write(str)
def _read(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) until _terminator_ is hit,
(defaults to \r\n, which reads a single "line"), and return."""
buffer = []
# if a different timeout was requested just
# for _this_ read, store and override the
# current device setting (not thread safe!)
if read_timeout is not None:
old_timeout = self.device.timeout
self.device.timeout = read_timeout
def __reset_timeout():
"""restore the device's previous timeout
setting, if we overrode it earlier."""
if read_timeout is not None:
self.device.timeout =\
old_timeout
# the default terminator reads
# until a newline is hit
if read_term is None:
read_term = "\r\n"
while(True):
buf = self.device.read()
buffer.append(buf)
# if a timeout was hit, raise an exception including the raw data that
# we've already read (in case the calling func was _expecting_ a timeout
# (wouldn't it be nice if serial.Serial.read returned None for this?)
if buf == '':
__reset_timeout()
raise(errors.GsmReadTimeoutError(buffer))
# if last n characters of the buffer match the read
# terminator, return what we've received so far
if ''.join(buffer[-len(read_term):]) == read_term:
buf_str = ''.join(buffer)
__reset_timeout()
self._log(repr(buf_str), 'read')
return buf_str
def read_lines(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) one line at a time until a response
terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
a list containing the lines."""
buffer = []
# keep on looping until a command terminator
# is encountered. these are NOT the same as the
# "read_term" argument - only OK or ERROR is valid
while(True):
buf = self._read(
read_term=read_term,
read_timeout=read_timeout)
buf = buf.strip()
buffer.append(buf)
# most commands return OK for success, but there
# are some exceptions. we're not checking those
# here (unlike RubyGSM), because they should be
# handled when they're _expected_
if buf == "OK":
return buffer
# some errors contain useful error codes, so raise a
# proper error with a description from pygsm/errors.py
m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
if m is not None:
type, code = m.groups()
raise(errors.GsmModemError(type, int(code)))
# ...some errors are not so useful
# (at+cmee=1 should enable error codes)
if buf == "ERROR":
raise(errors.GsmModemError)
def _log(self, str, type="debug"):
if hasattr(self, "logger"):
self.logger(self, str, type) | en | 0.861807 | #!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 # arch: pacman -S python-pyserial # debian/ubuntu: apt-get install python-serial Read from the modem (blocking) until _terminator_ is hit, (defaults to \r\n, which reads a single "line"), and return. # if a different timeout was requested just # for _this_ read, store and override the # current device setting (not thread safe!) restore the device's previous timeout setting, if we overrode it earlier. # the default terminator reads # until a newline is hit # if a timeout was hit, raise an exception including the raw data that # we've already read (in case the calling func was _expecting_ a timeout # (wouldn't it be nice if serial.Serial.read returned None for this?) # if last n characters of the buffer match the read # terminator, return what we've received so far Read from the modem (blocking) one line at a time until a response terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return a list containing the lines. # keep on looping until a command terminator # is encountered. these are NOT the same as the # "read_term" argument - only OK or ERROR is valid # most commands return OK for success, but there # are some exceptions. we're not checking those # here (unlike RubyGSM), because they should be # handled when they're _expected_ # some errors contain useful error codes, so raise a # proper error with a description from pygsm/errors.py # ...some errors are not so useful # (at+cmee=1 should enable error codes) | 2.622741 | 3 |
day1/loops.py | alqmy/The-Garage-Summer-Of-Code | 0 | 8904 | <gh_stars>0
# while True:
# # ejecuta esto
# print("Hola")
real = 7
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# =/=
while guess != real:
print("Ese no es el numero")
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# el resto
print("Yay! Lo sacastes!")
| # while True:
# # ejecuta esto
# print("Hola")
real = 7
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# =/=
while guess != real:
print("Ese no es el numero")
print("Entre un numero entre el 1 y el 10")
guess = int(input())
# el resto
print("Yay! Lo sacastes!") | es | 0.695699 | # while True: # # ejecuta esto # print("Hola") # =/= # el resto | 4.034661 | 4 |
pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py | paulveillard/cybersecurity-penetration-testing | 6 | 8905 | <reponame>paulveillard/cybersecurity-penetration-testing
def rotCode(data):
"""
The rotCode function encodes/decodes data using string indexing
:param data: A string
:return: The rot-13 encoded/decoded string
"""
rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
substitutions = []
# Walk through each individual character
for c in data:
        # Handle uppercase and lowercase letters separately so the original case is preserved
if c.isupper():
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c.lower())
except ValueError:
substitutions.append(c)
continue
# Calculate the relative index that is 13 characters away from the index
substitutions.append((rot_chars[(index-13)]).upper())
else:
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c)
except ValueError:
substitutions.append(c)
continue
substitutions.append(rot_chars[((index-13))])
return ''.join(substitutions)
if __name__ == '__main__':
print rotCode('Jul, EBG-13?')
| def rotCode(data):
"""
The rotCode function encodes/decodes data using string indexing
:param data: A string
:return: The rot-13 encoded/decoded string
"""
rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
substitutions = []
# Walk through each individual character
for c in data:
        # Handle uppercase and lowercase letters separately so the original case is preserved
if c.isupper():
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c.lower())
except ValueError:
substitutions.append(c)
continue
# Calculate the relative index that is 13 characters away from the index
substitutions.append((rot_chars[(index-13)]).upper())
else:
try:
# Find the position of the character in rot_chars list
index = rot_chars.index(c)
except ValueError:
substitutions.append(c)
continue
substitutions.append(rot_chars[((index-13))])
return ''.join(substitutions)
if __name__ == '__main__':
print rotCode('Jul, EBG-13?') | en | 0.730046 | The rotCode function encodes/decodes data using string indexing :param data: A string :return: The rot-13 encoded/decoded string # Walk through each individual character # Walk through each individual character # Find the position of the character in rot_chars list # Calculate the relative index that is 13 characters away from the index # Find the position of the character in rot_chars list | 4.375463 | 4 |
examples/sim_tfidf.py | sunyilgdx/CwVW-SIF | 12 | 8906 | <gh_stars>10-100
import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016)
# '../data/glove.840B.300d.txt' # need to download it first
# ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016)
'../data/glove.6B.50d.txt' # need to download it first
]
rmpcs = [0,1]
comment4para = [ # need to align with the following loop
['word vector files', wordfiles], # comments and values,
['remove principal component or not', rmpcs]
]
params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
(words, We) = data_io.getWordmap(wordfile)
weight4ind = data_io.getIDFWeight(wordfile)
for rmpc in rmpcs:
print('word vectors loaded from %s' % wordfile)
print('word weights computed from idf')
params.rmpc = rmpc
print('remove the first %d principal components' % rmpc)
# eval just one example dataset
parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
    ## eval all datasets; need to obtain the datasets from <NAME> (https://github.com/jwieting/iclr2016)
# parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
paras = (wordfile, rmpc)
parr4para[paras] = parr
sarr4para[paras] = sarr
## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
with open(result_file, 'w') as f:
pickle.dump([parr4para, sarr4para, comment4para] , f)
| import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016)
# '../data/glove.840B.300d.txt' # need to download it first
# ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016)
'../data/glove.6B.50d.txt' # need to download it first
]
rmpcs = [0,1]
comment4para = [ # need to align with the following loop
['word vector files', wordfiles], # comments and values,
['remove principal component or not', rmpcs]
]
params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
(words, We) = data_io.getWordmap(wordfile)
weight4ind = data_io.getIDFWeight(wordfile)
for rmpc in rmpcs:
print('word vectors loaded from %s' % wordfile)
print('word weights computed from idf')
params.rmpc = rmpc
print('remove the first %d principal components' % rmpc)
# eval just one example dataset
parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
    ## eval all datasets; need to obtain the datasets from <NAME> (https://github.com/jwieting/iclr2016)
# parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
paras = (wordfile, rmpc)
parr4para[paras] = parr
sarr4para[paras] = sarr
## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
with open(result_file, 'w') as f:
pickle.dump([parr4para, sarr4para, comment4para] , f) | en | 0.728266 | ## run # wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016) # '../data/glove.840B.300d.txt' # need to download it first # ] #'../data/paragram_sl999_small.txt', # need to download it from <NAME>'s github (https://github.com/jwieting/iclr2016) # need to download it first # need to align with the following loop # comments and values, # eval just one example dataset ## eval all datasets; need to obtained datasets from <NAME> (https://github.com/jwieting/iclr2016) # parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params) ## save result # True | 2.405972 | 2 |
tests/cases/cls.py | div72/py2many | 345 | 8907 | <gh_stars>100-1000
class Foo:
def bar(self):
return "a"
if __name__ == "__main__":
f = Foo()
b = f.bar()
print(b) | class Foo:
def bar(self):
return "a"
if __name__ == "__main__":
f = Foo()
b = f.bar()
print(b) | none | 1 | 2.912009 | 3 |
|
theano-rfnn/mnist_loader.py | jhja/RFNN | 55 | 8908 | <filename>theano-rfnn/mnist_loader.py
import numpy as np
import os
from random import shuffle
datasets_dir = './../data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=True):
ntrain=np.array(ntrain).astype(int).squeeze()
data_dir = os.path.join(datasets_dir,'mnist/')
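    # The raw IDX files carry a 16-byte header for images and an 8-byte header for labels, which is skipped before reshaping.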
fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trX = loaded[16:].reshape((60000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teX = loaded[16:].reshape((10000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY_shuffle = []
trX_shuffle = []
index_shuf = range(len(trY))
shuffle(index_shuf)
for i in index_shuf:
trY_shuffle.append(trY[i])
trX_shuffle.append(trX[i])
trX = np.asarray(trX_shuffle)
trY = np.asarray(trY_shuffle)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX,teX,trY,teY
| <filename>theano-rfnn/mnist_loader.py
import numpy as np
import os
from random import shuffle
datasets_dir = './../data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=True):
ntrain=np.array(ntrain).astype(int).squeeze()
data_dir = os.path.join(datasets_dir,'mnist/')
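    # The raw IDX files carry a 16-byte header for images and an 8-byte header for labels, which is skipped before reshaping.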
fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trX = loaded[16:].reshape((60000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teX = loaded[16:].reshape((10000,28*28)).astype(float)
fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd,dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trY_shuffle = []
trX_shuffle = []
index_shuf = range(len(trY))
shuffle(index_shuf)
for i in index_shuf:
trY_shuffle.append(trY[i])
trX_shuffle.append(trX[i])
trX = np.asarray(trX_shuffle)
trY = np.asarray(trY_shuffle)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX,teX,trY,teY
| none | 1 | 2.452466 | 2 |
|
Ejercicio 2.py | crltsnch/Ejercicios-grupales | 0 | 8909 | import math
import os
import random
import re
import sys
def compareTriplets(a, b):
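    # Compare the three scores pairwise: the side with the strictly higher value earns one point; ties earn nothing.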
puntosA=0
puntosB=0
for i in range (0,3):
if a[i]<b[i]:
puntosB+=1
elif a[i]>b[i]:
puntosA+=1
puntosTotales=[puntosA, puntosB]
return puntosTotales
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')
print("Escribe las notas de a")
a = list(map(int, input().rstrip().split()))
print("Escribe las notas de b")
b = list(map(int, input().rstrip().split()))
result = compareTriplets(a, b)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close() | import math
import os
import random
import re
import sys
def compareTriplets(a, b):
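    # Compare the three scores pairwise: the side with the strictly higher value earns one point; ties earn nothing.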
puntosA=0
puntosB=0
for i in range (0,3):
if a[i]<b[i]:
puntosB+=1
elif a[i]>b[i]:
puntosA+=1
puntosTotales=[puntosA, puntosB]
return puntosTotales
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')
print("Escribe las notas de a")
a = list(map(int, input().rstrip().split()))
print("Escribe las notas de b")
b = list(map(int, input().rstrip().split()))
result = compareTriplets(a, b)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close() | none | 1 | 3.621906 | 4 |
|
app/routes/router.py | nityagautam/ReportDashboard-backend | 1 | 8910 | #===============================================================
# @author: <EMAIL>
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================
# Import section with referecne of entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect
# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data
# ==============================================================
# App Routes/Gateways
# ==============================================================
@application.route('/test', methods=['GET'])
def test():
return '<h4>HELLO WORLD!</h4><hr/> it works!'
@application.route('/', methods=['GET'])
@application.route('/home', methods=['GET'])
@application.route('/dashboard', methods=['GET'])
def root():
return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/history', methods=['GET'])
def history():
return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data)
@application.route('/about', methods=['GET'])
def about():
return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/get-notes', methods=['POST'])
def get_todo():
print("KEY :: VALUE (from the received form data)")
print([(key, val) for key, val in zip(request.form.keys(), request.form.values())])
return redirect("/notes", code=302)
@application.route('/notes')
def info():
return render_template("notes.html", app_data=app_ui_config)
@application.route('/sample-data')
def get_sample_data():
return jsonify(app_ui_config)
# ==============================================================
# Error Handlers Starts
# ==============================================================
# 404 handler; we can also pass specific request error codes to the decorator;
@application.errorhandler(404)
def not_found(err):
return render_template("error.html", app_data=app_ui_config, error_data=err), 400
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(TypeError)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(Exception)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# ==============================================================
# Error Handlers Ends
# ==============================================================
# Route For Sample data
@application.route('/data')
def get_data():
data = {
"reports": [
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_1",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"},
"total_time": "35 min."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_2",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"},
"total_time": "0.2345 secs."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_3",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"},
"total_time": "5 days"
}
]
}
return jsonify(data)
# ==============================================================
# Extra routes starts
# ==============================================================
@application.route('/sample1')
def sample1():
return render_template("web-analytics-overview.html")
@application.route('/sample2')
def sample2():
return render_template("web-analytics-real-time.html")
@application.route('/logo')
def get_logo():
"""
Queries the snapshot data for both Serenity and JMeter projects from the MongoDB.
Renders the Snapshot view of html
:return: N/A
"""
# set template directory of the Flask App to the path set by the user as command line arg.
return f'<html><head><title>Root</title><head><body><hr/> Welcome to the main page <hr/> ' \
f'Building image from static public location: <br/> ' \
f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
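# ----------------------------------------------------------------------
# Editorial sketch (not part of the original module): because this file does
# `from __main__ import application`, it expects the entry script to create
# the Flask app before importing these routes. A minimal entry script might
# look like the following; the module path `app.routes.router` is assumed
# from this file's location.
#
#   from flask import Flask
#   application = Flask(__name__)
#   from app.routes import router  # noqa: F401 -- importing registers the routes above
#   application.run(debug=True)
# ----------------------------------------------------------------------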
| #===============================================================
# @author: <EMAIL>
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================
# Import section with reference to the entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect
# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data
# ==============================================================
# App Routes/Gateways
# ==============================================================
@application.route('/test', methods=['GET'])
def test():
return '<h4>HELLO WORLD!</h4><hr/> it works!'
@application.route('/', methods=['GET'])
@application.route('/home', methods=['GET'])
@application.route('/dashboard', methods=['GET'])
def root():
return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/history', methods=['GET'])
def history():
return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data)
@application.route('/about', methods=['GET'])
def about():
return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data)
@application.route('/get-notes', methods=['POST'])
def get_todo():
print("KEY :: VALUE (from the received form data)")
print([(key, val) for key, val in zip(request.form.keys(), request.form.values())])
return redirect("/notes", code=302)
@application.route('/notes')
def info():
return render_template("notes.html", app_data=app_ui_config)
@application.route('/sample-data')
def get_sample_data():
return jsonify(app_ui_config)
# ==============================================================
# Error Handlers Starts
# ==============================================================
# 404 Handler; We can also pass the specific request error codes to the decorator;
@application.errorhandler(404)
def not_found(err):
    return render_template("error.html", app_data=app_ui_config, error_data=err), 404
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(TypeError)
def server_error(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(Exception)
def unhandled_exception(err):
application.logger.exception(err)
return render_template("error.html", app_data=app_ui_config, error_data=err), 500
# ==============================================================
# Error Handlers Ends
# ==============================================================
# Route For Sample data
@application.route('/data')
def get_data():
data = {
"reports": [
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_1",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"},
"total_time": "35 min."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_2",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"},
"total_time": "0.2345 secs."
},
{
"build": "build_no",
"created": "Imported 05052021T11:30:00:00IST",
"platform": "Imported Win/Unix/Mac",
"project_name": "project_name_3",
"report_location_path": "path/to/report/location/index.html",
"report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"},
"total_time": "5 days"
}
]
}
return jsonify(data)
# ==============================================================
# Extra routes starts
# ==============================================================
@application.route('/sample1')
def sample1():
return render_template("web-analytics-overview.html")
@application.route('/sample2')
def sample2():
return render_template("web-analytics-real-time.html")
@application.route('/logo')
def get_logo():
"""
Queries the snapshot data for both Serenity and JMeter projects from the MongoDB.
Renders the Snapshot view of html
:return: N/A
"""
# set template directory of the Flask App to the path set by the user as command line arg.
return f'<html><head><title>Root</title><head><body><hr/> Welcome to the main page <hr/> ' \
f'Building image from static public location: <br/> ' \
f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
| en | 0.623894 | #=============================================================== # @author: <EMAIL> # @written: 08 December 2021 # @desc: Routes for the Backend server #=============================================================== # Import section with referecne of entry file or main file; # Local sample data import # ============================================================== # App Routes/Gateways # ============================================================== # ============================================================== # Error Handlers Starts # ============================================================== # 404 Handler; We can also pass the specific request errors codes to the decorator; # Exception/Error handler; We can also pass the specific errors to the decorator; # Exception/Error handler; We can also pass the specific errors to the decorator; # ============================================================== # Error Handlers Ends # ============================================================== # Route For Sample data # ============================================================== # Extra routes starts # ============================================================== Queries the snapshot data for both Serenity and JMeter projects from the MongoDB.
Renders the Snapshot view of html
:return: N/A # set template directory of the Flask App to the path set by the user as command line arg. | 2.380348 | 2 |
idaes/generic_models/properties/core/examples/ASU_PR.py | carldlaird/idaes-pse | 112 | 8911 | <filename>idaes/generic_models/properties/core/examples/ASU_PR.py
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.
The example includes two dictionaries.
1. The dictionary named configuration contains parameters obtained from
The Properties of Gases and Liquids (1987) 4th edition and NIST.
2. The dictionary named configuration_Dowling_2015 contains parameters used in
A framework for efficient large scale equation-oriented flowsheet optimization
(2015) by Dowling. The parameters are extracted from Properties of Gases and
Liquids (1977) 3rd edition for the Antoine vapor-pressure equation and acentric
factors, along with values from the same edition converted to J (SI units).
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - <NAME>
# [2] NIST, https://webbook.nist.gov/
# Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1977)
#     3rd edition, Chemical Engineering Series - <NAME>
#     Cp parameters were converted to J in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015)
# Computers and Chemical Engineering - <NAME>
configuration = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (34e5, pyunits.Pa), # [1]
"temperature_crit": (126.2, pyunits.K), # [1]
"omega": 0.037, # [1]
"cp_mol_ig_comp_coeff": {
"A": (3.115E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (-1.357E-2,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (2.680E-5,
pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.168E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.7362, None), # [2]
"B": (264.651, pyunits.K),
"C": (-6.788, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (48.98e5, pyunits.Pa), # [1]
"temperature_crit": (150.86, pyunits.K), # [1]
"omega": 0.001, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.050E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
"C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {"A": (3.29555, None), # [2]
"B": (215.24, pyunits.K),
"C": (-22.233, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (50.43e5, pyunits.Pa), # [1]
"temperature_crit": (154.58, pyunits.K), # [1]
"omega": 0.025, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
"B": (-3.680E-6,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.065E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.85845, None), # [2]
"B": (325.675, pyunits.K),
"C": (-5.667, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
configuration_Dowling_2015 = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (33.943875e5, pyunits.Pa), # [4]
"temperature_crit": (126.2, pyunits.K), # [4]
"omega": 0.04, # [3]
"cp_mol_ig_comp_coeff": {
'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (14.9342, None), # [3]
'B': (588.72, pyunits.K),
'C': (-6.60, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (48.737325e5, pyunits.Pa), # [4]
"temperature_crit": (150.86, pyunits.K), # [4]
"omega": -0.004, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {
'A': (15.2330, None), # [3]
'B': (700.51, pyunits.K),
'C': (-5.84, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (50.45985e5, pyunits.Pa), # [4]
"temperature_crit": (154.58, pyunits.K), # [4]
"omega": 0.021, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (15.4075, None), # [3]
'B': (734.55, pyunits.K),
'C': (-6.45, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
| <filename>idaes/generic_models/properties/core/examples/ASU_PR.py
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.
The example includes two dictionaries.
1. The dictionary named configuration contains parameters obtained from
The Properties of Gases and Liquids (1987) 4th edition and NIST.
2. The dictionary named configuration_Dowling_2015 contains parameters used in
A framework for efficient large scale equation-oriented flowsheet optimization
(2015) by Dowling. The parameters are extracted from Properties of Gases and
Liquids (1977) 3rd edition for the Antoine vapor-pressure equation and acentric
factors, along with values from the same edition converted to J (SI units).
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - <NAME>
# [2] NIST, https://webbook.nist.gov/
# Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1977)
#     3rd edition, Chemical Engineering Series - <NAME>
#     Cp parameters were converted to J in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015)
# Computers and Chemical Engineering - <NAME>
configuration = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (34e5, pyunits.Pa), # [1]
"temperature_crit": (126.2, pyunits.K), # [1]
"omega": 0.037, # [1]
"cp_mol_ig_comp_coeff": {
"A": (3.115E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (-1.357E-2,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (2.680E-5,
pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.168E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.7362, None), # [2]
"B": (264.651, pyunits.K),
"C": (-6.788, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (48.98e5, pyunits.Pa), # [1]
"temperature_crit": (150.86, pyunits.K), # [1]
"omega": 0.001, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.050E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
"C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {"A": (3.29555, None), # [2]
"B": (215.24, pyunits.K),
"C": (-22.233, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (50.43e5, pyunits.Pa), # [1]
"temperature_crit": (154.58, pyunits.K), # [1]
"omega": 0.025, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
"B": (-3.680E-6,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.065E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.85845, None), # [2]
"B": (325.675, pyunits.K),
"C": (-5.667, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
configuration_Dowling_2015 = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (33.943875e5, pyunits.Pa), # [4]
"temperature_crit": (126.2, pyunits.K), # [4]
"omega": 0.04, # [3]
"cp_mol_ig_comp_coeff": {
'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (14.9342, None), # [3]
'B': (588.72, pyunits.K),
'C': (-6.60, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (48.737325e5, pyunits.Pa), # [4]
"temperature_crit": (150.86, pyunits.K), # [4]
"omega": -0.004, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {
'A': (15.2330, None), # [3]
'B': (700.51, pyunits.K),
'C': (-5.84, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (50.45985e5, pyunits.Pa), # [4]
"temperature_crit": (154.58, pyunits.K), # [4]
"omega": 0.021, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (15.4075, None), # [3]
'B': (734.55, pyunits.K),
'C': (-6.45, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
| en | 0.74351 | ################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# Air separation phase equilibrium package using Peng-Robinson EoS. Example property package using the Generic Property Package Framework. This example shows how to set up a property package to do air separation phase equilibrium in the generic framework using Peng-Robinson equation along with methods drawn from the pre-built IDAES property libraries. The example includes two dictionaries. 1. The dictionary named configuration contains parameters obtained from The Properties of Gases and Liquids (1987) 4th edition and NIST. 2. The dictionary named configuration_Dowling_2015 contains parameters used in A framework for efficient large scale equation-oriented flowsheet optimization (2015) Dowling. The parameters are extracted from Properties of Gases and Liquids (1977) 3rd edition for Antoine's vapor equation and acentric factors and converted values from the Properties of Gases and Liquids (1977) 3rd edition to j. # Import Python libraries # Import Pyomo units # Import IDAES cores # Set up logger # --------------------------------------------------------------------- # Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system # Data Sources: # [1] The Properties of Gases and Liquids (1987) # 4th edition, Chemical Engineering Series - <NAME> # [2] NIST, https://webbook.nist.gov/ # Retrieved 16th August, 2020 # [3] The Properties of Gases and Liquids (1987) # 3rd edition, Chemical Engineering Series - <NAME> # Cp parameters where converted to j in Dowling 2015 # [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015) # Computers and Chemical Engineering - <NAME> # Specifying components # [1] # [1] # [1] # [1] # [1] # [2] # [2] # [2] # [1] # [1] # [1] # [1] # [1] # [2] # [2] # [2] # [1] # [1] # [1] # [1] # [2] # [2] # [2] # Specifying phases # Set base units of measurement # Specifying state definition # Defining phase equilibria # Specifying components # [3] # [4] # [4] # [3] # [3] # [2] # [2] # [3] # [3] # [4] # [4] # [1] # [3] # [3] # [3] # [3] # [3] # [4] # [4] # [1] # [3] # [2] # [2] # [3] # Specifying phases # Set base units of measurement # Specifying state definition # Defining phase equilibria | 2.003432 | 2 |
tests/functional/test_calculator.py | bellanov/calculator | 0 | 8912 | """TODO: Move the Threads Here"""
| """TODO: Move the Threads Here"""
| en | 0.290964 | TODO: Move the Threads Here | 1.05305 | 1 |
autokeras/hypermodel/graph.py | Sette/autokeras | 0 | 8913 | <reponame>Sette/autokeras
import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
class Graph(kerastuner.engine.stateful.Stateful):
"""A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads.
# Arguments
inputs: A list of input node(s) for the Graph.
outputs: A list of output node(s) for the Graph.
override_hps: A list of HyperParameters. The predefined HyperParameters that
will override the space of the Hyperparameters defined in the Hypermodels
with the same names.
"""
def __init__(self, inputs, outputs, override_hps=None):
super().__init__()
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self._node_to_id = {}
self._nodes = []
self.blocks = []
self._block_to_id = {}
self._build_network()
self.override_hps = override_hps or []
def compile(self, func):
"""Share the information between blocks by calling functions in compiler.
# Arguments
func: A dictionary. The keys are the block classes. The values are
corresponding compile functions.
"""
for block in self.blocks:
if block.__class__ in func:
func[block.__class__](block)
def _register_hps(self, hp):
"""Register the override HyperParameters for current HyperParameters."""
for single_hp in self.override_hps:
name = single_hp.name
if name not in hp.values:
hp.register(single_hp.name,
single_hp.__class__.__name__,
single_hp.get_config())
hp.values[name] = single_hp.default
def _build_network(self):
self._node_to_id = {}
# Recursively find all the interested nodes.
for input_node in self.inputs:
self._search_network(input_node, self.outputs, set(), set())
self._nodes = sorted(list(self._node_to_id.keys()),
key=lambda x: self._node_to_id[x])
for node in (self.inputs + self.outputs):
if node not in self._node_to_id:
raise ValueError('Inputs and outputs not connected.')
# Find the blocks.
blocks = []
for input_node in self._nodes:
for block in input_node.out_blocks:
if any([output_node in self._node_to_id
for output_node in block.outputs]) and block not in blocks:
blocks.append(block)
# Check if all the inputs of the blocks are set as inputs.
for block in blocks:
for input_node in block.inputs:
if input_node not in self._node_to_id:
raise ValueError('A required input is missing for HyperModel '
'{name}.'.format(name=block.name))
# Calculate the in degree of all the nodes
in_degree = [0] * len(self._nodes)
for node_id, node in enumerate(self._nodes):
in_degree[node_id] = len([
block for block in node.in_blocks if block in blocks])
# Add the blocks in topological order.
self.blocks = []
self._block_to_id = {}
while len(blocks) != 0:
new_added = []
# Collect blocks with in degree 0.
for block in blocks:
if any([in_degree[self._node_to_id[node]]
for node in block.inputs]):
continue
new_added.append(block)
# Remove the collected blocks from blocks.
for block in new_added:
blocks.remove(block)
for block in new_added:
# Add the collected blocks to the AutoModel.
self._add_block(block)
# Decrease the in degree of the output nodes.
for output_node in block.outputs:
if output_node not in self._node_to_id:
continue
output_node_id = self._node_to_id[output_node]
in_degree[output_node_id] -= 1
def _search_network(self, input_node, outputs, in_stack_nodes,
visited_nodes):
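        # Depth-first search from `input_node`: records every node that can
        # reach one of `outputs`, and uses `in_stack_nodes` to detect cycles.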
visited_nodes.add(input_node)
in_stack_nodes.add(input_node)
outputs_reached = False
if input_node in outputs:
outputs_reached = True
for block in input_node.out_blocks:
for output_node in block.outputs:
if output_node in in_stack_nodes:
raise ValueError('The network has a cycle.')
if output_node not in visited_nodes:
self._search_network(output_node, outputs, in_stack_nodes,
visited_nodes)
if output_node in self._node_to_id.keys():
outputs_reached = True
if outputs_reached:
self._add_node(input_node)
in_stack_nodes.remove(input_node)
def _add_block(self, block):
if block not in self.blocks:
block_id = len(self.blocks)
self._block_to_id[block] = block_id
self.blocks.append(block)
def _add_node(self, input_node):
if input_node not in self._node_to_id:
self._node_to_id[input_node] = len(self._node_to_id)
def _get_block(self, name):
for block in self.blocks:
if block.name == name:
return block
raise ValueError('Cannot find block named {name}.'.format(name=name))
def get_state(self):
# TODO: Include everything including the graph structure.
block_state = {str(block_id): block.get_state()
for block_id, block in enumerate(self.blocks)}
node_state = {str(node_id): node.get_state()
for node_id, node in enumerate(self._nodes)}
return {'blocks': block_state, 'nodes': node_state}
def set_state(self, state):
# TODO: Include everything including the graph structure.
block_state = state['blocks']
node_state = state['nodes']
for block_id, block in enumerate(self.blocks):
block.set_state(block_state[str(block_id)])
for node_id, node in enumerate(self._nodes):
node.set_state(node_state[str(node_id)])
def save(self, fname):
state = self.get_state()
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(state, f)
return str(fname)
def reload(self, fname):
with tf.io.gfile.GFile(fname, 'rb') as f:
state = pickle.load(f)
self.set_state(state)
def build(self, hp):
self._register_hps(hp)
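# Editorial note: `override_hps` lets a caller pin search-space values before
# `build` runs. For example (assuming the kerastuner HyperParameters API used
# elsewhere in this module):
#
#   hps = kerastuner.HyperParameters()
#   hps.Fixed('optimizer', 'adam')
#   graph = KerasGraph(inputs, outputs, override_hps=hps.space)
#
# `_register_hps` then registers each entry with the tuner's HyperParameters
# and forces its value to the fixed default.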
class PlainGraph(Graph):
"""A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph.
A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function
returns an instance of PlainGraph, which can be directly built into a KerasGraph
and a PreprocessGraph.
# Arguments
inputs: A list of input node(s) for the PlainGraph.
outputs: A list of output node(s) for the PlainGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
self._keras_model_inputs = []
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
def _build_network(self):
super()._build_network()
# Find the model input nodes
for node in self._nodes:
if self._is_keras_model_inputs(node):
self._keras_model_inputs.append(node)
self._keras_model_inputs = sorted(self._keras_model_inputs,
key=lambda x: self._node_to_id[x])
@staticmethod
def _is_keras_model_inputs(node):
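        # A node feeds the Keras model when all of its producers are
        # Preprocessors and at least one consumer is not a Preprocessor.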
for block in node.in_blocks:
if not isinstance(block, base.Preprocessor):
return False
for block in node.out_blocks:
if not isinstance(block, base.Preprocessor):
return True
return False
def build_keras_graph(self):
return KerasGraph(self._keras_model_inputs,
self.outputs,
override_hps=self.override_hps)
def build_preprocess_graph(self):
return PreprocessGraph(self.inputs,
self._keras_model_inputs,
override_hps=self.override_hps)
class KerasGraph(Graph, kerastuner.HyperModel):
"""A graph and HyperModel to be built into a Keras model."""
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
super().build(hp)
self.compile(compiler.AFTER)
real_nodes = {}
for input_node in self.inputs:
node_id = self._node_to_id[input_node]
real_nodes[node_id] = input_node.build()
for block in self.blocks:
if isinstance(block, base.Preprocessor):
continue
temp_inputs = [real_nodes[self._node_to_id[input_node]]
for input_node in block.inputs]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
real_nodes[self._node_to_id[output_node]] = real_output_node
model = tf.keras.Model(
[real_nodes[self._node_to_id[input_node]] for input_node in
self.inputs],
[real_nodes[self._node_to_id[output_node]] for output_node in
self.outputs])
return self._compile_keras_model(hp, model)
def _get_metrics(self):
metrics = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
metrics[block.name] = block.metrics
return metrics
def _get_loss(self):
loss = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
loss[block.name] = block.loss
return loss
def _compile_keras_model(self, hp, model):
# Specify hyperparameters from compile(...)
optimizer = hp.Choice('optimizer',
['adam', 'adadelta', 'sgd'],
default='adam')
model.compile(optimizer=optimizer,
metrics=self._get_metrics(),
loss=self._get_loss())
return model
class PreprocessGraph(Graph):
"""A graph consists of only Preprocessors.
It is both a search space with Hyperparameters and a model to be fitted. It
preprocess the dataset with the Preprocessors. The output is the input to the
Keras model. It does not extend Hypermodel class because it cannot be built into
a Keras model.
"""
def preprocess(self, dataset, validation_data=None, fit=False):
"""Preprocess the data to be ready for the Keras Model.
# Arguments
dataset: tf.data.Dataset. Training data.
validation_data: tf.data.Dataset. Validation data.
fit: Boolean. Whether to fit the preprocessing layers with x and y.
        # Returns
            If validation data is provided, a tuple of two preprocessed
            tf.data.Dataset, (train, validation). Otherwise, the preprocessed
            training dataset.
"""
dataset = self._preprocess(dataset, fit=fit)
if validation_data:
validation_data = self._preprocess(validation_data)
return dataset, validation_data
def _preprocess(self, dataset, fit=False):
# A list of input node ids in the same order as the x in the dataset.
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs]
# Iterate until all the model inputs have their data.
while set(map(lambda node: self._node_to_id[node], self.outputs)
) - set(input_node_ids):
# Gather the blocks for the next iteration over the dataset.
blocks = []
for node_id in input_node_ids:
for block in self._nodes[node_id].out_blocks:
if block in self.blocks:
blocks.append(block)
if fit:
# Iterate the dataset to fit the preprocessors in current depth.
self._fit(dataset, input_node_ids, blocks)
# Transform the dataset.
output_node_ids = []
dataset = dataset.map(functools.partial(
self._transform,
input_node_ids=input_node_ids,
output_node_ids=output_node_ids,
blocks=blocks,
fit=fit))
# Build input_node_ids for next depth.
input_node_ids = output_node_ids
return dataset
def _fit(self, dataset, input_node_ids, blocks):
# Iterate the dataset to fit the preprocessors in current depth.
for x, y in dataset:
x = nest.flatten(x)
id_to_data = {
node_id: temp_x for temp_x, node_id in zip(x, input_node_ids)
}
for block in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in block.inputs]
block.update(data, y=y)
# Finalize and set the shapes of the output nodes.
for block in blocks:
block.finalize()
nest.flatten(block.outputs)[0].shape = block.output_shape
def _transform(self,
x,
y,
input_node_ids,
output_node_ids,
blocks,
fit=False):
x = nest.flatten(x)
id_to_data = {
node_id: temp_x
for temp_x, node_id in zip(x, input_node_ids)
}
output_data = {}
# Transform each x by the corresponding block.
for hm in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in hm.inputs]
data = tf.py_function(functools.partial(hm.transform, fit=fit),
inp=nest.flatten(data),
Tout=hm.output_types())
data = nest.flatten(data)[0]
data.set_shape(hm.output_shape)
output_data[self._node_to_id[hm.outputs[0]]] = data
        # Keep the Keras Model inputs even if they are not inputs to the blocks.
for node_id, data in id_to_data.items():
if self._nodes[node_id] in self.outputs:
output_data[node_id] = data
for node_id in sorted(output_data.keys()):
output_node_ids.append(node_id)
return tuple(map(
lambda node_id: output_data[node_id], output_node_ids)), y
def build(self, hp):
"""Obtain the values of all the HyperParameters.
        Different from the build function of Hypermodel, this build function does not
        produce a Keras model. It only obtains the hyperparameter values from the
        HyperParameters.
# Arguments
hp: HyperParameters.
"""
super().build(hp)
self.compile(compiler.BEFORE)
for block in self.blocks:
block.build(hp)
def copy(old_instance):
instance = old_instance.__class__()
instance.set_state(old_instance.get_state())
return instance
class HyperGraph(Graph):
"""A HyperModel based on connected Blocks and HyperBlocks.
# Arguments
inputs: A list of input node(s) for the HyperGraph.
outputs: A list of output node(s) for the HyperGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(inputs, outputs, **kwargs)
self.compile(compiler.HYPER)
def build_graphs(self, hp):
plain_graph = self.hyper_build(hp)
preprocess_graph = plain_graph.build_preprocess_graph()
preprocess_graph.build(hp)
return (preprocess_graph,
plain_graph.build_keras_graph())
def hyper_build(self, hp):
"""Build a GraphHyperModel with no HyperBlock but only Block."""
# Make sure get_uid would count from start.
tf.keras.backend.clear_session()
inputs = []
old_node_to_new = {}
for old_input_node in self.inputs:
input_node = copy(old_input_node)
inputs.append(input_node)
old_node_to_new[old_input_node] = input_node
for old_block in self.blocks:
inputs = [old_node_to_new[input_node]
for input_node in old_block.inputs]
if isinstance(old_block, base.HyperBlock):
outputs = old_block.build(hp, inputs=inputs)
else:
outputs = copy(old_block)(inputs)
for output_node, old_output_node in zip(outputs, old_block.outputs):
old_node_to_new[old_output_node] = output_node
inputs = []
for input_node in self.inputs:
inputs.append(old_node_to_new[input_node])
outputs = []
for output_node in self.outputs:
outputs.append(old_node_to_new[output_node])
return PlainGraph(inputs, outputs, override_hps=self.override_hps)
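# ----------------------------------------------------------------------
# Editorial sketch of the intended flow, using only the classes defined
# above (the input/output nodes and the HyperParameters instance are
# assumed to be supplied by the caller, e.g. an AutoModel):
#
#   hyper_graph = HyperGraph(inputs=inputs, outputs=outputs)
#   hp = kerastuner.HyperParameters()
#   preprocess_graph, keras_graph = hyper_graph.build_graphs(hp)
#   dataset, validation_data = preprocess_graph.preprocess(
#       dataset, validation_data, fit=True)
#   model = keras_graph.build(hp)  # a compiled tf.keras.Model
# ----------------------------------------------------------------------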
| import functools
import pickle
import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest
from autokeras.hypermodel import base
from autokeras.hypermodel import compiler
class Graph(kerastuner.engine.stateful.Stateful):
"""A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads.
# Arguments
inputs: A list of input node(s) for the Graph.
outputs: A list of output node(s) for the Graph.
override_hps: A list of HyperParameters. The predefined HyperParameters that
will override the space of the Hyperparameters defined in the Hypermodels
with the same names.
"""
def __init__(self, inputs, outputs, override_hps=None):
super().__init__()
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
self._node_to_id = {}
self._nodes = []
self.blocks = []
self._block_to_id = {}
self._build_network()
self.override_hps = override_hps or []
def compile(self, func):
"""Share the information between blocks by calling functions in compiler.
# Arguments
func: A dictionary. The keys are the block classes. The values are
corresponding compile functions.
"""
for block in self.blocks:
if block.__class__ in func:
func[block.__class__](block)
def _register_hps(self, hp):
"""Register the override HyperParameters for current HyperParameters."""
for single_hp in self.override_hps:
name = single_hp.name
if name not in hp.values:
hp.register(single_hp.name,
single_hp.__class__.__name__,
single_hp.get_config())
hp.values[name] = single_hp.default
def _build_network(self):
self._node_to_id = {}
# Recursively find all the interested nodes.
for input_node in self.inputs:
self._search_network(input_node, self.outputs, set(), set())
self._nodes = sorted(list(self._node_to_id.keys()),
key=lambda x: self._node_to_id[x])
for node in (self.inputs + self.outputs):
if node not in self._node_to_id:
raise ValueError('Inputs and outputs not connected.')
# Find the blocks.
blocks = []
for input_node in self._nodes:
for block in input_node.out_blocks:
if any([output_node in self._node_to_id
for output_node in block.outputs]) and block not in blocks:
blocks.append(block)
# Check if all the inputs of the blocks are set as inputs.
for block in blocks:
for input_node in block.inputs:
if input_node not in self._node_to_id:
raise ValueError('A required input is missing for HyperModel '
'{name}.'.format(name=block.name))
# Calculate the in degree of all the nodes
in_degree = [0] * len(self._nodes)
for node_id, node in enumerate(self._nodes):
in_degree[node_id] = len([
block for block in node.in_blocks if block in blocks])
# Add the blocks in topological order.
self.blocks = []
self._block_to_id = {}
while len(blocks) != 0:
new_added = []
# Collect blocks with in degree 0.
for block in blocks:
if any([in_degree[self._node_to_id[node]]
for node in block.inputs]):
continue
new_added.append(block)
# Remove the collected blocks from blocks.
for block in new_added:
blocks.remove(block)
for block in new_added:
# Add the collected blocks to the AutoModel.
self._add_block(block)
# Decrease the in degree of the output nodes.
for output_node in block.outputs:
if output_node not in self._node_to_id:
continue
output_node_id = self._node_to_id[output_node]
in_degree[output_node_id] -= 1
def _search_network(self, input_node, outputs, in_stack_nodes,
visited_nodes):
visited_nodes.add(input_node)
in_stack_nodes.add(input_node)
outputs_reached = False
if input_node in outputs:
outputs_reached = True
for block in input_node.out_blocks:
for output_node in block.outputs:
if output_node in in_stack_nodes:
raise ValueError('The network has a cycle.')
if output_node not in visited_nodes:
self._search_network(output_node, outputs, in_stack_nodes,
visited_nodes)
if output_node in self._node_to_id.keys():
outputs_reached = True
if outputs_reached:
self._add_node(input_node)
in_stack_nodes.remove(input_node)
def _add_block(self, block):
if block not in self.blocks:
block_id = len(self.blocks)
self._block_to_id[block] = block_id
self.blocks.append(block)
def _add_node(self, input_node):
if input_node not in self._node_to_id:
self._node_to_id[input_node] = len(self._node_to_id)
def _get_block(self, name):
for block in self.blocks:
if block.name == name:
return block
raise ValueError('Cannot find block named {name}.'.format(name=name))
def get_state(self):
# TODO: Include everything including the graph structure.
block_state = {str(block_id): block.get_state()
for block_id, block in enumerate(self.blocks)}
node_state = {str(node_id): node.get_state()
for node_id, node in enumerate(self._nodes)}
return {'blocks': block_state, 'nodes': node_state}
def set_state(self, state):
# TODO: Include everything including the graph structure.
block_state = state['blocks']
node_state = state['nodes']
for block_id, block in enumerate(self.blocks):
block.set_state(block_state[str(block_id)])
for node_id, node in enumerate(self._nodes):
node.set_state(node_state[str(node_id)])
def save(self, fname):
state = self.get_state()
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(state, f)
return str(fname)
def reload(self, fname):
with tf.io.gfile.GFile(fname, 'rb') as f:
state = pickle.load(f)
self.set_state(state)
def build(self, hp):
self._register_hps(hp)
class PlainGraph(Graph):
"""A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph.
A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function
returns an instance of PlainGraph, which can be directly built into a KerasGraph
and a PreprocessGraph.
# Arguments
inputs: A list of input node(s) for the PlainGraph.
outputs: A list of output node(s) for the PlainGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
self._keras_model_inputs = []
super().__init__(inputs=inputs, outputs=outputs, **kwargs)
def _build_network(self):
super()._build_network()
# Find the model input nodes
for node in self._nodes:
if self._is_keras_model_inputs(node):
self._keras_model_inputs.append(node)
self._keras_model_inputs = sorted(self._keras_model_inputs,
key=lambda x: self._node_to_id[x])
@staticmethod
def _is_keras_model_inputs(node):
for block in node.in_blocks:
if not isinstance(block, base.Preprocessor):
return False
for block in node.out_blocks:
if not isinstance(block, base.Preprocessor):
return True
return False
def build_keras_graph(self):
return KerasGraph(self._keras_model_inputs,
self.outputs,
override_hps=self.override_hps)
def build_preprocess_graph(self):
return PreprocessGraph(self.inputs,
self._keras_model_inputs,
override_hps=self.override_hps)
class KerasGraph(Graph, kerastuner.HyperModel):
"""A graph and HyperModel to be built into a Keras model."""
def build(self, hp):
"""Build the HyperModel into a Keras Model."""
super().build(hp)
self.compile(compiler.AFTER)
real_nodes = {}
for input_node in self.inputs:
node_id = self._node_to_id[input_node]
real_nodes[node_id] = input_node.build()
for block in self.blocks:
if isinstance(block, base.Preprocessor):
continue
temp_inputs = [real_nodes[self._node_to_id[input_node]]
for input_node in block.inputs]
outputs = block.build(hp, inputs=temp_inputs)
outputs = nest.flatten(outputs)
for output_node, real_output_node in zip(block.outputs, outputs):
real_nodes[self._node_to_id[output_node]] = real_output_node
model = tf.keras.Model(
[real_nodes[self._node_to_id[input_node]] for input_node in
self.inputs],
[real_nodes[self._node_to_id[output_node]] for output_node in
self.outputs])
return self._compile_keras_model(hp, model)
def _get_metrics(self):
metrics = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
metrics[block.name] = block.metrics
return metrics
def _get_loss(self):
loss = {}
for output_node in self.outputs:
block = output_node.in_blocks[0]
if isinstance(block, base.Head):
loss[block.name] = block.loss
return loss
def _compile_keras_model(self, hp, model):
# Specify hyperparameters from compile(...)
optimizer = hp.Choice('optimizer',
['adam', 'adadelta', 'sgd'],
default='adam')
model.compile(optimizer=optimizer,
metrics=self._get_metrics(),
loss=self._get_loss())
return model
class PreprocessGraph(Graph):
"""A graph consists of only Preprocessors.
It is both a search space with Hyperparameters and a model to be fitted. It
preprocess the dataset with the Preprocessors. The output is the input to the
Keras model. It does not extend Hypermodel class because it cannot be built into
a Keras model.
"""
def preprocess(self, dataset, validation_data=None, fit=False):
"""Preprocess the data to be ready for the Keras Model.
# Arguments
dataset: tf.data.Dataset. Training data.
validation_data: tf.data.Dataset. Validation data.
fit: Boolean. Whether to fit the preprocessing layers with x and y.
        # Returns
            If validation data is provided, a tuple of two preprocessed
            tf.data.Dataset, (train, validation). Otherwise, the preprocessed
            training dataset.
"""
dataset = self._preprocess(dataset, fit=fit)
if validation_data:
validation_data = self._preprocess(validation_data)
return dataset, validation_data
def _preprocess(self, dataset, fit=False):
# A list of input node ids in the same order as the x in the dataset.
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs]
# Iterate until all the model inputs have their data.
while set(map(lambda node: self._node_to_id[node], self.outputs)
) - set(input_node_ids):
# Gather the blocks for the next iteration over the dataset.
blocks = []
for node_id in input_node_ids:
for block in self._nodes[node_id].out_blocks:
if block in self.blocks:
blocks.append(block)
if fit:
# Iterate the dataset to fit the preprocessors in current depth.
self._fit(dataset, input_node_ids, blocks)
# Transform the dataset.
output_node_ids = []
dataset = dataset.map(functools.partial(
self._transform,
input_node_ids=input_node_ids,
output_node_ids=output_node_ids,
blocks=blocks,
fit=fit))
# Build input_node_ids for next depth.
input_node_ids = output_node_ids
return dataset
def _fit(self, dataset, input_node_ids, blocks):
# Iterate the dataset to fit the preprocessors in current depth.
for x, y in dataset:
x = nest.flatten(x)
id_to_data = {
node_id: temp_x for temp_x, node_id in zip(x, input_node_ids)
}
for block in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in block.inputs]
block.update(data, y=y)
# Finalize and set the shapes of the output nodes.
for block in blocks:
block.finalize()
nest.flatten(block.outputs)[0].shape = block.output_shape
def _transform(self,
x,
y,
input_node_ids,
output_node_ids,
blocks,
fit=False):
x = nest.flatten(x)
id_to_data = {
node_id: temp_x
for temp_x, node_id in zip(x, input_node_ids)
}
output_data = {}
# Transform each x by the corresponding block.
for hm in blocks:
data = [id_to_data[self._node_to_id[input_node]]
for input_node in hm.inputs]
data = tf.py_function(functools.partial(hm.transform, fit=fit),
inp=nest.flatten(data),
Tout=hm.output_types())
data = nest.flatten(data)[0]
data.set_shape(hm.output_shape)
output_data[self._node_to_id[hm.outputs[0]]] = data
        # Keep the Keras Model inputs even if they are not inputs to the blocks.
for node_id, data in id_to_data.items():
if self._nodes[node_id] in self.outputs:
output_data[node_id] = data
for node_id in sorted(output_data.keys()):
output_node_ids.append(node_id)
return tuple(map(
lambda node_id: output_data[node_id], output_node_ids)), y
def build(self, hp):
"""Obtain the values of all the HyperParameters.
        Different from the build function of Hypermodel, this build function does not
        produce a Keras model. It only obtains the hyperparameter values from the
        HyperParameters.
# Arguments
hp: HyperParameters.
"""
super().build(hp)
self.compile(compiler.BEFORE)
for block in self.blocks:
block.build(hp)
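# --- Illustrative usage sketch (not part of the original module) ---
# The call sequence implied by PreprocessGraph.preprocess above: fit the
# preprocessors on the training split once, then transform both splits.
# ``preprocess_graph`` is assumed to be an already-built PreprocessGraph and
# the datasets are assumed to be tf.data.Dataset objects yielding (x, y) pairs.
def _demo_preprocess(preprocess_graph, train_ds, val_ds):
    train_ds, val_ds = preprocess_graph.preprocess(
        train_ds, validation_data=val_ds, fit=True)
    return train_ds, val_ds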
def copy(old_instance):
instance = old_instance.__class__()
instance.set_state(old_instance.get_state())
return instance
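# --- Illustrative sketch (not part of the original module) ---
# The copy() helper above works for any object exposing the
# get_state()/set_state() protocol; _StateOnly below is a hypothetical
# stand-in used only to make the example self-contained.
def _demo_copy_helper():
    class _StateOnly:
        def __init__(self):
            self.state = {}

        def get_state(self):
            return dict(self.state)

        def set_state(self, state):
            self.state = dict(state)

    original = _StateOnly()
    original.state = {"name": "block_1", "built": False}
    duplicate = copy(original)  # new instance of the same class, same state
    assert duplicate is not original
    assert duplicate.get_state() == original.get_state()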
class HyperGraph(Graph):
"""A HyperModel based on connected Blocks and HyperBlocks.
# Arguments
inputs: A list of input node(s) for the HyperGraph.
outputs: A list of output node(s) for the HyperGraph.
"""
def __init__(self, inputs, outputs, **kwargs):
super().__init__(inputs, outputs, **kwargs)
self.compile(compiler.HYPER)
def build_graphs(self, hp):
plain_graph = self.hyper_build(hp)
preprocess_graph = plain_graph.build_preprocess_graph()
preprocess_graph.build(hp)
return (preprocess_graph,
plain_graph.build_keras_graph())
def hyper_build(self, hp):
"""Build a GraphHyperModel with no HyperBlock but only Block."""
# Make sure get_uid would count from start.
tf.keras.backend.clear_session()
inputs = []
old_node_to_new = {}
for old_input_node in self.inputs:
input_node = copy(old_input_node)
inputs.append(input_node)
old_node_to_new[old_input_node] = input_node
for old_block in self.blocks:
inputs = [old_node_to_new[input_node]
for input_node in old_block.inputs]
if isinstance(old_block, base.HyperBlock):
outputs = old_block.build(hp, inputs=inputs)
else:
outputs = copy(old_block)(inputs)
for output_node, old_output_node in zip(outputs, old_block.outputs):
old_node_to_new[old_output_node] = output_node
inputs = []
for input_node in self.inputs:
inputs.append(old_node_to_new[input_node])
outputs = []
for output_node in self.outputs:
outputs.append(old_node_to_new[output_node])
return PlainGraph(inputs, outputs, override_hps=self.override_hps) | en | 0.782003 | A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads. # Arguments inputs: A list of input node(s) for the Graph. outputs: A list of output node(s) for the Graph. override_hps: A list of HyperParameters. The predefined HyperParameters that will override the space of the Hyperparameters defined in the Hypermodels with the same names. Share the information between blocks by calling functions in compiler. # Arguments func: A dictionary. The keys are the block classes. The values are corresponding compile functions. Register the override HyperParameters for current HyperParameters. # Recursively find all the interested nodes. # Find the blocks. # Check if all the inputs of the blocks are set as inputs. # Calculate the in degree of all the nodes # Add the blocks in topological order. # Collect blocks with in degree 0. # Remove the collected blocks from blocks. # Add the collected blocks to the AutoModel. # Decrease the in degree of the output nodes. # TODO: Include everything including the graph structure. # TODO: Include everything including the graph structure. A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph. A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function returns an instance of PlainGraph, which can be directly built into a KerasGraph and a PreprocessGraph. # Arguments inputs: A list of input node(s) for the PlainGraph. outputs: A list of output node(s) for the PlainGraph. # Find the model input nodes A graph and HyperModel to be built into a Keras model. Build the HyperModel into a Keras Model. # Specify hyperparameters from compile(...) A graph consists of only Preprocessors. It is both a search space with Hyperparameters and a model to be fitted. It preprocess the dataset with the Preprocessors. The output is the input to the Keras model. It does not extend Hypermodel class because it cannot be built into a Keras model. Preprocess the data to be ready for the Keras Model. # Arguments dataset: tf.data.Dataset. Training data. validation_data: tf.data.Dataset. Validation data. fit: Boolean. Whether to fit the preprocessing layers with x and y. # Returns if validation data is provided. A tuple of two preprocessed tf.data.Dataset, (train, validation). Otherwise, return the training dataset. # A list of input node ids in the same order as the x in the dataset. # Iterate until all the model inputs have their data. # Gather the blocks for the next iteration over the dataset. # Iterate the dataset to fit the preprocessors in current depth. # Transform the dataset. # Build input_node_ids for next depth. # Iterate the dataset to fit the preprocessors in current depth. # Finalize and set the shapes of the output nodes. # Transform each x by the corresponding block. # Keep the Keras Model inputs even they are not inputs to the blocks. Obtain the values of all the HyperParameters. Different from the build function of Hypermodel. This build function does not produce a Keras model. It only obtain the hyperparameter values from HyperParameters. # Arguments hp: HyperParameters. A HyperModel based on connected Blocks and HyperBlocks. # Arguments inputs: A list of input node(s) for the HyperGraph. outputs: A list of output node(s) for the HyperGraph. Build a GraphHyperModel with no HyperBlock but only Block. # Make sure get_uid would count from start. | 2.745121 | 3 |
Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 388 | 8914 | '''
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution(object):
def longestValidParentheses(self, s):
ans=0
stack=[-1]
for i in range(len(s)):
if(s[i]=='('):
stack.append(i)
else:
stack.pop()
if(len(stack)==0):
stack.append(i)
else:
ans=max(ans,i-stack[-1])
return ans | '''
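# --- Hypothetical usage sketch (not part of the original solution) ---
# A few sample inputs with their expected answers for the stack-based scan above.
def _demo_longest_valid_parentheses():
    solver = Solution()
    assert solver.longestValidParentheses("(()") == 2
    assert solver.longestValidParentheses(")()())") == 4
    assert solver.longestValidParentheses("") == 0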
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution(object):
def longestValidParentheses(self, s):
ans=0
stack=[-1]
for i in range(len(s)):
if(s[i]=='('):
stack.append(i)
else:
stack.pop()
if(len(stack)==0):
stack.append(i)
else:
ans=max(ans,i-stack[-1])
return ans | en | 0.806613 | Speed: 95.97% Memory: 24.96% Time complexity: O(n) Space complexity: O(n) | 3.479942 | 3 |
setup.py | i25ffz/openaes | 0 | 8915 | <reponame>i25ffz/openaes
from distutils.core import setup, Extension
import os.path
kw = {
'name':"PyOpenAES",
'version':"0.10.0",
'description':"OpenAES cryptographic library for Python.",
'ext_modules':[
Extension(
'openaes',
include_dirs = ['inc', 'src/isaac'],
# define_macros=[('ENABLE_PYTHON', '1')],
sources = [
os.path.join('src/oaes_lib.c'),
os.path.join('src/oaes_py.c'),
os.path.join('src/isaac/rand.c')
]
)
]
}
setup(**kw) | from distutils.core import setup, Extension
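# Usage note (not part of the original setup script): the standard distutils
# commands apply, e.g.
#   python setup.py build_ext --inplace   # compile the 'openaes' extension in place
#   python setup.py install               # build and install the package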
import os.path
kw = {
'name':"PyOpenAES",
'version':"0.10.0",
'description':"OpenAES cryptographic library for Python.",
'ext_modules':[
Extension(
'openaes',
include_dirs = ['inc', 'src/isaac'],
# define_macros=[('ENABLE_PYTHON', '1')],
sources = [
os.path.join('src/oaes_lib.c'),
os.path.join('src/oaes_py.c'),
os.path.join('src/isaac/rand.c')
]
)
]
}
setup(**kw) | it | 0.148557 | # define_macros=[('ENABLE_PYTHON', '1')], | 1.283661 | 1 |
examples/isosurface_demo2.py | jayvdb/scitools | 62 | 8916 | #!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html
from scitools.easyviz import *
from time import sleep
from scipy import io
setp(interactive=False)
# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
# 'FaceColor',[1,.75,.65],...
# 'EdgeColor','none');
shading('interp')
# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
# 'FaceColor','interp',...
# 'EdgeColor','none');
#colormap(map)
# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])
# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)
show()
raw_input('Press Return key to quit: ')
#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
| #!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html
from scitools.easyviz import *
from time import sleep
from scipy import io
setp(interactive=False)
# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
# 'FaceColor',[1,.75,.65],...
# 'EdgeColor','none');
shading('interp')
# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
# 'FaceColor','interp',...
# 'EdgeColor','none');
#colormap(map)
# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])
# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)
show()
raw_input('Press Return key to quit: ')
#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
| en | 0.388866 | #!/usr/bin/env python # Example taken from: # http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html # Displaying an Isosurface: #Ds = smooth3(D); #hiso = isosurface(Ds,5), # 'FaceColor',[1,.75,.65],... # 'EdgeColor','none'); # Adding an Isocap to Show a Cutaway Surface: #hcap = patch(isocaps(D,5),... # 'FaceColor','interp',... # 'EdgeColor','none'); #colormap(map) # Define the View: # Add Lighting: #lightangle(45,30); #set(gcf,'Renderer','zbuffer'); lighting phong #isonormals(Ds,hiso) #set(hcap,'AmbientStrength',.6) #set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50) #savefig('tmp_isosurf2a.eps') #savefig('tmp_isosurf2a.png') | 2.55106 | 3 |
structural_model/util_morphology.py | zibneuro/udvary-et-al-2022 | 1 | 8917 | <gh_stars>1-10
import os
import numpy as np
import json
import util_amira
def getEdgeLabelName(label):
if(label == 6):
return "axon"
elif(label == 4):
return "apical"
elif(label == 5):
return "basal"
elif(label == 7):
return "soma"
else:
return "other"
def getSomaPosition(points):
somaPos = []
for p in points:
if(p["edge_label"] == "soma"):
somaPos.append(p["position"])
return np.mean(np.vstack(tuple(somaPos)), axis=0)
def loadAmiraExport(filename):
with open(filename) as f:
lines = f.readlines()
labels = lines[0].rstrip().split(",")
points = []
for i in range(1, len(lines)):
line = lines[i].rstrip().split(",")
point = {}
point["edge_id"] = int(line[labels.index("edge_id")])
point["source_node_id"] = int(line[labels.index("source_node")])
point["target_node_id"] = int(line[labels.index("target_node")])
point["edge_label"] = getEdgeLabelName(
int(line[labels.index("edge_label")]))
point["edge_point_id"] = int(line[labels.index("edge_point")])
point["position"] = np.array([float(line[labels.index("x")]), float(
line[labels.index("y")]), float(line[labels.index("z")])])
point["radius"] = float(line[labels.index("radius")])
point["inside_vS1"] = int(line[labels.index("inside_vS1")])
if(point["edge_label"] != "other"):
points.append(point)
return points
def separateCompartments(edgePoints):
apical = []
basal = []
axon = []
for edgePoint in edgePoints:
if(edgePoint["edge_label"] == "apical"):
apical.append(edgePoint)
elif(edgePoint["edge_label"] == "basal"):
basal.append(edgePoint)
elif(edgePoint["edge_label"] == "axon"):
axon.append(edgePoint)
compartments = {}
compartments["apical"] = apical
compartments["basal"] = basal
compartments["axon"] = axon
return compartments
def loadGraphset(networkDir):
if(os.path.exists(os.path.join(networkDir, "morphologies", "Morphologies.am"))):
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "Morphologies.am"), legacy=False)
else:
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "MorphologiesWithNeuronIDs.am"), legacy=True)
return graphset
def writeToCache(filename, transformation, neuronId):
transformationFile = "/tmp/transformation_{}".format(neuronId)
np.savetxt(transformationFile, transformation)
meta = {
"morphologyFile" : filename,
"transformationFile" : transformationFile
}
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile, "w") as f:
print("meta", meta)
json.dump(meta, f)
def readFromCache(neuronId):
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile) as f:
meta = json.load(f)
transformationFile = meta["transformationFile"]
T = np.loadtxt(transformationFile)
morphologyFile = meta["morphologyFile"]
return morphologyFile, T
def loadAxon(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
idx = len(graphset[neuronId]) - 1
filename = graphset[neuronId][idx]["file"]
T = graphset[neuronId][idx]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T)
def loadDendrite(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
filename = graphset[neuronId][0]["file"]
T = graphset[neuronId][0]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T) | import os
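# --- Illustrative sketch (not part of the original module) ---
# Typical chaining of the export helpers above; the CSV path is a placeholder
# and the export is assumed to contain soma-labelled points.
def _demo_compartments(csv_path="/tmp/amira_export.csv"):
    points = loadAmiraExport(csv_path)           # parse the Amira CSV export
    compartments = separateCompartments(points)  # split into apical/basal/axon
    soma_position = getSomaPosition(points)      # mean position of the soma points
    return compartments, soma_position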
import numpy as np
import json
import util_amira
def getEdgeLabelName(label):
if(label == 6):
return "axon"
elif(label == 4):
return "apical"
elif(label == 5):
return "basal"
elif(label == 7):
return "soma"
else:
return "other"
def getSomaPosition(points):
somaPos = []
for p in points:
if(p["edge_label"] == "soma"):
somaPos.append(p["position"])
return np.mean(np.vstack(tuple(somaPos)), axis=0)
def loadAmiraExport(filename):
with open(filename) as f:
lines = f.readlines()
labels = lines[0].rstrip().split(",")
points = []
for i in range(1, len(lines)):
line = lines[i].rstrip().split(",")
point = {}
point["edge_id"] = int(line[labels.index("edge_id")])
point["source_node_id"] = int(line[labels.index("source_node")])
point["target_node_id"] = int(line[labels.index("target_node")])
point["edge_label"] = getEdgeLabelName(
int(line[labels.index("edge_label")]))
point["edge_point_id"] = int(line[labels.index("edge_point")])
point["position"] = np.array([float(line[labels.index("x")]), float(
line[labels.index("y")]), float(line[labels.index("z")])])
point["radius"] = float(line[labels.index("radius")])
point["inside_vS1"] = int(line[labels.index("inside_vS1")])
if(point["edge_label"] != "other"):
points.append(point)
return points
def separateCompartments(edgePoints):
apical = []
basal = []
axon = []
for edgePoint in edgePoints:
if(edgePoint["edge_label"] == "apical"):
apical.append(edgePoint)
elif(edgePoint["edge_label"] == "basal"):
basal.append(edgePoint)
elif(edgePoint["edge_label"] == "axon"):
axon.append(edgePoint)
compartments = {}
compartments["apical"] = apical
compartments["basal"] = basal
compartments["axon"] = axon
return compartments
def loadGraphset(networkDir):
if(os.path.exists(os.path.join(networkDir, "morphologies", "Morphologies.am"))):
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "Morphologies.am"), legacy=False)
else:
graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "MorphologiesWithNeuronIDs.am"), legacy=True)
return graphset
def writeToCache(filename, transformation, neuronId):
transformationFile = "/tmp/transformation_{}".format(neuronId)
np.savetxt(transformationFile, transformation)
meta = {
"morphologyFile" : filename,
"transformationFile" : transformationFile
}
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile, "w") as f:
print("meta", meta)
json.dump(meta, f)
def readFromCache(neuronId):
metaFile = "/tmp/meta_{}.json".format(neuronId)
with open(metaFile) as f:
meta = json.load(f)
transformationFile = meta["transformationFile"]
T = np.loadtxt(transformationFile)
morphologyFile = meta["morphologyFile"]
return morphologyFile, T
def loadAxon(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
idx = len(graphset[neuronId]) - 1
filename = graphset[neuronId][idx]["file"]
T = graphset[neuronId][idx]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T)
def loadDendrite(graphset, neuronId, saveToCache = False, loadFromCache = False):
if(loadFromCache):
filename, T = readFromCache(neuronId)
else:
filename = graphset[neuronId][0]["file"]
T = graphset[neuronId][0]["transformation"]
if(saveToCache):
writeToCache(filename, T, neuronId)
return util_amira.readSpatialGraph(filename, T) | none | 1 | 2.742996 | 3 |
|
invenio_madmp/views.py | FAIR-Data-Austria/invenio-madmp | 1 | 8918 | """Blueprint definitions for maDMP integration."""
from flask import Blueprint, jsonify, request
from invenio_db import db
from .convert import convert_dmp
from .models import DataManagementPlan
def _summarize_dmp(dmp: DataManagementPlan) -> dict:
"""Create a summary dictionary for the given DMP."""
res = {"dmp_id": dmp.dmp_id, "datasets": []}
for ds in dmp.datasets:
dataset = {"dataset_id": ds.dataset_id, "record": None}
if ds.record:
dataset["record"] = ds.record.model.json
res["datasets"].append(dataset)
return res
def create_rest_blueprint(app) -> Blueprint:
"""Create the blueprint for the REST endpoints using the current app extensions."""
# note: using flask.current_app isn't directly possible, because Invenio-MaDMP is
# registered as an extension in the API app, not the "normal" app
# (which is the one usually returned by current_app)
rest_blueprint = Blueprint("invenio_madmp", __name__)
auth = app.extensions["invenio-madmp"].auth
@rest_blueprint.route("/dmps", methods=["GET"])
@auth.login_required
def list_dmps():
"""Give a summary of all stored DMPs."""
dmps = DataManagementPlan.query.all()
res = [_summarize_dmp(dmp) for dmp in dmps]
return jsonify(res)
@rest_blueprint.route("/dmps", methods=["POST"])
@auth.login_required
def create_dmp():
"""Create a new DMP from the maDMP JSON in the request body."""
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if DataManagementPlan.get_by_dmp_id(dmp_json_id) is not None:
return jsonify({"error": "dmp with the same id already exists"}), 409
dmp = convert_dmp(dmp_json)
db.session.add(dmp)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp)), 201
@rest_blueprint.route("/dmps/<dmp_id>", methods=["PATCH"])
@auth.login_required
def update_dmp(dmp_id: str = None):
"""Update the specified DMP using the maDMP JSON in the request body."""
hard_sync = request.args.get("sync", "soft") == "hard"
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if dmp_id and dmp_json_id and dmp_id != dmp_json_id:
return jsonify({"error": "mismatch between dmp id from url and body"}), 400
dmp_id = dmp_id or dmp_json_id
if DataManagementPlan.get_by_dmp_id(dmp_id) is None:
return jsonify({"error": "dmp not found"}), 404
dmp = convert_dmp(dmp_json, hard_sync)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp))
@rest_blueprint.route("/dmps", methods=["PATCH"])
@auth.login_required
def update_dmp_without_id():
"""Update the specified DMP using the maDMP JSON in the request body."""
return update_dmp(None)
return rest_blueprint
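# --- Hypothetical wiring sketch (not part of the original module) ---
# How the blueprint factory above might be used; assumes ``app`` is a Flask
# application that already has the "invenio-madmp" extension initialised.
# The "/madmp" URL prefix is an illustrative choice, not taken from the package.
def _register_rest_blueprint(app):
    blueprint = create_rest_blueprint(app)
    app.register_blueprint(blueprint, url_prefix="/madmp")
    return app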
| """Blueprint definitions for maDMP integration."""
from flask import Blueprint, jsonify, request
from invenio_db import db
from .convert import convert_dmp
from .models import DataManagementPlan
def _summarize_dmp(dmp: DataManagementPlan) -> dict:
"""Create a summary dictionary for the given DMP."""
res = {"dmp_id": dmp.dmp_id, "datasets": []}
for ds in dmp.datasets:
dataset = {"dataset_id": ds.dataset_id, "record": None}
if ds.record:
dataset["record"] = ds.record.model.json
res["datasets"].append(dataset)
return res
def create_rest_blueprint(app) -> Blueprint:
"""Create the blueprint for the REST endpoints using the current app extensions."""
# note: using flask.current_app isn't directly possible, because Invenio-MaDMP is
# registered as an extension in the API app, not the "normal" app
# (which is the one usually returned by current_app)
rest_blueprint = Blueprint("invenio_madmp", __name__)
auth = app.extensions["invenio-madmp"].auth
@rest_blueprint.route("/dmps", methods=["GET"])
@auth.login_required
def list_dmps():
"""Give a summary of all stored DMPs."""
dmps = DataManagementPlan.query.all()
res = [_summarize_dmp(dmp) for dmp in dmps]
return jsonify(res)
@rest_blueprint.route("/dmps", methods=["POST"])
@auth.login_required
def create_dmp():
"""Create a new DMP from the maDMP JSON in the request body."""
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if DataManagementPlan.get_by_dmp_id(dmp_json_id) is not None:
return jsonify({"error": "dmp with the same id already exists"}), 409
dmp = convert_dmp(dmp_json)
db.session.add(dmp)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp)), 201
@rest_blueprint.route("/dmps/<dmp_id>", methods=["PATCH"])
@auth.login_required
def update_dmp(dmp_id: str = None):
"""Update the specified DMP using the maDMP JSON in the request body."""
hard_sync = request.args.get("sync", "soft") == "hard"
if request.json is None:
return jsonify({"error": "no json body supplied"}), 400
elif request.json.get("dmp") is None:
return jsonify({"error": "dmp not found in the body"}), 400
dmp_json = request.json.get("dmp", {})
dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier")
if dmp_id and dmp_json_id and dmp_id != dmp_json_id:
return jsonify({"error": "mismatch between dmp id from url and body"}), 400
dmp_id = dmp_id or dmp_json_id
if DataManagementPlan.get_by_dmp_id(dmp_id) is None:
return jsonify({"error": "dmp not found"}), 404
dmp = convert_dmp(dmp_json, hard_sync)
db.session.commit()
# TODO change the returned value
return jsonify(_summarize_dmp(dmp))
@rest_blueprint.route("/dmps", methods=["PATCH"])
@auth.login_required
def update_dmp_without_id():
"""Update the specified DMP using the maDMP JSON in the request body."""
return update_dmp(None)
return rest_blueprint
| en | 0.768715 | Blueprint definitions for maDMP integration. Create a summary dictionary for the given DMP. Create the blueprint for the REST endpoints using the current app extensions. # note: using flask.current_app isn't directly possible, because Invenio-MaDMP is # registered as an extension in the API app, not the "normal" app # (which is the one usually returned by current_app) Give a summary of all stored DMPs. Create a new DMP from the maDMP JSON in the request body. # TODO change the returned value Update the specified DMP using the maDMP JSON in the request body. # TODO change the returned value Update the specified DMP using the maDMP JSON in the request body. | 2.51376 | 3 |
retrieval/urls.py | aipassio/visual_retrieval | 0 | 8919 | <gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
path('retrieval_get', views.retrieval_get, name='retrieval_get')
] | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
path('retrieval_get', views.retrieval_get, name='retrieval_get')
] | none | 1 | 1.656419 | 2 |
|
scripts/Interfacing/encoder_class.py | noshluk2/Wifi-Signal-Robot-localization | 0 | 8920 | <reponame>noshluk2/Wifi-Signal-Robot-localization<filename>scripts/Interfacing/encoder_class.py
import RPi.GPIO as GPIO
import threading
class Encoder(object):
def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
GPIO.setmode(GPIO.BCM)
GPIO.setup(r_en_a, GPIO.IN)
GPIO.setup(r_en_b, GPIO.IN)
GPIO.setup(l_en_a, GPIO.IN)
GPIO.setup(l_en_b, GPIO.IN)
self.l_en_a=l_en_a;self.l_en_b=l_en_b;
self.r_en_a=r_en_a;self.r_en_b=r_en_b;
GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
self.count_R =0
self.count_L=0
def Update_encR(self,channel):
if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
self.count_R=self.count_R + 1
else :
self.count_R = self.count_R - 1
def Update_encL(self,channel):
if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
self.count_L=self.count_L + 1
else :
self.count_L = self.count_L - 1
return (self.count_L)
def get_r_enc(self):
return self.count_R
def get_l_enc(self):
return self.count_L
def clear_encoders(self):
self.count_R=0
self.count_L=0
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders() | import RPi.GPIO as GPIO
import threading
class Encoder(object):
def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
GPIO.setmode(GPIO.BCM)
GPIO.setup(r_en_a, GPIO.IN)
GPIO.setup(r_en_b, GPIO.IN)
GPIO.setup(l_en_a, GPIO.IN)
GPIO.setup(l_en_b, GPIO.IN)
self.l_en_a=l_en_a;self.l_en_b=l_en_b;
self.r_en_a=r_en_a;self.r_en_b=r_en_b;
GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
self.count_R =0
self.count_L=0
def Update_encR(self,channel):
if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
self.count_R=self.count_R + 1
else :
self.count_R = self.count_R - 1
def Update_encL(self,channel):
if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
self.count_L=self.count_L + 1
else :
self.count_L = self.count_L - 1
return (self.count_L)
def get_r_enc(self):
return self.count_R
def get_l_enc(self):
return self.count_L
def clear_encoders(self):
self.count_R=0
self.count_L=0
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders() | es | 0.094661 | # r_en_a = 27 # r_en_b = 10 # l_en_a = 5 # l_en_b = 6 # enc_obj = Encoder(27,10,5,6) # def update_encoders(): # threading.Timer(1,update_encoders).start() # print(" looping ") # update_encoders() | 2.770159 | 3 |
systori/apps/equipment/urls.py | systori/systori | 12 | 8921 | <filename>systori/apps/equipment/urls.py
from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate
urlpatterns = [
# two url rules to make the active_filter keyword optional
url(
r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
),
url(
r"^equipment/(?P<active_filter>[\w-]+)$",
office_auth(EquipmentListView.as_view()),
name="equipment.list",
),
url(
r"^equipment-(?P<pk>\d+)$",
office_auth(EquipmentView.as_view()),
name="equipment.view",
),
url(
r"^create-equipment$",
office_auth(EquipmentCreate.as_view()),
name="equipment.create",
),
url(
r"^equipment-(?P<pk>\d+)/edit$",
office_auth(EquipmentUpdate.as_view()),
name="equipment.edit",
),
url(
r"^equipment-(?P<pk>\d+)/delete$",
office_auth(EquipmentDelete.as_view()),
name="equipment.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
office_auth(RefuelingStopCreate.as_view()),
name="refueling_stop.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
office_auth(RefuelingStopUpdate.as_view()),
name="refueling_stop.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete",
office_auth(RefuelingStopDelete.as_view()),
name="refueling_stop.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-maintenance",
office_auth(MaintenanceCreate.as_view()),
name="maintenance.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
office_auth(MaintenanceUpdate.as_view()),
name="maintenance.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete",
office_auth(MaintenanceDelete.as_view()),
name="maintenance.delete",
),
]
| <filename>systori/apps/equipment/urls.py
from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate
urlpatterns = [
# two url rules to make the active_filter keyword optional
url(
r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
),
url(
r"^equipment/(?P<active_filter>[\w-]+)$",
office_auth(EquipmentListView.as_view()),
name="equipment.list",
),
url(
r"^equipment-(?P<pk>\d+)$",
office_auth(EquipmentView.as_view()),
name="equipment.view",
),
url(
r"^create-equipment$",
office_auth(EquipmentCreate.as_view()),
name="equipment.create",
),
url(
r"^equipment-(?P<pk>\d+)/edit$",
office_auth(EquipmentUpdate.as_view()),
name="equipment.edit",
),
url(
r"^equipment-(?P<pk>\d+)/delete$",
office_auth(EquipmentDelete.as_view()),
name="equipment.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
office_auth(RefuelingStopCreate.as_view()),
name="refueling_stop.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
office_auth(RefuelingStopUpdate.as_view()),
name="refueling_stop.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete",
office_auth(RefuelingStopDelete.as_view()),
name="refueling_stop.delete",
),
url(
r"^equipment-(?P<pk>\d+)/create-maintenance",
office_auth(MaintenanceCreate.as_view()),
name="maintenance.create",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
office_auth(MaintenanceUpdate.as_view()),
name="maintenance.update",
),
url(
r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete",
office_auth(MaintenanceDelete.as_view()),
name="maintenance.delete",
),
]
| en | 0.686743 | # two url rules to make the active_filter keyword optional | 2.071165 | 2 |
40_3.py | rursvd/pynumerical2 | 0 | 8922 | <reponame>rursvd/pynumerical2<gh_stars>0
from numpy import zeros
# Define ab2 function
def ab2(f,t0,tf,y0,n):
h = (tf - t0)/n
t = zeros(n+1)
y = zeros(n+1)
t[0] = t0
y[0] = y0
y[1] = y[0] + h * f(t[0],y[0])
t[1] = t[0] + h
for i in range(1,n):
y[i+1] = y[i] + (3.0/2.0) * h * f(t[i],y[i])-1.0/2.0 * h * f(t[i-1],y[i-1])
t[i+1] = t[i] + h
return t,y
# Define functions
def f(t,y):
return t - y
# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)
# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
print("%8.4f %8.4f" % (t[i],yab2[i]))
| from numpy import zeros
# Define ab2 function
def ab2(f,t0,tf,y0,n):
h = (tf - t0)/n
t = zeros(n+1)
y = zeros(n+1)
t[0] = t0
y[0] = y0
y[1] = y[0] + h * f(t[0],y[0])
t[1] = t[0] + h
for i in range(1,n):
y[i+1] = y[i] + (3.0/2.0) * h * f(t[i],y[i])-1.0/2.0 * h * f(t[i-1],y[i-1])
t[i+1] = t[i] + h
return t,y
# Define functions
def f(t,y):
return t - y
# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5
# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)
# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
print("%8.4f %8.4f" % (t[i],yab2[i])) | en | 0.49442 | # Define ab2 function # Define functions # Set initial conditions # Execute AB2 # Print results | 3.163809 | 3 |
test/test_parse_cs.py | NeonDaniel/lingua-franca | 0 | 8923 | <reponame>NeonDaniel/lingua-franca
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, timedelta
from lingua_franca import get_default_lang, set_default_lang, \
load_language, unload_language
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_duration
from lingua_franca.parse import extract_number, extract_numbers
from lingua_franca.parse import fuzzy_match
from lingua_franca.parse import get_gender
from lingua_franca.parse import match_one
from lingua_franca.parse import normalize
def setUpModule():
load_language("cs-cz")
set_default_lang("cs")
def tearDownModule():
unload_language("cs")
class TestFuzzyMatch(unittest.TestCase):
def test_matches(self):
self.assertTrue(fuzzy_match("ty a já", "ty a já") >= 1.0)
self.assertTrue(fuzzy_match("ty a já", "ty") < 0.5)
self.assertTrue(fuzzy_match("Ty", "ty") >= 0.5)
self.assertTrue(fuzzy_match("ty a já", "ty") ==
fuzzy_match("ty", "ty a já"))
self.assertTrue(fuzzy_match("ty a já", "on nebo oni") < 0.23)
def test_match_one(self):
# test list of choices
choices = ['frank', 'kate', 'harry', 'henry']
self.assertEqual(match_one('frank', choices)[0], 'frank')
self.assertEqual(match_one('fran', choices)[0], 'frank')
self.assertEqual(match_one('enry', choices)[0], 'henry')
self.assertEqual(match_one('katt', choices)[0], 'kate')
# test dictionary of choices
choices = {'frank': 1, 'kate': 2, 'harry': 3, 'henry': 4}
self.assertEqual(match_one('frank', choices)[0], 1)
self.assertEqual(match_one('enry', choices)[0], 4)
class TestNormalize(unittest.TestCase):
def test_extract_number(self):
self.assertEqual(extract_number("tohle je první test",
ordinals=True), 1)
self.assertEqual(extract_number("tohle je 2 test"), 2)
self.assertEqual(extract_number("tohle je druhý test",
ordinals=True), 2)
#self.assertEqual(extract_number("tohle je třetí test"), 1.0 / 3.0)
self.assertEqual(extract_number("tohle je třetí test",
ordinals=True), 3.0)
self.assertEqual(extract_number("ten čtvrtý", ordinals=True), 4.0)
self.assertEqual(extract_number(
"ten třicátý šestý", ordinals=True), 36.0)
self.assertEqual(extract_number("tohle je test číslo 4"), 4)
self.assertEqual(extract_number("jedna třetina šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("tři šálky"), 3)
self.assertEqual(extract_number("1/3 šálku"), 1.0 / 3.0)
self.assertEqual(extract_number("čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("1/4 cup"), 0.25)
self.assertEqual(extract_number("jedna čtvrtina šálku"), 0.25)
self.assertEqual(extract_number("2/3 šálků"), 2.0 / 3.0)
self.assertEqual(extract_number("3/4 šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("1 a 3/4 šálků"), 1.75)
self.assertEqual(extract_number("1 šálek a půl"), 1.5)
self.assertEqual(extract_number("jeden šálek a polovina"), 1.5)
self.assertEqual(extract_number("jedna a půl šálků"), 1.5)
self.assertEqual(extract_number("jedna a jedna polovina šálků"), 1.5)
self.assertEqual(extract_number("tři čtvrtina šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("tři čtvrtiny šálků"), 3.0 / 4.0)
self.assertEqual(extract_number("dvacet dva"), 22)
self.assertEqual(extract_number(
"Dvacet dva s velkým písmenam na začátku"), 22)
self.assertEqual(extract_number(
"dvacet Dva s dva krát velkým písmem"), 22)
self.assertEqual(extract_number(
"dvacet Dva s různou velikostí písmen"), 22)
self.assertEqual(extract_number("Dvacet dva a Tři Pětiny"), 22.6)
self.assertEqual(extract_number("dvě sto"), 200)
self.assertEqual(extract_number("devět tisíc"), 9000)
self.assertEqual(extract_number("šest sto šedesát šest"), 666)
self.assertEqual(extract_number("dva million"), 2000000)
self.assertEqual(extract_number("dva million pět sto tisíc "
"tun žhavého kovu"), 2500000)
self.assertEqual(extract_number("šest trillion"), 6000000000000.0)
self.assertEqual(extract_number("šest trilion", short_scale=False),
6e+18)
self.assertEqual(extract_number("jedna tečka pět"), 1.5)
self.assertEqual(extract_number("tři tečka čtrnáct"), 3.14)
self.assertEqual(extract_number("nula tečka dva"), 0.2)
self.assertEqual(extract_number("billion roků "),
1000000000.0)
self.assertEqual(extract_number("bilion roků",
short_scale=False),
1000000000000.0)
self.assertEqual(extract_number("jedno sto tisíc"), 100000)
self.assertEqual(extract_number("mínus 2"), -2)
self.assertEqual(extract_number("záporné sedmdesát"), -70)
self.assertEqual(extract_number("tisíc million"), 1000000000)
self.assertEqual(extract_number("miliarda", short_scale=False),
1000000000)
self.assertEqual(extract_number("šestina třetina"),
1 / 6 / 3)
self.assertEqual(extract_number("šestina třetí", ordinals=True),
3)
self.assertEqual(extract_number("třicet sekund"), 30)
self.assertEqual(extract_number("třicátý druhý", ordinals=True), 32)
self.assertEqual(extract_number("tohle je billiontý test",
ordinals=True), 1e09)
print("tohle udělat později")
#self.assertEqual(extract_number("tohle je billiontý test"), 1e-9)
self.assertEqual(extract_number("tohle je biliontý test",
ordinals=True,
short_scale=False), 1e12)
print("tohle udělat později")
# self.assertEqual(extract_number("tohle je biliontý test",
# short_scale=False), 1e-12)
# Verify non-power multiples of ten no longer discard
# adjacent multipliers
self.assertEqual(extract_number("dvacet tisíc"), 20000)
self.assertEqual(extract_number("padesát million"), 50000000)
# Verify smaller powers of ten no longer cause miscalculation of larger
# powers of ten (see MycroftAI#86)
self.assertEqual(extract_number("dvacet billion tři sto million \
devět sto padesát tisíc šest sto \
sedmdesát pět tečka osm"),
20300950675.8)
self.assertEqual(extract_number("devět sto devadesát devět million devět \
sto devadesát devět tisíc devět \
sto devadesát devět tečka devět"),
999999999.9)
# TODO why does "trillion" result in xxxx.0?
self.assertEqual(extract_number("osm sto trillion dva sto \
padesát sedm"), 800000000000257.0)
# TODO handle this case
# self.assertEqual(
# extract_number("6 dot six six six"),
# 6.666)
self.assertTrue(extract_number("Tenisový hráč je rychlý") is False)
self.assertTrue(extract_number("křehký") is False)
self.assertTrue(extract_number("křehká nula") is not False)
self.assertEqual(extract_number("křehká nula"), 0)
#self.assertTrue(extract_number("grobo 0") is not False)
#self.assertEqual(extract_number("grobo 0"), 0)
self.assertEqual(extract_number("dvojice piv"), 2)
self.assertEqual(extract_number("dvojice sto piv"), 200)
self.assertEqual(extract_number("dvojice tisíc piv"), 2000)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=True), 7)
self.assertEqual(extract_number(
"tohle je 7 test", ordinals=False), 7)
self.assertTrue(extract_number("tohle je n. test") is False)
self.assertEqual(extract_number("tohle je 1. test"), 1)
self.assertEqual(extract_number("tohle je 2. test"), 2)
self.assertEqual(extract_number("tohle je 3. test"), 3)
self.assertEqual(extract_number("tohle je 31. test"), 31)
self.assertEqual(extract_number("tohle je 32. test"), 32)
self.assertEqual(extract_number("tohle je 33. test"), 33)
self.assertEqual(extract_number("tohle je 34. test"), 34)
self.assertEqual(extract_number("celkem 100%"), 100)
def test_extract_duration_cs(self):
self.assertEqual(extract_duration("10 sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5 minut"),
(timedelta(minutes=5), ""))
self.assertEqual(extract_duration("2 hodiny"),
(timedelta(hours=2), ""))
self.assertEqual(extract_duration("3 dny"),
(timedelta(days=3), ""))
self.assertEqual(extract_duration("25 týdnů"),
(timedelta(weeks=25), ""))
self.assertEqual(extract_duration("sedm hodin"),
(timedelta(hours=7), ""))
self.assertEqual(extract_duration("7.5 sekund"),
(timedelta(seconds=7.5), ""))
self.assertEqual(extract_duration("osm a polovina dne třicet"
" devět sekund"),
(timedelta(days=8.5, seconds=39), ""))
self.assertEqual(extract_duration("Nastav časovač na 30 minut"),
(timedelta(minutes=30), "nastav časovač na"))
self.assertEqual(extract_duration("Čtyři a půl minuty do"
" západu"),
(timedelta(minutes=4.5), "do západu"))
self.assertEqual(extract_duration("devatenáct minut po hodině"),
(timedelta(minutes=19), "po hodině"))
self.assertEqual(extract_duration("vzbuď mě za tři týdny, čtyři"
" sto devadesát sedm dní, a"
" tři sto 91.6 sekund"),
(timedelta(weeks=3, days=497, seconds=391.6),
"vzbuď mě za , , a"))
self.assertEqual(extract_duration("film je jedna hodina, padesát sedm"
" a půl minuty dlouhý"),
(timedelta(hours=1, minutes=57.5),
"film je , dlouhý"))
self.assertEqual(extract_duration("10-sekund"),
(timedelta(seconds=10.0), ""))
self.assertEqual(extract_duration("5-minut"),
(timedelta(minutes=5), ""))
def test_extractdatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 13, 4) # Tue June 27, 2017 @ 1:04pm
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("nyní je čas",
"2017-06-27 13:04:00", "je čas")
testExtract("za sekundu",
"2017-06-27 13:04:01", "")
testExtract("za minutu",
"2017-06-27 13:05:00", "")
# testExtract("ve dvou minutách",
# "2017-06-27 13:06:00", "")
# testExtract("in a couple of minutes",
# "2017-06-27 13:06:00", "")
# testExtract("ve dvou hodinách",
# "2017-06-27 15:04:00", "")
# testExtract("in a couple of hours",
# "2017-06-27 15:04:00", "")
# testExtract("v dvoje týden",
# "2017-07-11 00:00:00", "")
# testExtract("in a couple of weeks",
# "2017-07-11 00:00:00", "")
# testExtract("v dvoje měsíc",
# "2017-08-27 00:00:00", "")
# testExtract("v dvoje rok",
# "2019-06-27 00:00:00", "")
# testExtract("in a couple of months",
# "2017-08-27 00:00:00", "")
# testExtract("in a couple of years",
# "2019-06-27 00:00:00", "")
testExtract("v desetiletí",
"2027-06-27 00:00:00", "")
# testExtract("in a couple of decades",
# "2037-06-27 00:00:00", "")
testExtract("další desetiletí",
"2027-06-27 00:00:00", "")
testExtract("v století",
"2117-06-27 00:00:00", "")
testExtract("v tisíciletí",
"3017-06-27 00:00:00", "")
testExtract("v dvoje desetiletí",
"2037-06-27 00:00:00", "")
testExtract("v 5 desetiletí",
"2067-06-27 00:00:00", "")
testExtract("v dvoje století",
"2217-06-27 00:00:00", "")
# testExtract("in a couple of centuries",
# "2217-06-27 00:00:00", "")
testExtract("v 2 století",
"2217-06-27 00:00:00", "")
testExtract("v dvoje tisíciletí",
"4017-06-27 00:00:00", "")
# testExtract("in a couple of millenniums",
# "4017-06-27 00:00:00", "")
testExtract("v hodina",
"2017-06-27 14:04:00", "")
testExtract("chci to během hodiny",
"2017-06-27 14:04:00", "chci to")
testExtract("za 1 sekundu",
"2017-06-27 13:04:01", "")
testExtract("za 2 sekundy",
"2017-06-27 13:04:02", "")
testExtract("Nastav časovač na 1 minutu",
"2017-06-27 13:05:00", "nastav časovač")
testExtract("Nastav časovač na půl hodina",
"2017-06-27 13:34:00", "nastav časovač")
testExtract("Nastav časovač na 5 den od dnes",
"2017-07-02 00:00:00", "nastav časovač")
testExtract("den po zítřku",
"2017-06-29 00:00:00", "")
testExtract("Jaké je počasí den po zítřku?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("Připomeň mi v 10:45 pm",
"2017-06-27 22:45:00", "připomeň mi")
testExtract("jaké je počasí v pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je zítřejší počasí",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto odpoledne",
"2017-06-27 15:00:00", "jaké je počasí")
testExtract("jaké je počasí tento večer",
"2017-06-27 19:00:00", "jaké je počasí")
testExtract("jaké bylo počasí toto ráno",
"2017-06-27 08:00:00", "jaké bylo počasí")
testExtract("připomeň mi abych zavolal mámě v 8 týden a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v srpen 3",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě") # přidat i třetího slovně
testExtract("připomeň mi zítra abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi zítra abych zavolal mámě v 10pm",
"2017-06-28 22:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7am",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v hodina",
"2017-06-27 14:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 1730",
"2017-06-27 17:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 0630",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 06 30 hodina",
"2017-06-28 06:30:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě večer v 7 hodin",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin večer",
"2017-06-27 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin ráno",
"2017-06-28 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek večer v 7 hodin",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v Čtvrtek ráno v 7 hodin",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7 hodin Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek ráno",
"2017-06-29 07:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek večer",
"2017-06-29 19:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 Středa v večer",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě Středa večer v 8",
"2017-06-28 20:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za dvě hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 2 hodiny",
"2017-06-27 15:04:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za 15 minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za patnáct minut",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za půl hodina",
"2017-06-27 13:34:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě za čtvrt hodina",
"2017-06-27 13:19:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am 2 den po této sobota",
"2017-07-03 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("Přehraj <NAME> hudbu 2 dny od Pátek",
"2017-07-02 00:00:00", "přehraj <NAME>")
testExtract("Začni invazi v 3:45 pm v Čtvrtek",
"2017-06-29 15:45:00", "začni invazi")
testExtract("V Pondělí, objednej koláč z pekárny",
"2017-07-03 00:00:00", "objednej koláč z pekárny")
testExtract("Přehraj Happy Birthday hudbu 5 roků od dnes",
"2022-06-27 00:00:00", "přehraj happy birthday hudbu")
testExtract("Skype Mámě v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "skype mámě")
testExtract("Jaké je počasí příští Pátek?",
"2017-06-30 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Středa?",
"2017-07-05 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští Čtvrtek?",
"2017-07-06 00:00:00", "jaké je počasí")
testExtract("Jaké je počasí příští pátek ráno",
"2017-06-30 08:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek večer",
"2017-06-30 19:00:00", "jaké je počasí")
testExtract("jaké je počasí příští pátek odpoledne",
"2017-06-30 15:00:00", "jaké je počasí")
testExtract("připomeň mi abych zavolal mámě v srpen třetího",
"2017-08-03 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("Kup ohňostroj v 4 Červenec",
"2017-07-04 00:00:00", "kup ohňostroj")
testExtract("jaké je počasí 2 týdny od další pátek",
"2017-07-14 00:00:00", "jaké je počasí")
testExtract("jaké je počasí Středa v 0700 hodina",
"2017-06-28 07:00:00", "jaké je počasí")
testExtract("Nastav budík Středa v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("Nastav schůzku v 12:45 pm další Čtvrtek",
"2017-07-06 12:45:00", "nastav schůzku")
testExtract("Jaké je počasí tento Čtvrtek?",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("nastav návštěvu na 2 týdny a 6 dní od Sobota",
"2017-07-21 00:00:00", "nastav návštěvu")
testExtract("Zahaj invazi v 03 45 v Čtvrtek",
"2017-06-29 03:45:00", "zahaj invazi")
testExtract("Zahaj invazi v 800 hodin v Čtvrtek",
"2017-06-29 08:00:00", "zahaj invazi")
testExtract("Zahaj párty v 8 hodin v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj párty")
testExtract("Zahaj invazi v 8 v večer v Čtvrtek",
"2017-06-29 20:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v poledne",
"2017-06-29 12:00:00", "<NAME>")
testExtract("Zahaj invazi v Čtvrtek v půlnoc",
"2017-06-29 00:00:00", "zahaj invazi")
testExtract("Zahaj invazi v Čtvrtek v 0500",
"2017-06-29 05:00:00", "zahaj invazi")
testExtract("připomeň mi abych vstal v 4 roky",
"2021-06-27 00:00:00", "připomeň mi abych vstal")
testExtract("připomeň mi abych vstal v 4 roky a 4 dny",
"2021-07-01 00:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí 3 dny po zítra?",
"2017-07-01 00:00:00", "jaké je počasí")
testExtract("prosinec 3",
"2017-12-03 00:00:00", "")
testExtract("sejdeme se v 8:00 dnes večer",
"2017-06-27 20:00:00", "sejdeme se")
testExtract("sejdeme se v 5pm",
"2017-06-27 17:00:00", "sejdeme se")
testExtract("sejdeme se v 8 am",
"2017-06-28 08:00:00", "sejdeme se")
testExtract("připomeň mi abych vstal v 8 am",
"2017-06-28 08:00:00", "připomeň mi abych vstal")
testExtract("jaké je počasí v úterý",
"2017-06-27 00:00:00", "jaké je počasí")
testExtract("jaké je počasí v pondělí",
"2017-07-03 00:00:00", "jaké je počasí")
testExtract("jaké je počasí toto Středa",
"2017-06-28 00:00:00", "jaké je počasí")
testExtract("v Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("tento Čtvrtek jaké je počasí",
"2017-06-29 00:00:00", "jaké je počasí")
testExtract("poslední pondělí jaké bylo počasí",
"2017-06-26 00:00:00", "jaké bylo počasí")
testExtract("nastav budík na Středa večer v 8",
"2017-06-28 20:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v odpoledne",
"2017-06-28 15:00:00", "nastav budík")
testExtract("nastav budík na Středa v 3 hodiny v ráno",
"2017-06-28 03:00:00", "nastav budík")
testExtract("nastav budík na Středa ráno v 7 hodin",
"2017-06-28 07:00:00", "nastav budík")
testExtract("nastav budík na dnes v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
testExtract("nastav budík na tento večer v 7 hodin",
"2017-06-27 19:00:00", "nastav budík")
# TODO: This test is imperfect due to the "at 7:00" still in the
# remainder. But let it pass for now since time is correct
testExtract("nastav budík na tento večer v 7:00",
"2017-06-27 19:00:00", "nastav budík v 7:00")
testExtract("večer v červen 5 2017 připomeň mi" +
" abych zavolal mámě",
"2017-06-05 19:00:00", "připomeň mi abych zavolal mámě")
# TODO: This test is imperfect due to the missing "for" in the
# remainder. But let it pass for now since time is correct
testExtract("aktualizuj můj kalendář na ranní schůzku s julius" +
" v březnu 4",
"2018-03-04 08:00:00",
"aktualizuj můj kalendář schůzku s julius")
testExtract("připomeň mi abych zavolal mámě další úterý",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě 3 týdny",
"2017-07-18 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny",
"2017-08-22 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 8 týdny a 2 dny",
"2017-08-24 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 4 dny",
"2017-07-01 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 3 měsíce",
"2017-09-27 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 2 roky a 2 dny",
"2019-06-29 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě další týden",
"2017-07-04 00:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am v Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am tato Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10 další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("připomeň mi abych zavolal mámě v 10am další Sobota",
"2017-07-01 10:00:00", "připomeň mi abych zavolal mámě")
# test yesterday
testExtract("jaký den byl včera",
"2017-06-26 00:00:00", "jaký den byl")
testExtract("jaký den byl den před včera",
"2017-06-25 00:00:00", "jaký den byl")
testExtract("měl jsem večeři včera v 6",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 am",
"2017-06-26 06:00:00", "měl jsem večeři")
testExtract("měl jsem večeři včera v 6 pm",
"2017-06-26 18:00:00", "měl jsem večeři")
        # The two tests below ensure that the time is picked
        # even if no am/pm is specified,
        # in the case of weekdays/tonight
testExtract("nastav budík na 9 o víkendech",
"2017-06-27 21:00:00", "nastav budík víkendech")
testExtract("na 8 dnes večer",
"2017-06-27 20:00:00", "")
testExtract("na 8:30pm dnes večer",
"2017-06-27 20:30:00", "")
# Tests a time with ':' & without am/pm
testExtract("nastav budík na dnes večer 9:30",
"2017-06-27 21:30:00", "nastav budík")
testExtract("nastav budík na 9:00 na dnes večer",
"2017-06-27 21:00:00", "nastav budík")
# Check if it picks intent irrespective of correctness
testExtract("nastav budík na 9 hodin dnes večer",
"2017-06-27 21:00:00", "nastav budík")
testExtract("připomeň mi hru dnes v noci v 11:30",
"2017-06-27 23:30:00", "připomeň mi hru")
testExtract("nastav budík v 7:30 o výkendech",
"2017-06-27 19:30:00", "nastav budík o výkendech")
# "# days <from X/after X>"
testExtract("mé narozeniny jsou 2 dny od dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po dnes",
"2017-06-29 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny od zítra",
"2017-06-30 00:00:00", "mé narozeniny jsou")
testExtract("připomeň mi abych zavolal mámě v 10am 2 dny po další Sobota",
"2017-07-10 10:00:00", "připomeň mi abych zavolal mámě")
testExtract("mé narozeniny jsou 2 dny od včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
testExtract("mé narozeniny jsou 2 dny po včera",
"2017-06-28 00:00:00", "mé narozeniny jsou")
# "# days ago>"
testExtract("mé narozeniny byly před 1 den",
"2017-06-26 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 2 dny",
"2017-06-25 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 3 dny",
"2017-06-24 00:00:00", "mé narozeniny byly")
testExtract("mé narozeniny byly před 4 dny",
"2017-06-23 00:00:00", "mé narozeniny byly")
        # TODO this test is imperfect due to "tonight" in the reminder, but let it pass since the date is correct
testExtract("sejdeme se dnes v noci",
"2017-06-27 22:00:00", "sejdeme se noci")
        # TODO this test is imperfect due to "at night" in the reminder, but let it pass since the date is correct
testExtract("sejdeme se později v noci",
"2017-06-27 22:00:00", "sejdeme se později v noci")
        # TODO this test is imperfect due to "night" in the reminder, but let it pass since the date is correct
testExtract("Jaké bude počasí zítra v noci",
"2017-06-28 22:00:00", "jaké bude počasí v noci")
        # TODO this test is imperfect due to "night" in the reminder, but let it pass since the date is correct
testExtract("jaké bude počasí příští úterý v noci",
"2017-07-04 22:00:00", "jaké bude počasí v noci")
def test_extract_ambiguous_time_cs(self):
morning = datetime(2017, 6, 27, 8, 1, 2)
večer = datetime(2017, 6, 27, 20, 1, 2)
noonish = datetime(2017, 6, 27, 12, 1, 2)
self.assertEqual(
extract_datetime('krmení ryb'), None)
self.assertEqual(
extract_datetime('den'), None)
self.assertEqual(
extract_datetime('týden'), None)
self.assertEqual(
extract_datetime('měsíc'), None)
self.assertEqual(
extract_datetime('rok'), None)
self.assertEqual(
extract_datetime(' '), None)
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', morning)[0],
datetime(2017, 6, 27, 10, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', noonish)[0],
datetime(2017, 6, 27, 22, 0, 0))
self.assertEqual(
extract_datetime('nakrmit ryby v 10 hodin', večer)[0],
datetime(2017, 6, 27, 22, 0, 0))
"""
    In Czech the month "May" and the verb "may" are different words, so this check does not apply
def test_extract_date_with_may_I_cs(self):
now = datetime(2019, 7, 4, 8, 1, 2)
may_date = datetime(2019, 5, 2, 10, 11, 20)
self.assertEqual(
extract_datetime('Můžu vědět jaký je to čas zítra', now)[0],
datetime(2019, 7, 5, 0, 0, 0))
self.assertEqual(
extract_datetime('Můžu vědět kdy je 10 hodin', now)[0],
datetime(2019, 7, 4, 10, 0, 0))
self.assertEqual(
extract_datetime('24. můžu chtít připomenutí', may_date)[0],
datetime(2019, 5, 24, 0, 0, 0))
"""
def test_extract_relativedatetime_cs(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 10, 1, 2)
[extractedDate, leftover] = extract_datetime(text, date)
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(normalize(text))
self.assertEqual(res[0], expected_date, "for=" + text)
self.assertEqual(res[1], expected_leftover, "for=" + text)
testExtract("sejdeme se za 5 minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5minut",
"2017-06-27 10:06:02", "sejdeme se")
testExtract("sejdeme se za 5 sekund",
"2017-06-27 10:01:07", "sejdeme se")
testExtract("sejdeme se za 1 hodinu",
"2017-06-27 11:01:02", "sejdeme se")
testExtract("sejdeme se za 2 hodiny",
"2017-06-27 12:01:02", "sejdeme se")
print("TODO") # Need better normaliting procedure for czech inflexion
# testExtract("sejdeme se za 2hodiny",
# "2017-06-27 12:01:02", "sejdeme se")
testExtract("sejdeme se za 1 minutu",
"2017-06-27 10:02:02", "sejdeme se")
testExtract("sejdeme se za 1 sekundu",
"2017-06-27 10:01:03", "sejdeme se")
testExtract("sejdeme se za 5sekund",
"2017-06-27 10:01:07", "sejdeme se")
def test_spaces(self):
self.assertEqual(normalize(" tohle je test"),
"tohle je test")
self.assertEqual(normalize(" tohle je test "),
"tohle je test")
self.assertEqual(normalize(" tohle je jedna test"),
"tohle je 1 test")
def test_numbers(self):
self.assertEqual(normalize("tohle je jedna dva tři test"),
"tohle je 1 2 3 test")
self.assertEqual(normalize(" to je čtyři pět šest test"),
"to je 4 5 6 test")
self.assertEqual(normalize("to je sedum osum devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("to je sedm osm devět test"),
"to je 7 8 9 test")
self.assertEqual(normalize("tohle je deset jedenáct dvanáct test"),
"tohle je 10 11 12 test")
self.assertEqual(normalize("tohle je třináct čtrnáct test"),
"tohle je 13 14 test")
self.assertEqual(normalize("tohle je patnáct šestnáct sedmnáct"),
"tohle je 15 16 17")
self.assertEqual(normalize("tohle je osmnáct devatenáct dvacet"),
"tohle je 18 19 20")
self.assertEqual(normalize("tohle je jedna devatenáct dvacet dva"),
"tohle je 1 19 20 2")
self.assertEqual(normalize("tohle je jedna sto"),
"tohle je 1 sto")
self.assertEqual(normalize("tohle je jedna dva dvacet dva"),
"tohle je 1 2 20 2")
self.assertEqual(normalize("tohle je jedna a půl"),
"tohle je 1 a půl")
self.assertEqual(normalize("tohle je jedna a půl a pět šest"),
"tohle je 1 a půl a 5 6")
def test_multiple_numbers(self):
self.assertEqual(extract_numbers("tohle je jedna dva tři test"),
[1.0, 2.0, 3.0])
self.assertEqual(extract_numbers("to je čtyři pět šest test"),
[4.0, 5.0, 6.0])
self.assertEqual(extract_numbers("tohle je deset jedenáct dvanáct test"),
[10.0, 11.0, 12.0])
self.assertEqual(extract_numbers("tohle je jedna dvacet jedna test"),
[1.0, 21.0])
self.assertEqual(extract_numbers("1 pes, sedm prasat, <NAME> "
"farmu, 3 krát 5 makaréna"),
[1, 7, 3, 5])
self.assertEqual(extract_numbers("dva piva pro dva medvědy"),
[2.0, 2.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet"),
[20, 20, 20])
self.assertEqual(extract_numbers("dvacet 20 22"),
[20.0, 20.0, 22.0])
self.assertEqual(extract_numbers("dvacet dvacet dva dvacet"),
[20, 22, 20])
self.assertEqual(extract_numbers("dvacet 2"),
[22.0])
self.assertEqual(extract_numbers("dvacet 20 dvacet 2"),
[20, 20, 22])
self.assertEqual(extract_numbers("třetina jedna"),
[1 / 3, 1])
self.assertEqual(extract_numbers("třetí", ordinals=True), [3])
self.assertEqual(extract_numbers("šest trillion", short_scale=True),
[6e12])
self.assertEqual(extract_numbers("šest trilion", short_scale=False),
[6e18])
self.assertEqual(extract_numbers("dvě prasátka a šest trillion bakterií",
short_scale=True), [2, 6e12])
self.assertEqual(extract_numbers("dvě prasátka a šest trilion bakterií",
short_scale=False), [2, 6e18])
self.assertEqual(extract_numbers("třicátý druhý nebo první",
ordinals=True), [32, 1])
self.assertEqual(extract_numbers("tohle je sedm osm devět a"
" půl test"),
[7.0, 8.0, 9.5])
if __name__ == "__main__":
unittest.main()
 | en | 0.709948 | 2.159079 | 2
src/net/pluto_ftp.py | WardenAllen/Uranus | 0 | 8924 | <gh_stars>0
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/9/18 12:02
# @Author : WardenAllen
# @File : pluto_ftp.py
# @Brief :
import paramiko
class PlutoFtp:
    """Thin SFTP helper built on paramiko, supporting password- or key-based login."""
    # paramiko's SFTPClient object, set by one of the connect_* methods.
    __sftp = object
def connect_by_pass(self, host, port, uname, pwd):
transport = paramiko.Transport((host, port))
transport.connect(username=uname, password=pwd)
self.__sftp = paramiko.SFTPClient.from_transport(transport)
def connect_by_key(self, host, port, uname, key_path, key_pass = ''):
key = paramiko.RSAKey.from_private_key_file(key_path, key_pass)
transport = paramiko.Transport((host, port))
transport.connect(username=uname, pkey=key)
self.__sftp = paramiko.SFTPClient.from_transport(transport)
def get(self, remote, local, cb = None):
self.__sftp.get(remote, local, cb)
def put(self, local, remote, cb = None):
        self.__sftp.put(local, remote, cb)
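# A minimal usage sketch (editorial addition, not part of the original file); the
# host, port, credentials and paths below are placeholders, and error handling and
# connection cleanup are omitted.
if __name__ == '__main__':
    ftp = PlutoFtp()
    ftp.connect_by_pass('sftp.example.com', 22, 'user', 'secret')
    ftp.put('local.txt', '/remote/local.txt')
    ftp.get('/remote/report.csv', 'report.csv')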
 | en | 0.2787 | 2.564565 | 3
piped/processors/test/__init__.py | alexbrasetvik/Piped | 3 | 8925 | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
 | en | 0.733711 | 0.822658 | 1
assistance_bot/app.py | reakfog/personal_computer_voice_assistant | 0 | 8926 | import sys
sys.path = ['', '..'] + sys.path[1:]
import daemon
from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *
if __name__ == '__main__':
speaking.setup_assistant_voice(core.ttsEngine, core.assistant)
while True:
# start speech recording and speech recognition
recognized_speech = listening.get_listening_and_recognition_result(
core.recognizer,
core.microphone)
# executing the given command
execute_command(recognized_speech)
 | en | 0.920817 | 2.320129 | 2
python/testData/resolve/AssignmentExpressionsAndOuterVar.py | tgodzik/intellij-community | 2 | 8927 | <filename>python/testData/resolve/AssignmentExpressionsAndOuterVar.py<gh_stars>1-10
total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
<ref> | none | 1 | 1.879771 | 2
|
exhale/deploy.py | florianhumblot/exhale | 0 | 8928 | <reponame>florianhumblot/exhale
# -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2022, <NAME>. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The deploy module is responsible for two primary actions:
1. Executing Doxygen (if requested in ``exhale_args``).
2. Launching the full API generation via the :func:`~exhale.deploy.explode` function.
'''
from __future__ import unicode_literals
from . import configs
from . import utils
from .graph import ExhaleRoot
import os
import sys
import six
import re
import codecs
import tempfile
import textwrap
from subprocess import PIPE, Popen, STDOUT
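# Editorial sketch (not from the original source): how the two actions described in
# the module docstring are typically sequenced once
# `configs.apply_sphinx_configurations` has populated `configs`. The helper below is
# illustrative only and is never called by exhale itself.
def _illustrative_deploy_flow():  # pragma: no cover - editorial example
    if configs.exhaleExecutesDoxygen:
        status = generateDoxygenXML()
        if status:
            raise RuntimeError(status)
    explode()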
def _generate_doxygen(doxygen_input):
'''
This method executes doxygen based off of the specified input. By the time this
method is executed, it is assumed that Doxygen is intended to be run in the
**current working directory**. Search for ``returnPath`` in the implementation of
:func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect.
This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`,
which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`.
Two versions of the
doxygen command can be executed:
1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a
``Doxyfile`` exists in the **current working directory**. Meaning the command
being executed is simply ``doxygen``.
2. For all other values, ``doxygen_input`` represents the arguments as to be
specified on ``stdin`` to the process.
**Parameters**
``doxygen_input`` (str)
Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the
selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``)
that will be ``communicate``d to the ``doxygen`` process on ``stdin``.
.. note::
If using Python **3**, the input **must** still be a ``str``. This
method will convert the input to ``bytes`` as follows:
.. code-block:: py
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
**Return**
``str`` or ``None``
If an error occurs, a string describing the error is returned with the
intention of the caller raising the exception. If ``None`` is returned,
then the process executed without error. Example usage:
.. code-block:: py
            status = _generate_doxygen("Doxyfile")
if status:
raise RuntimeError(status)
Though a little awkward, this is done to enable the intended caller of this
method to restore some state before exiting the program (namely, the working
directory before propagating an exception to ``sphinx-build``).
'''
if not isinstance(doxygen_input, six.string_types):
return "Error: the `doxygen_input` variable must be of type `str`."
doxyfile = doxygen_input == "Doxyfile"
try:
# Setup the arguments to launch doxygen
if doxyfile:
args = ["doxygen"]
kwargs = {}
else:
args = ["doxygen", "-"]
kwargs = {"stdin": PIPE}
if configs._on_rtd:
# On RTD, any capturing of Doxygen output can cause buffer overflows for
# even medium sized projects. So it is disregarded entirely to ensure the
# build will complete (otherwise, it silently fails after `cat conf.py`)
devnull_file = open(os.devnull, "w")
kwargs["stdout"] = devnull_file
kwargs["stderr"] = STDOUT
else:
# TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the
# `communicate` to hang due to buffer overflows.
#
# See excellent synopsis:
# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
if six.PY2:
tempfile_kwargs = {}
else:
# encoding argument introduced in python 3
tempfile_kwargs = {"encoding": "utf-8"}
tempfile_kwargs["mode"] = "r+"
tmp_out_file = tempfile.TemporaryFile(
prefix="doxygen_stdout_buff", **tempfile_kwargs
)
tmp_err_file = tempfile.TemporaryFile(
prefix="doxygen_stderr_buff", **tempfile_kwargs
)
# Write to the tempfiles over PIPE to avoid buffer overflowing
kwargs["stdout"] = tmp_out_file
kwargs["stderr"] = tmp_err_file
# Note: overload of args / kwargs, Popen is expecting a list as the first
# parameter (aka no *args, just args)!
doxygen_proc = Popen(args, **kwargs)
# Communicate can only be called once, arrange whether or not stdin has value
if not doxyfile:
# In Py3, make sure we are communicating a bytes-like object which is no
# longer interchangeable with strings (as was the case in Py2).
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
comm_kwargs = {"input": doxygen_input}
else:
comm_kwargs = {}
# Waits until doxygen has completed
doxygen_proc.communicate(**comm_kwargs)
# Print out what was written to the tmpfiles by doxygen
if not configs._on_rtd and not configs.exhaleSilentDoxygen:
# Doxygen output (some useful information, mostly just enumeration of the
# configurations you gave it {useful for debugging...})
if tmp_out_file.tell() > 0:
tmp_out_file.seek(0)
print(tmp_out_file.read())
# Doxygen error (e.g. any warnings, or invalid input)
if tmp_err_file.tell() > 0:
# Making them stick out, ideally users would reduce this output to 0 ;)
# This will print a yellow [~] before every line, but not make the
# entire line yellow because it's definitively not helpful
prefix = utils._use_color(
utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr
)
tmp_err_file.seek(0)
sys.stderr.write(utils.prefix(prefix, tmp_err_file.read()))
# Close the file handles opened for communication with subprocess
if configs._on_rtd:
devnull_file.close()
else:
# Delete the tmpfiles
tmp_out_file.close()
tmp_err_file.close()
# Make sure we had a valid execution of doxygen
exit_code = doxygen_proc.returncode
if exit_code != 0:
raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
except Exception as e:
return "Unable to execute 'doxygen': {0}".format(e)
# returning None signals _success_
return None
def _valid_config(config, required):
'''
    Check :data:`~exhale.configs.exhaleDoxygenStdin` for a given doxygen variable.
    ``config``: the doxygen input variable we are looking for (e.g. ``INPUT``).
    ``required``: if ``True``, ``config`` must be present and ``True`` is returned only
    when it is found. If ``False``, ``config`` is NOT ALLOWED to be present and ``True``
    is returned only when it is absent.
'''
re_template = r"\s*{config}\s*=.*".format(config=config)
found = re.search(re_template, configs.exhaleDoxygenStdin)
if required:
return found is not None
else:
return found is None
def generateDoxygenXML():
# If this happens, we really shouldn't be here...
if not configs.exhaleExecutesDoxygen:
return textwrap.dedent('''
`generateDoxygenXML` should *ONLY* be called internally. You should
set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.
''')
# Case 1: the user has their own `Doxyfile`.
if configs.exhaleUseDoxyfile:
return _generate_doxygen("Doxyfile")
# Case 2: use stdin, with some defaults and potentially additional specs from user
else:
# There are two doxygen specs that we explicitly disallow
#
# 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe
# 2. STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`)
#
# There is one doxygen spec that is REQUIRED to be given:
#
# 1. INPUT (where doxygen should parse).
#
# The below is a modest attempt to validate that these were / were not given.
if not isinstance(configs.exhaleDoxygenStdin, six.string_types):
return "`exhaleDoxygenStdin` config must be a string!"
if not _valid_config("OUTPUT_DIRECTORY", False):
# If we are hitting this code, these should both exist and be configured
# since this method is called **AFTER** the configuration verification code
# performed in configs.apply_sphinx_configurations
breathe_projects = configs._the_app.config.breathe_projects
breathe_default_project = configs._the_app.config.breathe_default_project
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does
this internally by reading what you provided to `breathe_projects` in
your `conf.py`.
Based on what you had in `conf.py`, Exhale will be using
- The `breathe_default_project`:
{default}
                - The output path specified (`breathe_projects[breathe_default_project]`):
{path}
NOTE: the above path has the `xml` portion removed from what you
                      provided. This path is what is sent to Doxygen; Breathe
                      requires you to include the `xml` directory path, so Exhale simply
re-uses this variable and adapts the value for our needs.
'''.format(
default=breathe_default_project,
path=breathe_projects[breathe_default_project].rsplit("{sep}xml".format(sep=os.sep), 1)[0]
))
if not _valid_config("STRIP_FROM_PATH", False):
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does
this internally by using the value you provided to `exhale_args` in
your `conf.py` for the key `doxygenStripFromPath`.
Based on what you had in `conf.py`, Exhale will be using:
{strip}
NOTE: the above is what you specified directly in `exhale_args`. Exhale
will be using an absolute path to send to Doxygen. It is:
{absolute}
'''.format(
strip=configs._the_app.config.exhale_args["doxygenStripFromPath"],
absolute=configs.doxygenStripFromPath
))
if not _valid_config("INPUT", True):
return textwrap.dedent('''
`exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.
The INPUT variable is what tells Doxygen where to look for code to
extract documentation from. For example, if you had a directory layout
project_root/
docs/
conf.py
Makefile
... etc ...
include/
my_header.hpp
src/
my_header.cpp
Then you would include the line
INPUT = ../include
in the string provided to `exhale_args["exhaleDoxygenStdin"]`.
''')
# For these, we just want to warn them of the impact but still allow an override
re_template = r"\s*{config}\s*=\s*(.*)"
for cfg in ("ALIASES", "PREDEFINED"):
found = re.search(re_template.format(config=cfg), configs.exhaleDoxygenStdin)
if found:
sys.stderr.write(utils.info(textwrap.dedent('''
You have supplied to `exhaleDoxygenStdin` a configuration of:
{cfg} = {theirs}
This has an important impact, as it overrides a default setting that
Exhale is using.
1. If you are intentionally overriding this configuration, simply
ignore this message --- what you intended will happen.
2. If you meant to _continue_ adding to the defaults Exhale provides,
you need to use a `+=` instead of a raw `=`. So do instead
{cfg} += {theirs}
'''.format(cfg=cfg, theirs=found.groups()[0])), utils.AnsiColors.BOLD_YELLOW))
# Include their custom doxygen definitions after the defaults so that they can
# override anything they want to. Populate the necessary output dir and strip path.
doxy_dir = configs._doxygen_xml_output_directory.rsplit("{sep}xml".format(sep=os.sep), 1)[0]
internal_configs = textwrap.dedent('''
# Tell doxygen to output wherever breathe is expecting things
OUTPUT_DIRECTORY = "{out}"
# Tell doxygen to strip the path names (RTD builds produce long abs paths...)
STRIP_FROM_PATH = "{strip}"
'''.format(out=doxy_dir, strip=configs.doxygenStripFromPath))
external_configs = textwrap.dedent(configs.exhaleDoxygenStdin)
# Place external configs last so that if the _valid_config method isn't actually
# catching what it should be, the internal configs will override theirs
full_input = "{base}\n{external}\n{internal}\n\n".format(base=configs.DEFAULT_DOXYGEN_STDIN_BASE,
external=external_configs,
internal=internal_configs)
# << verboseBuild
if configs.verboseBuild:
msg = "[*] The following input will be sent to Doxygen:\n"
if not configs.alwaysColorize and not sys.stderr.isatty():
sys.stderr.write(msg)
sys.stderr.write(full_input)
else:
sys.stderr.write(utils.colorize(msg, utils.AnsiColors.BOLD_CYAN))
sys.stderr.write(utils.__fancy(full_input, "make", "console"))
return _generate_doxygen(full_input)
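# Editorial example with hypothetical values (not from the original source): the kind
# of `exhale_args` entry in a project's conf.py that feeds the stdin path above.
# `INPUT` is the only doxygen variable the user must supply; `OUTPUT_DIRECTORY` and
# `STRIP_FROM_PATH` are rejected because Exhale fills them in itself.
_EXAMPLE_EXHALE_ARGS = {
    "containmentFolder": "./api",
    "rootFileName": "library_root.rst",
    "doxygenStripFromPath": "..",
    "exhaleExecutesDoxygen": True,
    "exhaleDoxygenStdin": "INPUT = ../include",
}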
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def explode():
'''
This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output.
'''
# Quick sanity check to make sure the bare minimum have been set in the configs
err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
if configs.containmentFolder is None:
raise RuntimeError(err_msg.format(config="containmentFolder"))
if configs.rootFileName is None:
raise RuntimeError(err_msg.format(config="rootFileName"))
if configs.doxygenStripFromPath is None:
raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
# From here on, we assume that everything else has been checked / configured.
try:
textRoot = ExhaleRoot()
except:
utils.fancyError("Unable to create an `ExhaleRoot` object:")
try:
sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
start = utils.get_time()
textRoot.parse()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while parsing:")
try:
sys.stdout.write("{0}\n".format(
utils.info("Exhale: generating reStructuredText documents.")
))
start = utils.get_time()
textRoot.generateFullAPI()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while generating:")
# << verboseBuild
# toConsole only prints if verbose mode is enabled
textRoot.toConsole()
# allow access to the result after-the-fact
configs._the_app.exhale_root = textRoot
| # -*- coding: utf8 -*-
########################################################################################
# This file is part of exhale. Copyright (c) 2017-2022, <NAME>. #
# Full BSD 3-Clause license available here: #
# #
# https://github.com/svenevs/exhale/blob/master/LICENSE #
########################################################################################
'''
The deploy module is responsible for two primary actions:
1. Executing Doxygen (if requested in ``exhale_args``).
2. Launching the full API generation via the :func:`~exhale.deploy.explode` function.
'''
from __future__ import unicode_literals
from . import configs
from . import utils
from .graph import ExhaleRoot
import os
import sys
import six
import re
import codecs
import tempfile
import textwrap
from subprocess import PIPE, Popen, STDOUT
def _generate_doxygen(doxygen_input):
'''
This method executes doxygen based off of the specified input. By the time this
method is executed, it is assumed that Doxygen is intended to be run in the
**current working directory**. Search for ``returnPath`` in the implementation of
:func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect.
This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`,
which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`.
Two versions of the
doxygen command can be executed:
1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a
``Doxyfile`` exists in the **current working directory**. Meaning the command
being executed is simply ``doxygen``.
2. For all other values, ``doxygen_input`` represents the arguments as to be
specified on ``stdin`` to the process.
**Parameters**
``doxygen_input`` (str)
Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the
selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``)
that will be ``communicate``d to the ``doxygen`` process on ``stdin``.
.. note::
If using Python **3**, the input **must** still be a ``str``. This
method will convert the input to ``bytes`` as follows:
.. code-block:: py
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
**Return**
``str`` or ``None``
If an error occurs, a string describing the error is returned with the
intention of the caller raising the exception. If ``None`` is returned,
then the process executed without error. Example usage:
.. code-block:: py
status = _generate_doxygen("Doxygen")
if status:
raise RuntimeError(status)
Though a little awkward, this is done to enable the intended caller of this
method to restore some state before exiting the program (namely, the working
directory before propagating an exception to ``sphinx-build``).
'''
if not isinstance(doxygen_input, six.string_types):
return "Error: the `doxygen_input` variable must be of type `str`."
doxyfile = doxygen_input == "Doxyfile"
try:
# Setup the arguments to launch doxygen
if doxyfile:
args = ["doxygen"]
kwargs = {}
else:
args = ["doxygen", "-"]
kwargs = {"stdin": PIPE}
if configs._on_rtd:
# On RTD, any capturing of Doxygen output can cause buffer overflows for
# even medium sized projects. So it is disregarded entirely to ensure the
# build will complete (otherwise, it silently fails after `cat conf.py`)
devnull_file = open(os.devnull, "w")
kwargs["stdout"] = devnull_file
kwargs["stderr"] = STDOUT
else:
# TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the
# `communicate` to hang due to buffer overflows.
#
# See excellent synopsis:
# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
if six.PY2:
tempfile_kwargs = {}
else:
# encoding argument introduced in python 3
tempfile_kwargs = {"encoding": "utf-8"}
tempfile_kwargs["mode"] = "r+"
tmp_out_file = tempfile.TemporaryFile(
prefix="doxygen_stdout_buff", **tempfile_kwargs
)
tmp_err_file = tempfile.TemporaryFile(
prefix="doxygen_stderr_buff", **tempfile_kwargs
)
# Write to the tempfiles over PIPE to avoid buffer overflowing
kwargs["stdout"] = tmp_out_file
kwargs["stderr"] = tmp_err_file
# Note: overload of args / kwargs, Popen is expecting a list as the first
# parameter (aka no *args, just args)!
doxygen_proc = Popen(args, **kwargs)
# Communicate can only be called once, arrange whether or not stdin has value
if not doxyfile:
# In Py3, make sure we are communicating a bytes-like object which is no
# longer interchangeable with strings (as was the case in Py2).
if sys.version[0] == "3":
doxygen_input = bytes(doxygen_input, "utf-8")
comm_kwargs = {"input": doxygen_input}
else:
comm_kwargs = {}
# Waits until doxygen has completed
doxygen_proc.communicate(**comm_kwargs)
# Print out what was written to the tmpfiles by doxygen
if not configs._on_rtd and not configs.exhaleSilentDoxygen:
# Doxygen output (some useful information, mostly just enumeration of the
# configurations you gave it {useful for debugging...})
if tmp_out_file.tell() > 0:
tmp_out_file.seek(0)
print(tmp_out_file.read())
# Doxygen error (e.g. any warnings, or invalid input)
if tmp_err_file.tell() > 0:
# Making them stick out, ideally users would reduce this output to 0 ;)
# This will print a yellow [~] before every line, but not make the
# entire line yellow because it's definitively not helpful
prefix = utils._use_color(
utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr
)
tmp_err_file.seek(0)
sys.stderr.write(utils.prefix(prefix, tmp_err_file.read()))
# Close the file handles opened for communication with subprocess
if configs._on_rtd:
devnull_file.close()
else:
# Delete the tmpfiles
tmp_out_file.close()
tmp_err_file.close()
# Make sure we had a valid execution of doxygen
exit_code = doxygen_proc.returncode
if exit_code != 0:
raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code))
except Exception as e:
return "Unable to execute 'doxygen': {0}".format(e)
# returning None signals _success_
return None
def _valid_config(config, required):
'''
    Search ``configs.exhaleDoxygenStdin`` for the doxygen variable ``config``.
    ``config``: the doxygen configuration variable being checked for.
    ``required``: if ``True``, ``config`` must be present; if ``False``, it is NOT ALLOWED to be present.
    Returns ``True`` when the requirement is satisfied.
'''
re_template = r"\s*{config}\s*=.*".format(config=config)
found = re.search(re_template, configs.exhaleDoxygenStdin)
if required:
return found is not None
else:
return found is None
def generateDoxygenXML():
# If this happens, we really shouldn't be here...
if not configs.exhaleExecutesDoxygen:
return textwrap.dedent('''
`generateDoxygenXML` should *ONLY* be called internally. You should
set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`.
''')
# Case 1: the user has their own `Doxyfile`.
if configs.exhaleUseDoxyfile:
return _generate_doxygen("Doxyfile")
# Case 2: use stdin, with some defaults and potentially additional specs from user
else:
# There are two doxygen specs that we explicitly disallow
#
# 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe
# 2. STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`)
#
# There is one doxygen spec that is REQUIRED to be given:
#
# 1. INPUT (where doxygen should parse).
#
# The below is a modest attempt to validate that these were / were not given.
if not isinstance(configs.exhaleDoxygenStdin, six.string_types):
return "`exhaleDoxygenStdin` config must be a string!"
if not _valid_config("OUTPUT_DIRECTORY", False):
# If we are hitting this code, these should both exist and be configured
# since this method is called **AFTER** the configuration verification code
# performed in configs.apply_sphinx_configurations
breathe_projects = configs._the_app.config.breathe_projects
breathe_default_project = configs._the_app.config.breathe_default_project
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does
this internally by reading what you provided to `breathe_projects` in
your `conf.py`.
Based on what you had in `conf.py`, Exhale will be using
- The `breathe_default_project`:
{default}
                - The output path specified (`breathe_projects[breathe_default_project]`):
{path}
NOTE: the above path has the `xml` portion removed from what you
provided. This path is what is sent to Doxygen, Breathe
requires you include the `xml` directory path; so Exhale simply
re-uses this variable and adapts the value for our needs.
'''.format(
default=breathe_default_project,
path=breathe_projects[breathe_default_project].rsplit("{sep}xml".format(sep=os.sep), 1)[0]
))
if not _valid_config("STRIP_FROM_PATH", False):
return textwrap.dedent('''
`exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does
this internally by using the value you provided to `exhale_args` in
your `conf.py` for the key `doxygenStripFromPath`.
Based on what you had in `conf.py`, Exhale will be using:
{strip}
NOTE: the above is what you specified directly in `exhale_args`. Exhale
will be using an absolute path to send to Doxygen. It is:
{absolute}
'''.format(
strip=configs._the_app.config.exhale_args["doxygenStripFromPath"],
absolute=configs.doxygenStripFromPath
))
if not _valid_config("INPUT", True):
return textwrap.dedent('''
`exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable.
The INPUT variable is what tells Doxygen where to look for code to
extract documentation from. For example, if you had a directory layout
project_root/
docs/
conf.py
Makefile
... etc ...
include/
my_header.hpp
src/
my_header.cpp
Then you would include the line
INPUT = ../include
in the string provided to `exhale_args["exhaleDoxygenStdin"]`.
''')
# For these, we just want to warn them of the impact but still allow an override
re_template = r"\s*{config}\s*=\s*(.*)"
for cfg in ("ALIASES", "PREDEFINED"):
found = re.search(re_template.format(config=cfg), configs.exhaleDoxygenStdin)
if found:
sys.stderr.write(utils.info(textwrap.dedent('''
You have supplied to `exhaleDoxygenStdin` a configuration of:
{cfg} = {theirs}
This has an important impact, as it overrides a default setting that
Exhale is using.
1. If you are intentionally overriding this configuration, simply
ignore this message --- what you intended will happen.
2. If you meant to _continue_ adding to the defaults Exhale provides,
you need to use a `+=` instead of a raw `=`. So do instead
{cfg} += {theirs}
'''.format(cfg=cfg, theirs=found.groups()[0])), utils.AnsiColors.BOLD_YELLOW))
# Include their custom doxygen definitions after the defaults so that they can
# override anything they want to. Populate the necessary output dir and strip path.
doxy_dir = configs._doxygen_xml_output_directory.rsplit("{sep}xml".format(sep=os.sep), 1)[0]
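        # The Breathe xml directory ends in "<sep>xml"; stripping that suffix recovers the directory that
        # doxygen's OUTPUT_DIRECTORY should point to (doxygen itself appends the xml/ subdirectory).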
internal_configs = textwrap.dedent('''
# Tell doxygen to output wherever breathe is expecting things
OUTPUT_DIRECTORY = "{out}"
# Tell doxygen to strip the path names (RTD builds produce long abs paths...)
STRIP_FROM_PATH = "{strip}"
'''.format(out=doxy_dir, strip=configs.doxygenStripFromPath))
external_configs = textwrap.dedent(configs.exhaleDoxygenStdin)
# Place external configs last so that if the _valid_config method isn't actually
# catching what it should be, the internal configs will override theirs
full_input = "{base}\n{external}\n{internal}\n\n".format(base=configs.DEFAULT_DOXYGEN_STDIN_BASE,
external=external_configs,
internal=internal_configs)
# << verboseBuild
if configs.verboseBuild:
msg = "[*] The following input will be sent to Doxygen:\n"
if not configs.alwaysColorize and not sys.stderr.isatty():
sys.stderr.write(msg)
sys.stderr.write(full_input)
else:
sys.stderr.write(utils.colorize(msg, utils.AnsiColors.BOLD_CYAN))
sys.stderr.write(utils.__fancy(full_input, "make", "console"))
return _generate_doxygen(full_input)
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def explode():
'''
This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has
already been applied. It performs minimal sanity checking, and then performs in
order
1. Creates a :class:`~exhale.graph.ExhaleRoot` object.
2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object.
3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object.
4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will
only produce output when :data:`~exhale.configs.verboseBuild` is ``True``).
This results in the full API being generated, and control is subsequently passed
back to Sphinx to now read in the source documents (many of which were just
generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the
final output.
'''
# Quick sanity check to make sure the bare minimum have been set in the configs
err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly."
if configs.containmentFolder is None:
raise RuntimeError(err_msg.format(config="containmentFolder"))
if configs.rootFileName is None:
raise RuntimeError(err_msg.format(config="rootFileName"))
if configs.doxygenStripFromPath is None:
raise RuntimeError(err_msg.format(config="doxygenStripFromPath"))
# From here on, we assume that everything else has been checked / configured.
try:
textRoot = ExhaleRoot()
except:
utils.fancyError("Unable to create an `ExhaleRoot` object:")
try:
sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML.")))
start = utils.get_time()
textRoot.parse()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while parsing:")
try:
sys.stdout.write("{0}\n".format(
utils.info("Exhale: generating reStructuredText documents.")
))
start = utils.get_time()
textRoot.generateFullAPI()
end = utils.get_time()
sys.stdout.write("{0}\n".format(
utils.progress("Exhale: generated reStructuredText documents in {0}.".format(
utils.time_string(start, end)
))
))
except:
utils.fancyError("Exception caught while generating:")
# << verboseBuild
# toConsole only prints if verbose mode is enabled
textRoot.toConsole()
# allow access to the result after-the-fact
configs._the_app.exhale_root = textRoot | en | 0.813935 | # -*- coding: utf8 -*- ######################################################################################## # This file is part of exhale. Copyright (c) 2017-2022, <NAME>. # # Full BSD 3-Clause license available here: # # # # https://github.com/svenevs/exhale/blob/master/LICENSE # ######################################################################################## The deploy module is responsible for two primary actions: 1. Executing Doxygen (if requested in ``exhale_args``). 2. Launching the full API generation via the :func:`~exhale.deploy.explode` function. This method executes doxygen based off of the specified input. By the time this method is executed, it is assumed that Doxygen is intended to be run in the **current working directory**. Search for ``returnPath`` in the implementation of :func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect. This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`, which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`. Two versions of the doxygen command can be executed: 1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a ``Doxyfile`` exists in the **current working directory**. Meaning the command being executed is simply ``doxygen``. 2. For all other values, ``doxygen_input`` represents the arguments as to be specified on ``stdin`` to the process. **Parameters** ``doxygen_input`` (str) Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``) that will be ``communicate``d to the ``doxygen`` process on ``stdin``. .. note:: If using Python **3**, the input **must** still be a ``str``. This method will convert the input to ``bytes`` as follows: .. code-block:: py if sys.version[0] == "3": doxygen_input = bytes(doxygen_input, "utf-8") **Return** ``str`` or ``None`` If an error occurs, a string describing the error is returned with the intention of the caller raising the exception. If ``None`` is returned, then the process executed without error. Example usage: .. code-block:: py status = _generate_doxygen("Doxygen") if status: raise RuntimeError(status) Though a little awkward, this is done to enable the intended caller of this method to restore some state before exiting the program (namely, the working directory before propagating an exception to ``sphinx-build``). # Setup the arguments to launch doxygen # On RTD, any capturing of Doxygen output can cause buffer overflows for # even medium sized projects. So it is disregarded entirely to ensure the # build will complete (otherwise, it silently fails after `cat conf.py`) # TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the # `communicate` to hang due to buffer overflows. # # See excellent synopsis: # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ # encoding argument introduced in python 3 # Write to the tempfiles over PIPE to avoid buffer overflowing # Note: overload of args / kwargs, Popen is expecting a list as the first # parameter (aka no *args, just args)! # Communicate can only be called once, arrange whether or not stdin has value # In Py3, make sure we are communicating a bytes-like object which is no # longer interchangeable with strings (as was the case in Py2). 
# Waits until doxygen has completed # Print out what was written to the tmpfiles by doxygen # Doxygen output (some useful information, mostly just enumeration of the # configurations you gave it {useful for debugging...}) # Doxygen error (e.g. any warnings, or invalid input) # Making them stick out, ideally users would reduce this output to 0 ;) # This will print a yellow [~] before every line, but not make the # entire line yellow because it's definitively not helpful # Close the file handles opened for communication with subprocess # Delete the tmpfiles # Make sure we had a valid execution of doxygen # returning None signals _success_ .. todo:: add documentation of this method ``config``: doxygen input we're looking for ``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present # If this happens, we really shouldn't be here... `generateDoxygenXML` should *ONLY* be called internally. You should set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`. # Case 1: the user has their own `Doxyfile`. # Case 2: use stdin, with some defaults and potentially additional specs from user # There are two doxygen specs that we explicitly disallow # # 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe # 2. STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`) # # There is one doxygen spec that is REQUIRED to be given: # # 1. INPUT (where doxygen should parse). # # The below is a modest attempt to validate that these were / were not given. # If we are hitting this code, these should both exist and be configured # since this method is called **AFTER** the configuration verification code # performed in configs.apply_sphinx_configurations `exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does this internally by reading what you provided to `breathe_projects` in your `conf.py`. Based on what you had in `conf.py`, Exhale will be using - The `breathe_default_project`: {default} - The output path specfied (`breathe_projects[breathe_default_project]`): {path} NOTE: the above path has the `xml` portion removed from what you provided. This path is what is sent to Doxygen, Breathe requires you include the `xml` directory path; so Exhale simply re-uses this variable and adapts the value for our needs. `exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does this internally by using the value you provided to `exhale_args` in your `conf.py` for the key `doxygenStripFromPath`. Based on what you had in `conf.py`, Exhale will be using: {strip} NOTE: the above is what you specified directly in `exhale_args`. Exhale will be using an absolute path to send to Doxygen. It is: {absolute} `exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable. The INPUT variable is what tells Doxygen where to look for code to extract documentation from. For example, if you had a directory layout project_root/ docs/ conf.py Makefile ... etc ... include/ my_header.hpp src/ my_header.cpp Then you would include the line INPUT = ../include in the string provided to `exhale_args["exhaleDoxygenStdin"]`. # For these, we just want to warn them of the impact but still allow an override You have supplied to `exhaleDoxygenStdin` a configuration of: {cfg} = {theirs} This has an important impact, as it overrides a default setting that Exhale is using. 1. If you are intentionally overriding this configuration, simply ignore this message --- what you intended will happen. 2. 
If you meant to _continue_ adding to the defaults Exhale provides, you need to use a `+=` instead of a raw `=`. So do instead {cfg} += {theirs} # Include their custom doxygen definitions after the defaults so that they can # override anything they want to. Populate the necessary output dir and strip path. # Tell doxygen to output wherever breathe is expecting things OUTPUT_DIRECTORY = "{out}" # Tell doxygen to strip the path names (RTD builds produce long abs paths...) STRIP_FROM_PATH = "{strip}" # Place external configs last so that if the _valid_config method isn't actually # catching what it should be, the internal configs will override theirs # << verboseBuild ######################################################################################## # ## ### #### ##### Primary entry point. #### ### ## # ######################################################################################## This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has already been applied. It performs minimal sanity checking, and then performs in order 1. Creates a :class:`~exhale.graph.ExhaleRoot` object. 2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object. 3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object. 4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will only produce output when :data:`~exhale.configs.verboseBuild` is ``True``). This results in the full API being generated, and control is subsequently passed back to Sphinx to now read in the source documents (many of which were just generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the final output. # Quick sanity check to make sure the bare minimum have been set in the configs # From here on, we assume that everything else has been checked / configured. # << verboseBuild # toConsole only prints if verbose mode is enabled # allow access to the result after-the-fact | 1.833337 | 2 |
src/bayesian_reliability_comparison.py | rloganiv/bayesian-blackbox | 8 | 8929 | import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
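# Cumulative numbers of samples at which the ECE estimates are evaluated.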
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
def main(args) -> None:
# load data
categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
DATAFILE_LIST[args.dataset], False)
# train a ground_truth ece model
if args.ground_truth_type == 'bayesian':
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
else:
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
ground_truth_model.update_batch(confidences, observations)
results = np.zeros((args.num_runs, len(N_list), 5))
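    # Result columns: N, Bayesian ECE, frequentist ECE, Bayesian estimation error, frequentist estimation error.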
for run_id in range(args.num_runs):
tmp = list(zip(confidences, observations))
random.shuffle(tmp)
confidences, observations = zip(*tmp)
model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
for i in range(len(N_list)):
tmp = 0 if i == 0 else N_list[i - 1]
model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])
results[run_id, i, 0] = N_list[i]
results[run_id, i, 1] = model.eval
results[run_id, i, 2] = model.frequentist_eval
results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model, args.weight_type)
results_mean = np.mean(results, axis=0)
results_variance = np.std(results, axis=0)
    # NOTE: augmenting the module-level OUTPUT_DIR inside main() would make it a local
    # name and raise UnboundLocalError, so work with a local copy instead.
    output_dir = OUTPUT_DIR
    if args.weight_type == 'online':
        output_dir += "online_weights/"
    try:
        os.stat(output_dir)
    except OSError:
        os.mkdir(output_dir)
    if args.ground_truth_type == 'frequentist':
        filename_mean = output_dir + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = output_dir + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)
    else:
        filename_mean = output_dir + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = output_dir + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)
header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
np.savetxt(filename_std, results_variance, delimiter=',', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
parser.add_argument('-ground_truth_type', type=str, default='bayesian',
help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
parser.add_argument('-weight_type', type=str, default='pool',
help='weigh each bin with all data or only data seen so far, online or pool')
parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
args, _ = parser.parse_known_args()
if args.dataset not in DATASET_LIST:
raise ValueError("%s is not in DATASET_LIST." % args.dataset)
main(args)
| import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
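# Cumulative numbers of samples at which the ECE estimates are evaluated.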
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
def main(args) -> None:
# load data
categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
DATAFILE_LIST[args.dataset], False)
# train a ground_truth ece model
if args.ground_truth_type == 'bayesian':
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
else:
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
ground_truth_model.update_batch(confidences, observations)
results = np.zeros((args.num_runs, len(N_list), 5))
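    # Result columns: N, Bayesian ECE, frequentist ECE, Bayesian estimation error, frequentist estimation error.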
for run_id in range(args.num_runs):
tmp = list(zip(confidences, observations))
random.shuffle(tmp)
confidences, observations = zip(*tmp)
model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
for i in range(len(N_list)):
tmp = 0 if i == 0 else N_list[i - 1]
model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])
results[run_id, i, 0] = N_list[i]
results[run_id, i, 1] = model.eval
results[run_id, i, 2] = model.frequentist_eval
results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model, args.weight_type)
results_mean = np.mean(results, axis=0)
results_variance = np.std(results, axis=0)
    # NOTE: augmenting the module-level OUTPUT_DIR inside main() would make it a local
    # name and raise UnboundLocalError, so work with a local copy instead.
    output_dir = OUTPUT_DIR
    if args.weight_type == 'online':
        output_dir += "online_weights/"
    try:
        os.stat(output_dir)
    except OSError:
        os.mkdir(output_dir)
    if args.ground_truth_type == 'frequentist':
        filename_mean = output_dir + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = output_dir + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)
    else:
        filename_mean = output_dir + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = output_dir + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)
header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
np.savetxt(filename_std, results_variance, delimiter=',', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
parser.add_argument('-ground_truth_type', type=str, default='bayesian',
help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
parser.add_argument('-weight_type', type=str, default='pool',
help='weigh each bin with all data or only data seen so far, online or pool')
parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
args, _ = parser.parse_known_args()
if args.dataset not in DATASET_LIST:
raise ValueError("%s is not in DATASET_LIST." % args.dataset)
main(args)
| en | 0.603182 | # load data # train a ground_truth ece model | 2.246533 | 2 |
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py | SamKG/PsyNeuLink | 0 | 8930 | <gh_stars>0
#
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ***************************************** STATEFUL FUNCTION *********************************************************
"""
* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`
"""
import abc
import typecheck as tc
import warnings
import numbers
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context
__all__ = ['StatefulFunction']
class StatefulFunction(Function_Base): # ---------------------------------------------------------------------
"""
StatefulFunction( \
default_variable=None, \
initializer, \
rate=1.0, \
noise=0.0, \
params=None, \
owner=None, \
prefs=None, \
)
.. _StatefulFunction:
    Abstract base class for Functions the result of which depends on their `previous_value
<StatefulFunction.previous_value>` attribute.
COMMENT:
NARRATIVE HERE THAT EXPLAINS:
A) initializers and stateful_attributes
B) initializer (note singular) is a prespecified member of initializers
       that contains the value with which to initialize previous_value
COMMENT
Arguments
---------
default_variable : number, list or array : default class_defaults.variable
specifies a template for `variable <StatefulFunction.variable>`.
initializer : float, list or 1d array : default 0.0
specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array,
it must be the same length as `variable <StatefulFunction.variable>` (see `initializer
<StatefulFunction.initializer>` for details).
rate : float, list or 1d array : default 1.0
specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for
details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`.
noise : float, function, list or 1d array : default 0.0
specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or
array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise
<StatefulFunction.noise>` for details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
current input value.
initializer : float or 1d array
determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable
<StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is
applied to each element of `previous_value <StatefulFunction.previous_value>`. If initializer is a list or
        array, each element is applied to the corresponding element of `previous_value <Integrator.previous_value>`.
previous_value : 1d array
last value returned (i.e., for which state is being maintained).
initializers : list
stores the names of the initialization attributes for each of the stateful attributes of the function. The
index i item in initializers provides the initialization value for the index i item in `stateful_attributes
<StatefulFunction.stateful_attributes>`.
stateful_attributes : list
stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is
initialized by the value of the initialization attribute whose name is stored in index i of `initializers
<StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values
of the function.
.. _Stateful_Rate:
rate : float or 1d array
on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`,
`previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by
subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has
more than one element, each element is applied to the corresponding element of its target(s).
.. _Stateful_Noise:
noise : float, function, list, or 1d array
random value added on each call to `function <StatefulFunction.function>`. If `variable
<StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied
for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied
separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array,
it is applied elementwise (i.e., in Hadamard form).
.. hint::
To generate random noise that varies for every execution, a probability distribution function should be
used (see `Distribution Functions <DistributionFunction>` for details), that generates a new noise value
from its distribution on each execution. If noise is specified as a float, a function with a fixed
output, or a list or array of either of these, then noise is simply an offset that remains the same
across all executions.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentType = STATEFUL_FUNCTION_TYPE
componentName = STATEFUL_FUNCTION
class Parameters(Function_Base.Parameters):
"""
Attributes
----------
initializer
see `initializer <StatefulFunction.initializer>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
noise
see `noise <StatefulFunction.noise>`
:default value: 0.0
:type: ``float``
previous_value
see `previous_value <StatefulFunction.previous_value>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
rate
see `rate <StatefulFunction.rate>`
:default value: 1.0
:type: ``float``
"""
noise = Parameter(0.0, modulable=True)
rate = Parameter(1.0, modulable=True)
previous_value = Parameter(np.array([0]), initializer='initializer', pnl_internal=True)
initializer = Parameter(np.array([0]), pnl_internal=True)
has_initializers = Parameter(True, setter=_has_initializers_setter, pnl_internal=True)
@handle_external_context()
@tc.typecheck
def __init__(self,
default_variable=None,
rate=None,
noise=None,
initializer=None,
                 params: tc.optional(dict) = None,
owner=None,
prefs: tc.optional(is_pref_set) = None,
context=None,
**kwargs
):
if not hasattr(self, "initializers"):
self.initializers = ["initializer"]
if not hasattr(self, "stateful_attributes"):
self.stateful_attributes = ["previous_value"]
super().__init__(
default_variable=default_variable,
rate=rate,
initializer=initializer,
noise=noise,
params=params,
owner=owner,
prefs=prefs,
context=context,
**kwargs
)
def _validate(self, context=None):
self._validate_rate(self.defaults.rate)
self._validate_initializers(self.defaults.variable, context=context)
super()._validate(context=context)
def _validate_params(self, request_set, target_set=None, context=None):
# Handle list or array for rate specification
if RATE in request_set:
rate = request_set[RATE]
if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
# If the variable was not specified, then reformat it to match rate specification
# and assign class_defaults.variable accordingly
# Note: this situation can arise when the rate is parametrized (e.g., as an array) in the
# StatefulFunction's constructor, where that is used as a specification for a function parameter
# (e.g., for an IntegratorMechanism), whereas the input is specified as part of the
# object to which the function parameter belongs (e.g., the IntegratorMechanism); in that
# case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before
# the object itself, thus does not see the array specification for the input.
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context)
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size
),
self.defaults.variable,
)
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
# rate,
len(rate),
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
super()._validate_params(request_set=request_set,
target_set=target_set,
context=context)
if NOISE in target_set:
noise = target_set[NOISE]
if isinstance(noise, DistributionFunction):
noise.owner = self
target_set[NOISE] = noise.execute
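                # Storing the function's `execute` method (rather than a fixed value) means a new
                # noise sample is drawn on each call.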
self._validate_noise(target_set[NOISE])
def _validate_initializers(self, default_variable, context=None):
for initial_value_name in self.initializers:
initial_value = self._get_current_parameter_value(initial_value_name, context=context)
if isinstance(initial_value, (list, np.ndarray)):
if len(initial_value) != 1:
# np.atleast_2d may not be necessary here?
if np.shape(np.atleast_2d(initial_value)) != np.shape(np.atleast_2d(default_variable)):
raise FunctionError("{}'s {} ({}) is incompatible with its default_variable ({}) ."
.format(self.name, initial_value_name, initial_value, default_variable))
elif not isinstance(initial_value, (float, int)):
raise FunctionError("{}'s {} ({}) must be a number or a list/array of numbers."
.format(self.name, initial_value_name, initial_value))
def _validate_rate(self, rate):
# FIX: CAN WE JUST GET RID OF THIS?
# kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults
# which I don't think is the right thing to do here, but if you don't call it in _validate_params
# then a lot of things don't get instantiated properly
if rate is not None:
if isinstance(rate, list):
rate = np.asarray(rate)
rate_type_msg = 'The rate parameter of {0} must be a number or an array/list of at most 1d (you gave: {1})'
if isinstance(rate, np.ndarray):
# kmantel: current test_gating test depends on 2d rate
# this should be looked at but for now this restriction is removed
# if rate.ndim > 1:
# raise FunctionError(rate_type_msg.format(self.name, rate))
pass
elif not isinstance(rate, numbers.Number):
raise FunctionError(rate_type_msg.format(self.name, rate))
if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self.defaults.variable = np.zeros_like(np.array(rate))
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size
),
self.defaults.variable,
)
self._instantiate_value()
self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({})"
"must match the length of the default input ({}).".format(
len(rate),
# rate,
self.name,
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
# Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will
# need to be executed
def _validate_noise(self, noise):
# Noise is a list or array
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
raise FunctionError(
"Noise parameter ({}) does not match default variable ({}). Noise parameter of {} "
"must be specified as a float, a function, or an array of the appropriate shape ({}).".format(
noise, self.defaults.variable, self.name,
np.shape(np.array(self.defaults.variable))
),
component=self
)
else:
for i in range(len(noise)):
if isinstance(noise[i], DistributionFunction):
noise[i] = noise[i].execute
# if not isinstance(noise[i], (float, int)) and not callable(noise[i]):
if not np.isscalar(noise[i]) and not callable(noise[i]):
raise FunctionError("The elements of a noise list or array must be scalars or functions. "
"{} is not a valid noise element for {}".format(noise[i], self.name))
def _try_execute_param(self, param, var, context=None):
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW]
param_shape = np.array(param).shape
if not len(param_shape):
param_shape = np.array(var).shape
# param is a list; if any element is callable, execute it
if isinstance(param, (np.ndarray, list)):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
# FIX: WHY FORCE 2d??
param = np.atleast_2d(param)
for i in range(len(param)):
for j in range(len(param[i])):
try:
param[i][j] = param[i][j](context=context)
except TypeError:
try:
param[i][j] = param[i][j]()
except TypeError:
pass
try:
param = param.reshape(param_shape)
except ValueError:
if object_has_single_value(param):
param = np.full(param_shape, float(param))
# param is one function
elif callable(param):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
new_param = []
# FIX: WHY FORCE 2d??
for row in np.atleast_2d(var):
# for row in np.atleast_1d(var):
# for row in var:
new_row = []
for item in row:
try:
val = param(context=context)
except TypeError:
val = param()
new_row.append(val)
new_param.append(new_row)
param = np.asarray(new_param)
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE]
try:
if len(np.squeeze(param)):
param = param.reshape(param_shape)
except TypeError:
pass
return param
def _instantiate_attributes_before_function(self, function=None, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(self.defaults.variable), context)
# use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable
if not np.isscalar(self.defaults.variable):
for attr in self.initializers:
param = getattr(self.parameters, attr)
param._set(
np.broadcast_to(
param._get(context),
self.defaults.variable.shape
).copy(),
context
)
# create all stateful attributes and initialize their values to the current values of their
# corresponding initializer attributes
for attr_name in self.stateful_attributes:
initializer_value = getattr(self.parameters, getattr(self.parameters, attr_name).initializer)._get(context).copy()
getattr(self.parameters, attr_name)._set(initializer_value, context)
super()._instantiate_attributes_before_function(function=function, context=context)
def _initialize_previous_value(self, initializer, context=None):
initializer = convert_to_np_array(initializer, dimension=1)
self.defaults.initializer = initializer.copy()
self.parameters.initializer._set(initializer.copy(), context)
self.defaults.previous_value = initializer.copy()
self.parameters.previous_value.set(initializer.copy(), context)
return initializer
@handle_external_context()
def _update_default_variable(self, new_default_variable, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(new_default_variable), context)
super()._update_default_variable(new_default_variable, context=context)
def _parse_value_order(self, **kwargs):
"""
Returns:
tuple: the values of the keyword arguments in the order
in which they appear in this Component's `value
<Component.value>`
"""
return tuple(v for k, v in kwargs.items())
@handle_external_context(fallback_most_recent=True)
def reset(self, *args, context=None, **kwargs):
"""
Resets `value <StatefulFunction.previous_value>` and `previous_value <StatefulFunction.previous_value>`
to the specified value(s).
If arguments are passed into the reset method, then reset sets each of the attributes in
`stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument.
Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values.
If reset is called without arguments, then it sets each of the attributes in `stateful_attributes
<StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers
<StatefulFunction.initializers>`. Next, it sets the `value <StatefulFunction.value>` to a list containing
the values of each of the attributes in `initializers <StatefulFunction.initializers>`.
Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is
`previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers
<StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case
the reset method sets `previous_value <StatefulFunction.previous_value>` and `value
<StatefulFunction.value>` to either the value of the argument (if an argument was passed into
reset) or the current value of `initializer <StatefulFunction.initializer>`.
For specific types of StatefulFunction functions, the reset method may carry out other
reinitialization steps.
"""
num_stateful_attrs = len(self.stateful_attributes)
if num_stateful_attrs >= 2:
# old args specification can be supported only in subclasses
# that explicitly define an order by overriding reset
if len(args) > 0:
raise FunctionError(
f'{self}.reset has more than one stateful attribute'
f' ({self.stateful_attributes}). You must specify reset'
' values by keyword.'
)
if len(kwargs) != num_stateful_attrs:
type_name = type(self).__name__
raise FunctionError(
'StatefulFunction.reset must receive a keyword argument for'
f' each item in {type_name}.stateful_attributes in the order in'
f' which they appear in {type_name}.value'
)
if num_stateful_attrs == 1:
try:
kwargs[self.stateful_attributes[0]]
except KeyError:
try:
kwargs[self.stateful_attributes[0]] = args[0]
except IndexError:
kwargs[self.stateful_attributes[0]] = None
invalid_args = []
# iterates in order arguments are sent in function call, so it
# will match their order in value as long as they are listed
# properly in subclass reset method signatures
for attr in kwargs:
try:
kwargs[attr]
except KeyError:
kwargs[attr] = None
if kwargs[attr] is not None:
# from before: unsure if conversion to 1d necessary
kwargs[attr] = np.atleast_1d(kwargs[attr])
else:
try:
kwargs[attr] = self._get_current_parameter_value(getattr(self.parameters, attr).initializer, context=context)
except AttributeError:
invalid_args.append(attr)
if len(invalid_args) > 0:
raise FunctionError(
f'Arguments {invalid_args} to reset are invalid because they do'
f" not correspond to any of {self}'s stateful_attributes"
)
# rebuilding value rather than simply returning reinitialization_values in case any of the stateful
# attrs are modified during assignment
value = []
for attr, v in kwargs.items():
# FIXME: HACK: Do not reinitialize random_state
if attr != "random_state":
getattr(self.parameters, attr).set(kwargs[attr],
context, override=True)
value.append(getattr(self.parameters, attr)._get(context))
self.parameters.value.set(value, context, override=True)
return value
def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
assert "reset" in tags
for a in self.stateful_attributes:
initializer = getattr(self.parameters, a).initializer
source_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, initializer)
dest_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, a)
if source_ptr.type != dest_ptr.type:
warnings.warn("Shape mismatch: stateful param does not match the initializer: {}({}) vs. {}({})".format(initializer, source_ptr.type, a, dest_ptr.type))
# Take a guess that dest just has an extra dimension
assert len(dest_ptr.type.pointee) == 1
dest_ptr = builder.gep(dest_ptr, [ctx.int32_ty(0),
ctx.int32_ty(0)])
builder.store(builder.load(source_ptr), dest_ptr)
return builder
@abc.abstractmethod
def _function(self, *args, **kwargs):
raise FunctionError("StatefulFunction is not meant to be called explicitly")
| #
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# ***************************************** STATEFUL FUNCTION *********************************************************
"""
* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`
"""
import abc
import typecheck as tc
import warnings
import numbers
import numpy as np
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context
__all__ = ['StatefulFunction']
class StatefulFunction(Function_Base): # ---------------------------------------------------------------------
"""
StatefulFunction( \
default_variable=None, \
initializer, \
rate=1.0, \
noise=0.0, \
params=None, \
owner=None, \
prefs=None, \
)
.. _StatefulFunction:
    Abstract base class for Functions the result of which depends on their `previous_value
<StatefulFunction.previous_value>` attribute.
COMMENT:
NARRATIVE HERE THAT EXPLAINS:
A) initializers and stateful_attributes
B) initializer (note singular) is a prespecified member of initializers
that contains the value with which to initialize previous_value
COMMENT
Arguments
---------
default_variable : number, list or array : default class_defaults.variable
specifies a template for `variable <StatefulFunction.variable>`.
initializer : float, list or 1d array : default 0.0
specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array,
it must be the same length as `variable <StatefulFunction.variable>` (see `initializer
<StatefulFunction.initializer>` for details).
rate : float, list or 1d array : default 1.0
specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for
details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`.
noise : float, function, list or 1d array : default 0.0
specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or
array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise
<StatefulFunction.noise>` for details).
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : number or array
current input value.
initializer : float or 1d array
determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable
<StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is
applied to each element of `previous_value <StatefulFunction.previous_value>`. If initializer is a list or
array, each element is applied to the corresponding element of `previous_value <StatefulFunction.previous_value>`.
previous_value : 1d array
last value returned (i.e., for which state is being maintained).
initializers : list
stores the names of the initialization attributes for each of the stateful attributes of the function. The
index i item in initializers provides the initialization value for the index i item in `stateful_attributes
<StatefulFunction.stateful_attributes>`.
stateful_attributes : list
stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is
initialized by the value of the initialization attribute whose name is stored in index i of `initializers
<StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values
of the function.
.. _Stateful_Rate:
rate : float or 1d array
on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`,
`previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by
subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has
more than one element, each element is applied to the corresponding element of its target(s).
.. _Stateful_Noise:
noise : float, function, list, or 1d array
random value added on each call to `function <StatefulFunction.function>`. If `variable
<StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied
for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied
separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array,
it is applied elementwise (i.e., in Hadamard form).
.. hint::
To generate random noise that varies for every execution, a probability distribution function should be
used (see `Distribution Functions <DistributionFunction>` for details), that generates a new noise value
from its distribution on each execution. If noise is specified as a float, a function with a fixed
output, or a list or array of either of these, then noise is simply an offset that remains the same
across all executions.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict
the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentType = STATEFUL_FUNCTION_TYPE
componentName = STATEFUL_FUNCTION
class Parameters(Function_Base.Parameters):
"""
Attributes
----------
initializer
see `initializer <StatefulFunction.initializer>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
noise
see `noise <StatefulFunction.noise>`
:default value: 0.0
:type: ``float``
previous_value
see `previous_value <StatefulFunction.previous_value>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
rate
see `rate <StatefulFunction.rate>`
:default value: 1.0
:type: ``float``
"""
noise = Parameter(0.0, modulable=True)
rate = Parameter(1.0, modulable=True)
previous_value = Parameter(np.array([0]), initializer='initializer', pnl_internal=True)
initializer = Parameter(np.array([0]), pnl_internal=True)
has_initializers = Parameter(True, setter=_has_initializers_setter, pnl_internal=True)
@handle_external_context()
@tc.typecheck
def __init__(self,
default_variable=None,
rate=None,
noise=None,
initializer=None,
params: tc.optional(tc.optional(dict)) = None,
owner=None,
prefs: tc.optional(is_pref_set) = None,
context=None,
**kwargs
):
if not hasattr(self, "initializers"):
self.initializers = ["initializer"]
if not hasattr(self, "stateful_attributes"):
self.stateful_attributes = ["previous_value"]
super().__init__(
default_variable=default_variable,
rate=rate,
initializer=initializer,
noise=noise,
params=params,
owner=owner,
prefs=prefs,
context=context,
**kwargs
)
def _validate(self, context=None):
self._validate_rate(self.defaults.rate)
self._validate_initializers(self.defaults.variable, context=context)
super()._validate(context=context)
def _validate_params(self, request_set, target_set=None, context=None):
# Handle list or array for rate specification
if RATE in request_set:
rate = request_set[RATE]
if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
# If the variable was not specified, then reformat it to match rate specification
# and assign class_defaults.variable accordingly
# Note: this situation can arise when the rate is parametrized (e.g., as an array) in the
# StatefulFunction's constructor, where that is used as a specification for a function parameter
# (e.g., for an IntegratorMechanism), whereas the input is specified as part of the
# object to which the function parameter belongs (e.g., the IntegratorMechanism); in that
# case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before
# the object itself, thus does not see the array specification for the input.
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context)
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size,
self.defaults.variable,
)
)
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
# rate,
len(rate),
np.array(self.defaults.variable).size,
# self.defaults.variable,
)
)
super()._validate_params(request_set=request_set,
target_set=target_set,
context=context)
if NOISE in target_set:
noise = target_set[NOISE]
if isinstance(noise, DistributionFunction):
noise.owner = self
target_set[NOISE] = noise.execute
self._validate_noise(target_set[NOISE])
def _validate_initializers(self, default_variable, context=None):
for initial_value_name in self.initializers:
initial_value = self._get_current_parameter_value(initial_value_name, context=context)
if isinstance(initial_value, (list, np.ndarray)):
if len(initial_value) != 1:
# np.atleast_2d may not be necessary here?
if np.shape(np.atleast_2d(initial_value)) != np.shape(np.atleast_2d(default_variable)):
raise FunctionError("{}'s {} ({}) is incompatible with its default_variable ({}) ."
.format(self.name, initial_value_name, initial_value, default_variable))
elif not isinstance(initial_value, (float, int)):
raise FunctionError("{}'s {} ({}) must be a number or a list/array of numbers."
.format(self.name, initial_value_name, initial_value))
def _validate_rate(self, rate):
# FIX: CAN WE JUST GET RID OF THIS?
# kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults
# which I don't think is the right thing to do here, but if you don't call it in _validate_params
# then a lot of things don't get instantiated properly
if rate is not None:
if isinstance(rate, list):
rate = np.asarray(rate)
rate_type_msg = 'The rate parameter of {0} must be a number or an array/list of at most 1d (you gave: {1})'
if isinstance(rate, np.ndarray):
# kmantel: current test_gating test depends on 2d rate
# this should be looked at but for now this restriction is removed
# if rate.ndim > 1:
# raise FunctionError(rate_type_msg.format(self.name, rate))
pass
elif not isinstance(rate, numbers.Number):
raise FunctionError(rate_type_msg.format(self.name, rate))
if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable):
if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
self.defaults.variable = np.zeros_like(np.array(rate))
if self.verbosePref:
warnings.warn(
"The length ({}) of the array specified for the rate parameter ({}) of {} "
"must match the length ({}) of the default input ({}); "
"the default input has been updated to match".format(
len(rate),
rate,
self.name,
np.array(self.defaults.variable).size,
self.defaults.variable,
)
)
self._instantiate_value()
self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
else:
raise FunctionError(
"The length of the array specified for the rate parameter of {} ({}) "
"must match the length of the default input ({}).".format(
self.name,
len(rate),
np.array(self.defaults.variable).size,
)
)
# Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will
# need to be executed
def _validate_noise(self, noise):
# Noise is a list or array
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
raise FunctionError(
"Noise parameter ({}) does not match default variable ({}). Noise parameter of {} "
"must be specified as a float, a function, or an array of the appropriate shape ({}).".format(
noise, self.defaults.variable, self.name,
np.shape(np.array(self.defaults.variable))
),
component=self
)
else:
for i in range(len(noise)):
if isinstance(noise[i], DistributionFunction):
noise[i] = noise[i].execute
# if not isinstance(noise[i], (float, int)) and not callable(noise[i]):
if not np.isscalar(noise[i]) and not callable(noise[i]):
raise FunctionError("The elements of a noise list or array must be scalars or functions. "
"{} is not a valid noise element for {}".format(noise[i], self.name))
def _try_execute_param(self, param, var, context=None):
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW]
param_shape = np.array(param).shape
if not len(param_shape):
param_shape = np.array(var).shape
# param is a list; if any element is callable, execute it
if isinstance(param, (np.ndarray, list)):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
# FIX: WHY FORCE 2d??
param = np.atleast_2d(param)
for i in range(len(param)):
for j in range(len(param[i])):
try:
param[i][j] = param[i][j](context=context)
except TypeError:
try:
param[i][j] = param[i][j]()
except TypeError:
pass
try:
param = param.reshape(param_shape)
except ValueError:
if object_has_single_value(param):
param = np.full(param_shape, float(param))
# param is one function
elif callable(param):
# NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
new_param = []
# FIX: WHY FORCE 2d??
for row in np.atleast_2d(var):
# for row in np.atleast_1d(var):
# for row in var:
new_row = []
for item in row:
try:
val = param(context=context)
except TypeError:
val = param()
new_row.append(val)
new_param.append(new_row)
param = np.asarray(new_param)
# FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE]
try:
if len(np.squeeze(param)):
param = param.reshape(param_shape)
except TypeError:
pass
return param
def _instantiate_attributes_before_function(self, function=None, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(self.defaults.variable), context)
# use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable
if not np.isscalar(self.defaults.variable):
for attr in self.initializers:
param = getattr(self.parameters, attr)
param._set(
np.broadcast_to(
param._get(context),
self.defaults.variable.shape
).copy(),
context
)
# create all stateful attributes and initialize their values to the current values of their
# corresponding initializer attributes
for attr_name in self.stateful_attributes:
initializer_value = getattr(self.parameters, getattr(self.parameters, attr_name).initializer)._get(context).copy()
getattr(self.parameters, attr_name)._set(initializer_value, context)
super()._instantiate_attributes_before_function(function=function, context=context)
def _initialize_previous_value(self, initializer, context=None):
initializer = convert_to_np_array(initializer, dimension=1)
self.defaults.initializer = initializer.copy()
self.parameters.initializer._set(initializer.copy(), context)
self.defaults.previous_value = initializer.copy()
self.parameters.previous_value.set(initializer.copy(), context)
return initializer
@handle_external_context()
def _update_default_variable(self, new_default_variable, context=None):
if not self.parameters.initializer._user_specified:
self._initialize_previous_value(np.zeros_like(new_default_variable), context)
super()._update_default_variable(new_default_variable, context=context)
def _parse_value_order(self, **kwargs):
"""
Returns:
tuple: the values of the keyword arguments in the order
in which they appear in this Component's `value
<Component.value>`
"""
return tuple(v for k, v in kwargs.items())
@handle_external_context(fallback_most_recent=True)
def reset(self, *args, context=None, **kwargs):
"""
Resets `value <StatefulFunction.value>` and `previous_value <StatefulFunction.previous_value>`
to the specified value(s).
If arguments are passed into the reset method, then reset sets each of the attributes in
`stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument.
Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values.
If reset is called without arguments, then it sets each of the attributes in `stateful_attributes
<StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers
<StatefulFunction.initializers>`. Next, it sets the `value <StatefulFunction.value>` to a list containing
the values of each of the attributes in `initializers <StatefulFunction.initializers>`.
Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is
`previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers
<StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case
the reset method sets `previous_value <StatefulFunction.previous_value>` and `value
<StatefulFunction.value>` to either the value of the argument (if an argument was passed into
reset) or the current value of `initializer <StatefulFunction.initializer>`.
For specific types of StatefulFunction functions, the reset method may carry out other
reinitialization steps.
"""
num_stateful_attrs = len(self.stateful_attributes)
if num_stateful_attrs >= 2:
# old args specification can be supported only in subclasses
# that explicitly define an order by overriding reset
if len(args) > 0:
raise FunctionError(
f'{self}.reset has more than one stateful attribute'
f' ({self.stateful_attributes}). You must specify reset'
' values by keyword.'
)
if len(kwargs) != num_stateful_attrs:
type_name = type(self).__name__
raise FunctionError(
'StatefulFunction.reset must receive a keyword argument for'
f' each item in {type_name}.stateful_attributes in the order in'
f' which they appear in {type_name}.value'
)
if num_stateful_attrs == 1:
try:
kwargs[self.stateful_attributes[0]]
except KeyError:
try:
kwargs[self.stateful_attributes[0]] = args[0]
except IndexError:
kwargs[self.stateful_attributes[0]] = None
invalid_args = []
# iterates in order arguments are sent in function call, so it
# will match their order in value as long as they are listed
# properly in subclass reset method signatures
for attr in kwargs:
try:
kwargs[attr]
except KeyError:
kwargs[attr] = None
if kwargs[attr] is not None:
# from before: unsure if conversion to 1d necessary
kwargs[attr] = np.atleast_1d(kwargs[attr])
else:
try:
kwargs[attr] = self._get_current_parameter_value(getattr(self.parameters, attr).initializer, context=context)
except AttributeError:
invalid_args.append(attr)
if len(invalid_args) > 0:
raise FunctionError(
f'Arguments {invalid_args} to reset are invalid because they do'
f" not correspond to any of {self}'s stateful_attributes"
)
# rebuilding value rather than simply returning reinitialization_values in case any of the stateful
# attrs are modified during assignment
value = []
for attr, v in kwargs.items():
# FIXME: HACK: Do not reinitialize random_state
if attr != "random_state":
getattr(self.parameters, attr).set(kwargs[attr],
context, override=True)
value.append(getattr(self.parameters, attr)._get(context))
self.parameters.value.set(value, context, override=True)
return value
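# Editor's illustration (hedged; names below are hypothetical): for a typical subclass whose
# only stateful attribute is `previous_value`, reset() behaves roughly as follows:
#
#     >>> fun = SomeIntegratorFunction(initializer=[0.5])   # hypothetical subclass
#     >>> fun.reset()                        # previous_value <- initializer, i.e. [0.5]
#     >>> fun.reset(previous_value=[0.0])    # or supply an explicit value by keyword
#
# Subclasses with several stateful attributes must pass every reset value by keyword.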
def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset):
assert "reset" in tags
for a in self.stateful_attributes:
initializer = getattr(self.parameters, a).initializer
source_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, initializer)
dest_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, a)
if source_ptr.type != dest_ptr.type:
warnings.warn("Shape mismatch: stateful param does not match the initializer: {}({}) vs. {}({})".format(initializer, source_ptr.type, a, dest_ptr.type))
# Take a guess that dest just has an extra dimension
assert len(dest_ptr.type.pointee) == 1
dest_ptr = builder.gep(dest_ptr, [ctx.int32_ty(0),
ctx.int32_ty(0)])
builder.store(builder.load(source_ptr), dest_ptr)
return builder
@abc.abstractmethod
def _function(self, *args, **kwargs):
raise FunctionError("StatefulFunction is not meant to be called explicitly") | en | 0.709836 | # # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. # # # ***************************************** STATEFUL FUNCTION ********************************************************* * `StatefulFunction` * `IntegratorFunctions` * `MemoryFunctions` # --------------------------------------------------------------------- StatefulFunction( \ default_variable=None, \ initializer, \ rate=1.0, \ noise=0.0, \ params=None, \ owner=None, \ prefs=None, \ ) .. _StatefulFunction: Abstract base class for Functions the result of which depend on their `previous_value <StatefulFunction.previous_value>` attribute. COMMENT: NARRATIVE HERE THAT EXPLAINS: A) initializers and stateful_attributes B) initializer (note singular) is a prespecified member of initializers that contains the value with which to initiailzer previous_value COMMENT Arguments --------- default_variable : number, list or array : default class_defaults.variable specifies a template for `variable <StatefulFunction.variable>`. initializer : float, list or 1d array : default 0.0 specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array, it must be the same length as `variable <StatefulFunction.variable>` (see `initializer <StatefulFunction.initializer>` for details). rate : float, list or 1d array : default 1.0 specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`. noise : float, function, list or 1d array : default 0.0 specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise <StatefulFunction.noise>` for details). params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. owner : Component `component <Component>` to which to assign the Function. name : str : default see `name <Function.name>` specifies the name of the Function. prefs : PreferenceSet or specification dict : default Function.classPreferences specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details). Attributes ---------- variable : number or array current input value. initializer : float or 1d array determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable <StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is applied to each element of `previous_value <StatefulFunction.previous_value>`. 
If initializer is a list or array,each element is applied to the corresponding element of `previous_value <Integrator.previous_value>`. previous_value : 1d array last value returned (i.e., for which state is being maintained). initializers : list stores the names of the initialization attributes for each of the stateful attributes of the function. The index i item in initializers provides the initialization value for the index i item in `stateful_attributes <StatefulFunction.stateful_attributes>`. stateful_attributes : list stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is initialized by the value of the initialization attribute whose name is stored in index i of `initializers <StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values of the function. .. _Stateful_Rate: rate : float or 1d array on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`, `previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has more than one element, each element is applied to the corresponding element of its target(s). .. _Stateful_Noise: noise : float, function, list, or 1d array random value added on each call to `function <StatefulFunction.function>`. If `variable <StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array, it is applied elementwise (i.e., in Hadamard form). .. hint:: To generate random noise that varies for every execution, a probability distribution function should be used (see `Distribution Functions <DistributionFunction>` for details), that generates a new noise value from its distribution on each execution. If noise is specified as a float, a function with a fixed output, or a list or array of either of these, then noise is simply an offset that remains the same across all executions. owner : Component `component <Component>` to which the Function has been assigned. name : str the name of the Function; if it is not specified in the **name** argument of the constructor, a default is assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). prefs : PreferenceSet or specification dict the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences` for details). 
Attributes ---------- initializer see `initializer <StatefulFunction.initializer>` :default value: numpy.array([0]) :type: ``numpy.ndarray`` noise see `noise <StatefulFunction.noise>` :default value: 0.0 :type: ``float`` previous_value see `previous_value <StatefulFunction.previous_value>` :default value: numpy.array([0]) :type: ``numpy.ndarray`` rate see `rate <StatefulFunction.rate>` :default value: 1.0 :type: ``float`` # Handle list or array for rate specification # If the variable was not specified, then reformat it to match rate specification # and assign class_defaults.variable accordingly # Note: this situation can arise when the rate is parametrized (e.g., as an array) in the # StatefulFunction's constructor, where that is used as a specification for a function parameter # (e.g., for an IntegratorMechanism), whereas the input is specified as part of the # object to which the function parameter belongs (e.g., the IntegratorMechanism); in that # case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before # the object itself, thus does not see the array specification for the input. # rate, # self.defaults.variable, # np.atleast_2d may not be necessary here? # FIX: CAN WE JUST GET RID OF THIS? # kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults # which I don't think is the right thing to do here, but if you don't call it in _validate_params # then a lot of things don't get instantiated properly # kmantel: current test_gating test depends on 2d rate # this should be looked at but for now this restriction is removed # if rate.ndim > 1: # raise FunctionError(rate_type_msg.format(self.name, rate)) # rate, # self.defaults.variable, # Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will # need to be executed # Noise is a list or array # Variable is a list/array # if not isinstance(noise[i], (float, int)) and not callable(noise[i]): # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW] # param is a list; if any element is callable, execute it # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths # FIX: WHY FORCE 2d?? # param is one function # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths # FIX: WHY FORCE 2d?? # for row in np.atleast_1d(var): # for row in var: # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE] # use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable # create all stateful attributes and initialize their values to the current values of their # corresponding initializer attributes Returns: tuple: the values of the keyword arguments in the order in which they appear in this Component's `value <Component.value>` Resets `value <StatefulFunction.previous_value>` and `previous_value <StatefulFunction.previous_value>` to the specified value(s). If arguments are passed into the reset method, then reset sets each of the attributes in `stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument. Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values. If reset is called without arguments, then it sets each of the attributes in `stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers <StatefulFunction.initializers>`. 
Next, it sets the `value <StatefulFunction.value>` to a list containing the values of each of the attributes in `initializers <StatefulFunction.initializers>`. Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is `previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers <StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case the reset method sets `previous_value <StatefulFunction.previous_value>` and `value <StatefulFunction.value>` to either the value of the argument (if an argument was passed into reset) or the current value of `initializer <StatefulFunction.initializer>`. For specific types of StatefulFunction functions, the reset method may carry out other reinitialization steps. # old args specification can be supported only in subclasses # that explicitly define an order by overriding reset # iterates in order arguments are sent in function call, so it # will match their order in value as long as they are listed # properly in subclass reset method signatures # from before: unsure if conversion to 1d necessary # rebuilding value rather than simply returning reinitialization_values in case any of the stateful # attrs are modified during assignment # FIXME: HACK: Do not reinitialize random_state # Take a guess that dest just has an extra dimension | 1.634889 | 2 |
ee/clickhouse/sql/person.py | wanderlog/posthog | 0 | 8931 | <reponame>wanderlog/posthog<gh_stars>0
from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine
from ee.clickhouse.sql.table_engines import CollapsingMergeTree, ReplacingMergeTree
from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_DISTINCT_ID, KAFKA_PERSON_UNIQUE_ID
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
TRUNCATE_PERSON_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
DROP_PERSON_TABLE_SQL = f"DROP TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID2_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS person_distinct_id2 ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
PERSONS_TABLE = "person"
PERSONS_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
created_at DateTime64,
team_id Int64,
properties VARCHAR,
is_identified Int8,
is_deleted Int8 DEFAULT 0
{extra_fields}
) ENGINE = {engine}
"""
PERSONS_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSONS_TABLE, ver="_timestamp")
PERSONS_TABLE_SQL = lambda: (
PERSONS_TABLE_BASE_SQL
+ """Order By (team_id, id)
{storage_policy}
"""
).format(
table_name=PERSONS_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSONS_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
KAFKA_PERSONS_TABLE_SQL = lambda: PERSONS_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON), extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
id,
created_at,
team_id,
properties,
is_identified,
is_deleted,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
GET_LATEST_PERSON_SQL = """
SELECT * FROM person JOIN (
SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp
WHERE team_id = %(team_id)s
AND person_max.is_deleted = 0
{query}
"""
GET_LATEST_PERSON_ID_SQL = """
(select id from (
{latest_person_sql}
))
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL
)
#
# person_distinct_id table - use this still in queries, but this will eventually get removed.
#
PERSONS_DISTINCT_ID_TABLE = "person_distinct_id"
PERSONS_DISTINCT_ID_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Int8 DEFAULT 1,
is_deleted Int8 ALIAS if(_sign==-1, 1, 0)
{extra_fields}
) ENGINE = {engine}
"""
PERSONS_DISTINCT_ID_TABLE_SQL = lambda: (
PERSONS_DISTINCT_ID_TABLE_BASE_SQL
+ """Order By (team_id, distinct_id, person_id)
{storage_policy}
"""
).format(
table_name=PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=CollapsingMergeTree(PERSONS_DISTINCT_ID_TABLE, ver="_sign"),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
# :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema.
# Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error.
KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = lambda: """
CREATE TABLE {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Nullable(Int8),
is_deleted Nullable(Int8)
) ENGINE = {engine}
""".format(
table_name="kafka_" + PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID),
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_DISTINCT_ID_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
distinct_id,
person_id,
team_id,
coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# person_distinct_ids2 - new table!
#
PERSON_DISTINCT_ID2_TABLE = "person_distinct_id2"
PERSON_DISTINCT_ID2_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
team_id Int64,
distinct_id VARCHAR,
person_id UUID,
is_deleted Int8,
version Int64 DEFAULT 1
{extra_fields}
) ENGINE = {engine}
"""
PERSON_DISTINCT_ID2_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_DISTINCT_ID2_TABLE, ver="version")
PERSON_DISTINCT_ID2_TABLE_SQL = lambda: (
PERSON_DISTINCT_ID2_TABLE_BASE_SQL
+ """
ORDER BY (team_id, distinct_id)
SETTINGS index_granularity = 512
"""
).format(
table_name=PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_DISTINCT_ID2_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS + "\n, _partition UInt64",
)
KAFKA_PERSON_DISTINCT_ID2_TABLE_SQL = lambda: PERSON_DISTINCT_ID2_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_DISTINCT_ID),
extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSON_DISTINCT_ID2_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
team_id,
distinct_id,
person_id,
is_deleted,
version,
_timestamp,
_offset,
_partition
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# Static Cohort
#
PERSON_STATIC_COHORT_TABLE = "person_static_cohort"
PERSON_STATIC_COHORT_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
person_id UUID,
cohort_id Int64,
team_id Int64
{extra_fields}
) ENGINE = {engine}
"""
PERSON_STATIC_COHORT_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_STATIC_COHORT_TABLE, ver="_timestamp")
PERSON_STATIC_COHORT_TABLE_SQL = lambda: (
PERSON_STATIC_COHORT_BASE_SQL
+ """Order By (team_id, cohort_id, person_id, id)
{storage_policy}
"""
).format(
table_name=PERSON_STATIC_COHORT_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_STATIC_COHORT_TABLE_ENGINE(),
storage_policy=STORAGE_POLICY(),
extra_fields=KAFKA_COLUMNS,
)
TRUNCATE_PERSON_STATIC_COHORT_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS {PERSON_STATIC_COHORT_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
INSERT_PERSON_STATIC_COHORT = (
f"INSERT INTO {PERSON_STATIC_COHORT_TABLE} (id, person_id, cohort_id, team_id, _timestamp) VALUES"
)
#
# Other queries
#
GET_TEAM_PERSON_DISTINCT_IDS = """
SELECT distinct_id, argMax(person_id, _timestamp) as person_id
FROM (
SELECT distinct_id, person_id, max(_timestamp) as _timestamp
FROM person_distinct_id
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY person_id, distinct_id, team_id
HAVING max(is_deleted) = 0
)
GROUP BY distinct_id
"""
# Query for distinct ids using the new table; used once the 0003_fill_person_distinct_id2 migration is complete
GET_TEAM_PERSON_DISTINCT_IDS_NEW_TABLE = """
SELECT distinct_id, argMax(person_id, version) as person_id
FROM person_distinct_id2
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY distinct_id
HAVING argMax(is_deleted, version) = 0
"""
GET_PERSON_IDS_BY_FILTER = """
SELECT DISTINCT p.id
FROM ({latest_person_sql}) AS p
INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id
WHERE team_id = %(team_id)s
{distinct_query}
{limit}
{offset}
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL,
distinct_query="{distinct_query}",
limit="{limit}",
offset="{offset}",
GET_TEAM_PERSON_DISTINCT_IDS="{GET_TEAM_PERSON_DISTINCT_IDS}",
)
INSERT_PERSON_SQL = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0
"""
INSERT_PERSON_DISTINCT_ID = """
INSERT INTO person_distinct_id SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0 VALUES
"""
INSERT_PERSON_DISTINCT_ID2 = """
INSERT INTO person_distinct_id2 (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition) SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0 VALUES
"""
DELETE_PERSON_BY_ID = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1
"""
DELETE_PERSON_EVENTS_BY_ID = """
ALTER TABLE events DELETE
WHERE distinct_id IN (
SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s
)
AND team_id = %(team_id)s
"""
INSERT_COHORT_ALL_PEOPLE_THROUGH_PERSON_ID = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT actor_id FROM ({query})
)
"""
INSERT_COHORT_ALL_PEOPLE_SQL = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT id FROM (
{latest_person_sql}
) as person INNER JOIN (
SELECT person_id, distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE person_id IN ({content_sql})
) as pdi ON person.id = pdi.person_id
WHERE team_id = %(team_id)s
GROUP BY id
)
"""
GET_DISTINCT_IDS_BY_PROPERTY_SQL = """
SELECT distinct_id
FROM (
{GET_TEAM_PERSON_DISTINCT_IDS}
)
WHERE person_id IN
(
SELECT id
FROM (
SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
HAVING is_deleted = 0
)
WHERE {filters}
)
"""
GET_DISTINCT_IDS_BY_PERSON_ID_FILTER = """
SELECT distinct_id
FROM ({GET_TEAM_PERSON_DISTINCT_IDS})
WHERE {filters}
"""
GET_PERSON_PROPERTIES_COUNT = """
SELECT tupleElement(keysAndValues, 1) as key, count(*) as count
FROM person
ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as keysAndValues
WHERE team_id = %(team_id)s
GROUP BY tupleElement(keysAndValues, 1)
ORDER BY count DESC, key ASC
"""
GET_ACTORS_FROM_EVENT_QUERY = """
SELECT
{id_field} AS actor_id
{matching_events_select_statement}
FROM ({events_query})
GROUP BY actor_id
{limit}
{offset}
"""
COMMENT_DISTINCT_ID_COLUMN_SQL = (
lambda: f"ALTER TABLE person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN distinct_id 'skip_0003_fill_person_distinct_id2'"
)
SELECT_PERSON_PROP_VALUES_SQL = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} IS NOT NULL AND
{property_field} != ''
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
SELECT_PERSON_PROP_VALUES_SQL_WITH_FILTER = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} ILIKE %(value)s
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
| from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine
from ee.clickhouse.sql.table_engines import CollapsingMergeTree, ReplacingMergeTree
from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_DISTINCT_ID, KAFKA_PERSON_UNIQUE_ID
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
TRUNCATE_PERSON_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
DROP_PERSON_TABLE_SQL = f"DROP TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
TRUNCATE_PERSON_DISTINCT_ID2_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS person_distinct_id2 ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
PERSONS_TABLE = "person"
PERSONS_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
created_at DateTime64,
team_id Int64,
properties VARCHAR,
is_identified Int8,
is_deleted Int8 DEFAULT 0
{extra_fields}
) ENGINE = {engine}
"""
PERSONS_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSONS_TABLE, ver="_timestamp")
PERSONS_TABLE_SQL = lambda: (
PERSONS_TABLE_BASE_SQL
+ """Order By (team_id, id)
{storage_policy}
"""
).format(
table_name=PERSONS_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSONS_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
KAFKA_PERSONS_TABLE_SQL = lambda: PERSONS_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON), extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
id,
created_at,
team_id,
properties,
is_identified,
is_deleted,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
GET_LATEST_PERSON_SQL = """
SELECT * FROM person JOIN (
SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp
WHERE team_id = %(team_id)s
AND person_max.is_deleted = 0
{query}
"""
GET_LATEST_PERSON_ID_SQL = """
(select id from (
{latest_person_sql}
))
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL
)
#
# person_distinct_id table - use this still in queries, but this will eventually get removed.
#
PERSONS_DISTINCT_ID_TABLE = "person_distinct_id"
PERSONS_DISTINCT_ID_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Int8 DEFAULT 1,
is_deleted Int8 ALIAS if(_sign==-1, 1, 0)
{extra_fields}
) ENGINE = {engine}
"""
PERSONS_DISTINCT_ID_TABLE_SQL = lambda: (
PERSONS_DISTINCT_ID_TABLE_BASE_SQL
+ """Order By (team_id, distinct_id, person_id)
{storage_policy}
"""
).format(
table_name=PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=CollapsingMergeTree(PERSONS_DISTINCT_ID_TABLE, ver="_sign"),
extra_fields=KAFKA_COLUMNS,
storage_policy=STORAGE_POLICY(),
)
# :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema.
# Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error.
KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = lambda: """
CREATE TABLE {table_name} ON CLUSTER '{cluster}'
(
distinct_id VARCHAR,
person_id UUID,
team_id Int64,
_sign Nullable(Int8),
is_deleted Nullable(Int8)
) ENGINE = {engine}
""".format(
table_name="kafka_" + PERSONS_DISTINCT_ID_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID),
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_DISTINCT_ID_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
distinct_id,
person_id,
team_id,
coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# person_distinct_ids2 - new table!
#
PERSON_DISTINCT_ID2_TABLE = "person_distinct_id2"
PERSON_DISTINCT_ID2_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
team_id Int64,
distinct_id VARCHAR,
person_id UUID,
is_deleted Int8,
version Int64 DEFAULT 1
{extra_fields}
) ENGINE = {engine}
"""
PERSON_DISTINCT_ID2_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_DISTINCT_ID2_TABLE, ver="version")
PERSON_DISTINCT_ID2_TABLE_SQL = lambda: (
PERSON_DISTINCT_ID2_TABLE_BASE_SQL
+ """
ORDER BY (team_id, distinct_id)
SETTINGS index_granularity = 512
"""
).format(
table_name=PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_DISTINCT_ID2_TABLE_ENGINE(),
extra_fields=KAFKA_COLUMNS + "\n, _partition UInt64",
)
KAFKA_PERSON_DISTINCT_ID2_TABLE_SQL = lambda: PERSON_DISTINCT_ID2_TABLE_BASE_SQL.format(
table_name="kafka_" + PERSON_DISTINCT_ID2_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(KAFKA_PERSON_DISTINCT_ID),
extra_fields="",
)
# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSON_DISTINCT_ID2_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
team_id,
distinct_id,
person_id,
is_deleted,
version,
_timestamp,
_offset,
_partition
FROM {database}.kafka_{table_name}
""".format(
table_name=PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
#
# Static Cohort
#
PERSON_STATIC_COHORT_TABLE = "person_static_cohort"
PERSON_STATIC_COHORT_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
person_id UUID,
cohort_id Int64,
team_id Int64
{extra_fields}
) ENGINE = {engine}
"""
PERSON_STATIC_COHORT_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_STATIC_COHORT_TABLE, ver="_timestamp")
PERSON_STATIC_COHORT_TABLE_SQL = lambda: (
PERSON_STATIC_COHORT_BASE_SQL
+ """Order By (team_id, cohort_id, person_id, id)
{storage_policy}
"""
).format(
table_name=PERSON_STATIC_COHORT_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=PERSON_STATIC_COHORT_TABLE_ENGINE(),
storage_policy=STORAGE_POLICY(),
extra_fields=KAFKA_COLUMNS,
)
TRUNCATE_PERSON_STATIC_COHORT_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS {PERSON_STATIC_COHORT_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
INSERT_PERSON_STATIC_COHORT = (
f"INSERT INTO {PERSON_STATIC_COHORT_TABLE} (id, person_id, cohort_id, team_id, _timestamp) VALUES"
)
#
# Other queries
#
GET_TEAM_PERSON_DISTINCT_IDS = """
SELECT distinct_id, argMax(person_id, _timestamp) as person_id
FROM (
SELECT distinct_id, person_id, max(_timestamp) as _timestamp
FROM person_distinct_id
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY person_id, distinct_id, team_id
HAVING max(is_deleted) = 0
)
GROUP BY distinct_id
"""
# Query for distinct ids using the new table; used once the 0003_fill_person_distinct_id2 migration is complete
GET_TEAM_PERSON_DISTINCT_IDS_NEW_TABLE = """
SELECT distinct_id, argMax(person_id, version) as person_id
FROM person_distinct_id2
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY distinct_id
HAVING argMax(is_deleted, version) = 0
"""
GET_PERSON_IDS_BY_FILTER = """
SELECT DISTINCT p.id
FROM ({latest_person_sql}) AS p
INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id
WHERE team_id = %(team_id)s
{distinct_query}
{limit}
{offset}
""".format(
latest_person_sql=GET_LATEST_PERSON_SQL,
distinct_query="{distinct_query}",
limit="{limit}",
offset="{offset}",
GET_TEAM_PERSON_DISTINCT_IDS="{GET_TEAM_PERSON_DISTINCT_IDS}",
)
INSERT_PERSON_SQL = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0
"""
INSERT_PERSON_DISTINCT_ID = """
INSERT INTO person_distinct_id SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0 VALUES
"""
INSERT_PERSON_DISTINCT_ID2 = """
INSERT INTO person_distinct_id2 (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition) SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0 VALUES
"""
DELETE_PERSON_BY_ID = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1
"""
DELETE_PERSON_EVENTS_BY_ID = """
ALTER TABLE events DELETE
WHERE distinct_id IN (
SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s
)
AND team_id = %(team_id)s
"""
INSERT_COHORT_ALL_PEOPLE_THROUGH_PERSON_ID = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT actor_id FROM ({query})
)
"""
INSERT_COHORT_ALL_PEOPLE_SQL = """
INSERT INTO {cohort_table} SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM (
SELECT id FROM (
{latest_person_sql}
) as person INNER JOIN (
SELECT person_id, distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE person_id IN ({content_sql})
) as pdi ON person.id = pdi.person_id
WHERE team_id = %(team_id)s
GROUP BY id
)
"""
GET_DISTINCT_IDS_BY_PROPERTY_SQL = """
SELECT distinct_id
FROM (
{GET_TEAM_PERSON_DISTINCT_IDS}
)
WHERE person_id IN
(
SELECT id
FROM (
SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted
FROM person
WHERE team_id = %(team_id)s
GROUP BY id
HAVING is_deleted = 0
)
WHERE {filters}
)
"""
GET_DISTINCT_IDS_BY_PERSON_ID_FILTER = """
SELECT distinct_id
FROM ({GET_TEAM_PERSON_DISTINCT_IDS})
WHERE {filters}
"""
GET_PERSON_PROPERTIES_COUNT = """
SELECT tupleElement(keysAndValues, 1) as key, count(*) as count
FROM person
ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as keysAndValues
WHERE team_id = %(team_id)s
GROUP BY tupleElement(keysAndValues, 1)
ORDER BY count DESC, key ASC
"""
GET_ACTORS_FROM_EVENT_QUERY = """
SELECT
{id_field} AS actor_id
{matching_events_select_statement}
FROM ({events_query})
GROUP BY actor_id
{limit}
{offset}
"""
COMMENT_DISTINCT_ID_COLUMN_SQL = (
lambda: f"ALTER TABLE person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN distinct_id 'skip_0003_fill_person_distinct_id2'"
)
SELECT_PERSON_PROP_VALUES_SQL = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} IS NOT NULL AND
{property_field} != ''
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
SELECT_PERSON_PROP_VALUES_SQL_WITH_FILTER = """
SELECT
value,
count(value)
FROM (
SELECT
{property_field} as value
FROM
person
WHERE
team_id = %(team_id)s AND
is_deleted = 0 AND
{property_field} ILIKE %(value)s
ORDER BY id DESC
LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
""" | en | 0.549685 | CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( id UUID, created_at DateTime64, team_id Int64, properties VARCHAR, is_identified Int8, is_deleted Int8 DEFAULT 0 {extra_fields} ) ENGINE = {engine} Order By (team_id, id) {storage_policy} # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT id, created_at, team_id, properties, is_identified, is_deleted, _timestamp, _offset FROM {database}.kafka_{table_name} SELECT * FROM person JOIN ( SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted FROM person WHERE team_id = %(team_id)s GROUP BY id ) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp WHERE team_id = %(team_id)s AND person_max.is_deleted = 0 {query} (select id from ( {latest_person_sql} )) # # person_distinct_id table - use this still in queries, but this will eventually get removed. # CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( distinct_id VARCHAR, person_id UUID, team_id Int64, _sign Int8 DEFAULT 1, is_deleted Int8 ALIAS if(_sign==-1, 1, 0) {extra_fields} ) ENGINE = {engine} Order By (team_id, distinct_id, person_id) {storage_policy} # :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema. # Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error. CREATE TABLE {table_name} ON CLUSTER '{cluster}' ( distinct_id VARCHAR, person_id UUID, team_id Int64, _sign Nullable(Int8), is_deleted Nullable(Int8) ) ENGINE = {engine} # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT distinct_id, person_id, team_id, coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign, _timestamp, _offset FROM {database}.kafka_{table_name} # # person_distinct_ids2 - new table! 
# CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( team_id Int64, distinct_id VARCHAR, person_id UUID, is_deleted Int8, version Int64 DEFAULT 1 {extra_fields} ) ENGINE = {engine} ORDER BY (team_id, distinct_id) SETTINGS index_granularity = 512 # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT team_id, distinct_id, person_id, is_deleted, version, _timestamp, _offset, _partition FROM {database}.kafka_{table_name} # # Static Cohort # CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( id UUID, person_id UUID, cohort_id Int64, team_id Int64 {extra_fields} ) ENGINE = {engine} Order By (team_id, cohort_id, person_id, id) {storage_policy} # # Other queries # SELECT distinct_id, argMax(person_id, _timestamp) as person_id FROM ( SELECT distinct_id, person_id, max(_timestamp) as _timestamp FROM person_distinct_id WHERE team_id = %(team_id)s %(extra_where)s GROUP BY person_id, distinct_id, team_id HAVING max(is_deleted) = 0 ) GROUP BY distinct_id # Query to query distinct ids using the new table, will be used if 0003_fill_person_distinct_id2 migration is complete SELECT distinct_id, argMax(person_id, version) as person_id FROM person_distinct_id2 WHERE team_id = %(team_id)s %(extra_where)s GROUP BY distinct_id HAVING argMax(is_deleted, version) = 0 SELECT DISTINCT p.id FROM ({latest_person_sql}) AS p INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id WHERE team_id = %(team_id)s {distinct_query} {limit} {offset} INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0 INSERT INTO person_distinct_id SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0 VALUES INSERT INTO person_distinct_id2 (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition) SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0 VALUES INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1 ALTER TABLE events DELETE WHERE distinct_id IN ( SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s ) AND team_id = %(team_id)s INSERT INTO {cohort_table} SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM ( SELECT actor_id FROM ({query}) ) INSERT INTO {cohort_table} SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM ( SELECT id FROM ( {latest_person_sql} ) as person INNER JOIN ( SELECT person_id, distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE person_id IN ({content_sql}) ) as pdi ON person.id = pdi.person_id WHERE team_id = %(team_id)s GROUP BY id ) SELECT distinct_id FROM ( {GET_TEAM_PERSON_DISTINCT_IDS} ) WHERE person_id IN ( SELECT id FROM ( SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted FROM person WHERE team_id = %(team_id)s GROUP BY id HAVING is_deleted = 0 ) WHERE {filters} ) SELECT distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE {filters} SELECT tupleElement(keysAndValues, 1) as key, count(*) as count FROM person ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as 
keysAndValues WHERE team_id = %(team_id)s GROUP BY tupleElement(keysAndValues, 1) ORDER BY count DESC, key ASC SELECT {id_field} AS actor_id {matching_events_select_statement} FROM ({events_query}) GROUP BY actor_id {limit} {offset} SELECT value, count(value) FROM ( SELECT {property_field} as value FROM person WHERE team_id = %(team_id)s AND is_deleted = 0 AND {property_field} IS NOT NULL AND {property_field} != '' ORDER BY id DESC LIMIT 100000 ) GROUP BY value ORDER BY count(value) DESC LIMIT 20 SELECT value, count(value) FROM ( SELECT {property_field} as value FROM person WHERE team_id = %(team_id)s AND is_deleted = 0 AND {property_field} ILIKE %(value)s ORDER BY id DESC LIMIT 100000 ) GROUP BY value ORDER BY count(value) DESC LIMIT 20 | 1.918971 | 2 |
scripts/dump_training_data.py | davmre/sigvisa | 0 | 8932 | from sigvisa.learn.train_coda_models import get_shape_training_data
import numpy as np
X, y, evids = get_shape_training_data(runid=4, site="AS12", chan="SHZ", band="freq_2.0_3.0", phases=["P",], target="amp_transfer", max_acost=float("inf"), min_amp=-2)
np.savetxt("X.txt", X)
np.savetxt("y.txt", y)
np.savetxt("evids.txt", evids)
| from sigvisa.learn.train_coda_models import get_shape_training_data
import numpy as np
X, y, evids = get_shape_training_data(runid=4, site="AS12", chan="SHZ", band="freq_2.0_3.0", phases=["P",], target="amp_transfer", max_acost=float("inf"), min_amp=-2)
np.savetxt("X.txt", X)
np.savetxt("y.txt", y)
np.savetxt("evids.txt", evids)
| none | 1 | 2.50322 | 3 |
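# --- Editorial note on scripts/dump_training_data.py above (not part of the file) ---
# The script writes the three arrays as plain text via np.savetxt. A reader along
# these lines (illustrative only; the integer cast for event ids is an assumption)
# reloads them for later use:
import numpy as np
X = np.loadtxt("X.txt")
y = np.loadtxt("y.txt")
evids = np.loadtxt("evids.txt").astype(int)   # savetxt stores them as floats
print(X.shape, y.shape, evids.shape)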
|
shdw/tools/welford.py | wbrandenburger/ShadowDetection | 2 | 8933 | <gh_stars>1-10
import math
import numpy as np
# plt.style.use('seaborn')
# plt.rcParams['figure.figsize'] = (12, 8)
def welford(x_array):
k = 0
M = 0
S = 0
for x in x_array:
k += 1
Mnext = M + (x - M) / k
S = S + (x - M)*(x - Mnext)
M = Mnext
return (M, S/(k-1))
class Welford(object):
""" Implements Welford's algorithm for computing a running mean
and standard deviation as described at:
http://www.johndcook.com/standard_deviation.html
can take single values or iterables
Properties:
mean - returns the mean
std - returns the std
meanfull- returns the mean and std of the mean
Usage:
>>> foo = Welford()
>>> foo(range(100))
>>> foo
<Welford: 49.5 +- 29.0114919759>
>>> foo([1]*1000)
>>> foo
<Welford: 5.40909090909 +- 16.4437417146>
>>> foo.mean
5.409090909090906
>>> foo.std
16.44374171455467
>>> foo.meanfull
(5.409090909090906, 0.4957974674244838)
"""
def __init__(self,lst=None, num=1, mean=0, std=0):
self._num = num
self._mean = mean
self._std = math.pow(std, 2)*(num-1)
self.__call__(lst)
@property
def num(self):
return self._num
@property
def mean(self):
return self._mean
@property
def std(self):
if self._num==1:
return 0
return math.sqrt(self._std/(self._num-1))
@property
def meanfull(self):
return self._mean, self._std/math.sqrt(self._num)
@property
def stats(self):
return self._mean, self.std
def update(self, lst):
if lst is None:
return
if hasattr(lst, "__iter__"):
for x in lst:
self.update_welford(x)
else:
self.update_welford(lst)
def update_welford(self, x):
if x is None:
return
new_mean = self._mean + (x - self._mean)*1./self._num
new_std = self._std + (x - self._mean)*(x - new_mean)
self._num += 1
self._mean, self._std = new_mean, new_std
def consume(self,lst):
if isinstance(lst, np.ndarray):
npfunc = np.vectorize(self.update)
npfunc(lst)
else:
lst = iter(lst)
for x in lst:
self.update(x)
def __call__(self,x):
if hasattr(x,"__iter__"):
self.consume(x)
else:
self.update(x)
def __repr__(self):
return "<Stats: {} +- {}>".format(self.mean, self.std)
| import math
import numpy as np
# plt.style.use('seaborn')
# plt.rcParams['figure.figsize'] = (12, 8)
def welford(x_array):
k = 0
M = 0
S = 0
for x in x_array:
k += 1
Mnext = M + (x - M) / k
S = S + (x - M)*(x - Mnext)
M = Mnext
return (M, S/(k-1))
class Welford(object):
""" Implements Welford's algorithm for computing a running mean
and standard deviation as described at:
http://www.johndcook.com/standard_deviation.html
can take single values or iterables
Properties:
mean - returns the mean
std - returns the std
meanfull- returns the mean and std of the mean
Usage:
>>> foo = Welford()
>>> foo(range(100))
>>> foo
<Welford: 49.5 +- 29.0114919759>
>>> foo([1]*1000)
>>> foo
<Welford: 5.40909090909 +- 16.4437417146>
>>> foo.mean
5.409090909090906
>>> foo.std
16.44374171455467
>>> foo.meanfull
(5.409090909090906, 0.4957974674244838)
"""
def __init__(self,lst=None, num=1, mean=0, std=0):
self._num = num
self._mean = mean
self._std = math.pow(std, 2)*(num-1)
self.__call__(lst)
@property
def num(self):
return self._num
@property
def mean(self):
return self._mean
@property
def std(self):
if self._num==1:
return 0
return math.sqrt(self._std/(self._num-1))
@property
def meanfull(self):
return self._mean, self._std/math.sqrt(self._num)
@property
def stats(self):
return self._mean, self.std
def update(self, lst):
if lst is None:
return
if hasattr(lst, "__iter__"):
for x in lst:
self.update_welford(x)
else:
self.update_welford(lst)
def update_welford(self, x):
if x is None:
return
new_mean = self._mean + (x - self._mean)*1./self._num
new_std = self._std + (x - self._mean)*(x - new_mean)
self._num += 1
self._mean, self._std = new_mean, new_std
def consume(self,lst):
if isinstance(lst, np.ndarray):
npfunc = np.vectorize(self.update)
npfunc(lst)
else:
lst = iter(lst)
for x in lst:
self.update(x)
def __call__(self,x):
if hasattr(x,"__iter__"):
self.consume(x)
else:
self.update(x)
def __repr__(self):
return "<Stats: {} +- {}>".format(self.mean, self.std) | en | 0.627246 | # plt.style.use('seaborn') # plt.rcParams['figure.figsize'] = (12, 8) Implements Welford's algorithm for computing a running mean and standard deviation as described at: http://www.johndcook.com/standard_deviation.html can take single values or iterables Properties: mean - returns the mean std - returns the std meanfull- returns the mean and std of the mean Usage: >>> foo = Welford() >>> foo(range(100)) >>> foo <Welford: 49.5 +- 29.0114919759> >>> foo([1]*1000) >>> foo <Welford: 5.40909090909 +- 16.4437417146> >>> foo.mean 5.409090909090906 >>> foo.std 16.44374171455467 >>> foo.meanfull (5.409090909090906, 0.4957974674244838) | 3.614472 | 4 |
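# --- Editorial check for shdw/tools/welford.py above (not part of the file) ---
# Compares the one-pass Welford routines against numpy's two-pass statistics.
# The import path is assumed from the repository layout shown in the header.
import numpy as np
from shdw.tools.welford import welford, Welford

data = np.random.default_rng(0).normal(loc=3.0, scale=2.0, size=10_000)

mean_w, var_w = welford(data)                 # returns (mean, sample variance)
print(mean_w, np.sqrt(var_w))
print(np.mean(data), np.std(data, ddof=1))    # should agree closely

acc = Welford()
acc(data.tolist())                            # stream values through the running estimator
print(acc.mean, acc.std)                      # note: the class seeds its counter at 1,
                                              # so its std divides by n (population-style)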
day3/functions.py | lilbond/bitis | 0 | 8934 |
def greet():
print("Hi")
def greet_again(message):
print(message)
def greet_again_with_type(message):
print(type(message))
print(message)
greet()
greet_again("Hello Again")
greet_again_with_type("One Last Time")
greet_again_with_type(1234)
# multiple types
def multiple_types(x):
if x < 0:
return -1
else:
return "Returning Hello"
print(multiple_types(-2))
print(multiple_types(10))
# variable arguments
def var_arguments(*args): # args will be tuples containing all the values
for value in args:
print(value)
var_arguments(1, 2, 3)
a = [1, 2, 3]
var_arguments(a)
var_arguments(*a) # expanding
def key_arg(**kwargs):
for key,value in kwargs.items():
print(key, value)
b = {"first" : "python", "second" : "python again"}
key_arg(**b) | 
def greet():
print("Hi")
def greet_again(message):
print(message)
def greet_again_with_type(message):
print(type(message))
print(message)
greet()
greet_again("Hello Again")
greet_again_with_type("One Last Time")
greet_again_with_type(1234)
# multiple types
def multiple_types(x):
if x < 0:
return -1
else:
return "Returning Hello"
print(multiple_types(-2))
print(multiple_types(10))
# variable arguments
def var_arguments(*args): # args will be tuples containing all the values
for value in args:
print(value)
var_arguments(1, 2, 3)
a = [1, 2, 3]
var_arguments(a)
var_arguments(*a) # expanding
def key_arg(**kwargs):
for key,value in kwargs.items():
print(key, value)
b = {"first" : "python", "second" : "python again"}
key_arg(**b) | en | 0.323683 | # multiple types # variable arguments # args will be tuples containing all the values # expanding | 3.992867 | 4
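# --- Editorial note on day3/functions.py above (not part of the file) ---
# key_arg() only accepts keyword arguments, so a dict has to be unpacked with **;
# passing the dict positionally raises TypeError.
def key_arg(**kwargs):
    for key, value in kwargs.items():
        print(key, value)

b = {"first": "python", "second": "python again"}
key_arg(**b)          # OK: each key/value pair becomes a keyword argument
try:
    key_arg(b)        # TypeError: key_arg() takes 0 positional arguments but 1 was given
except TypeError as err:
    print(err)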
test.py | KipCrossing/Micropython-AD9833 | 11 | 8935 | from ad9833 import AD9833
# DUMMY classes for testing without board
class SBI(object):
def __init__(self):
pass
def send(self, data):
print(data)
class Pin(object):
def __init__(self):
pass
def low(self):
print(" 0")
def high(self):
print(" 1")
# Code
SBI1 = SBI()
PIN3 = Pin()
wave = AD9833(SBI1, PIN3)
wave.set_freq(14500)
wave.set_type(2)
wave.send()
print(wave.shape_type)
| from ad9833 import AD9833
# DUMMY classes for testing without board
class SBI(object):
def __init__(self):
pass
def send(self, data):
print(data)
class Pin(object):
def __init__(self):
pass
def low(self):
print(" 0")
def high(self):
print(" 1")
# Code
SBI1 = SBI()
PIN3 = Pin()
wave = AD9833(SBI1, PIN3)
wave.set_freq(14500)
wave.set_type(2)
wave.send()
print(wave.shape_type)
| en | 0.744146 | # DUMMY classes for testing without board # Code | 3.008584 | 3 |
tests/test_api_network.py | devicehive/devicehive-plugin-python-template | 0 | 8936 | # Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from six.moves import range
def test_subscribe_events(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-e', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-e', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_name = '%s-command' % device_id
notification_name = '%s-notification' % device_id
return {'device': device,
'network': network,
'command_name': command_name,
'notification_name': notification_name}
def send_data(device, command_name, notification_name):
command = device.send_command(command_name)
command.status = 'status'
command.save()
notification = device.send_notification(notification_name)
return command.id, command.id, notification.id
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('command/update', command_update_id),
('notification/insert', notification_id)]
def handle_event(handler, event):
action_id_pair = (event.action, event.data.id)
assert action_id_pair in handler.data['event_ids']
handler.data['event_ids'].remove(action_id_pair)
if handler.data['event_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id])
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('command/update', command_update_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_notifications=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('notification/insert', notification_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_update_commands=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/update', command_update_id),
('notification/insert', notification_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_insert_commands(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'command_names': command_names}
def send_data(device, command_names):
return [device.send_command(name).id for name in command_names]
def handle_connect(handler):
handler.data['command_ids'] = send_data(handler.data['device'],
handler.data['command_names'])
def handle_command_insert(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_update_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_insert=handle_command_insert, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['command_ids'] = send_data(
handler.data['device'], handler.data['command_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['command_names'][-1:],
subscribe_update_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_insert=handle_command_insert, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_update_commands(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-u-c', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'command_names': command_names}
def send_data(device, command_names):
command_ids = []
for name in command_names:
command = device.send_command(name)
command.status = 'status'
command.save()
command_ids.append(command.id)
return command_ids
def handle_connect(handler):
handler.data['command_ids'] = send_data(handler.data['device'],
handler.data['command_names'])
def handle_command_update(handler, command):
assert command.id in handler.data['command_ids']
assert command.status == 'status'
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_update=handle_command_update, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['command_ids'] = send_data(
handler.data['device'], handler.data['command_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['command_names'][-1:],
subscribe_insert_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_update=handle_command_update, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_notifications(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'notification_names': notification_names}
def send_data(device, notification_names):
return [device.send_notification(name).id for name in
notification_names]
def handle_connect(handler):
handler.data['notification_ids'] = send_data(
handler.data['device'], handler.data['notification_names'])
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False,
subscribe_update_commands=False)
test.run(plugin, handle_connect,
handle_notification=handle_notification, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['notification_ids'] = send_data(
handler.data['device'], handler.data['notification_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['notification_names'][-1:],
subscribe_insert_commands=False,
subscribe_update_commands=False)
test.run(plugin, handle_connect,
handle_notification=handle_notification, data=data)
plugin_api.remove_plugin(plugin['topicName'])
| # Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from six.moves import range
def test_subscribe_events(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-e', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-e', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_name = '%s-command' % device_id
notification_name = '%s-notification' % device_id
return {'device': device,
'network': network,
'command_name': command_name,
'notification_name': notification_name}
def send_data(device, command_name, notification_name):
command = device.send_command(command_name)
command.status = 'status'
command.save()
notification = device.send_notification(notification_name)
return command.id, command.id, notification.id
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('command/update', command_update_id),
('notification/insert', notification_id)]
def handle_event(handler, event):
action_id_pair = (event.action, event.data.id)
assert action_id_pair in handler.data['event_ids']
handler.data['event_ids'].remove(action_id_pair)
if handler.data['event_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id])
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('command/update', command_update_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_notifications=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/insert', command_insert_id),
('notification/insert', notification_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_update_commands=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
event_ids = send_data(handler.data['device'],
handler.data['command_name'],
handler.data['notification_name'])
command_insert_id, command_update_id, notification_id = event_ids
handler.data['event_ids'] = [('command/update', command_update_id),
('notification/insert', notification_id)]
data = init_data()
name = test.generate_id('n-s-e', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False)
test.run(plugin, handle_connect, handle_event, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_insert_commands(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'command_names': command_names}
def send_data(device, command_names):
return [device.send_command(name).id for name in command_names]
def handle_connect(handler):
handler.data['command_ids'] = send_data(handler.data['device'],
handler.data['command_names'])
def handle_command_insert(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_update_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_insert=handle_command_insert, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['command_ids'] = send_data(
handler.data['device'], handler.data['command_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-i-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['command_names'][-1:],
subscribe_update_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_insert=handle_command_insert, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_update_commands(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-u-c', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-u-c', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'command_names': command_names}
def send_data(device, command_names):
command_ids = []
for name in command_names:
command = device.send_command(name)
command.status = 'status'
command.save()
command_ids.append(command.id)
return command_ids
def handle_connect(handler):
handler.data['command_ids'] = send_data(handler.data['device'],
handler.data['command_names'])
def handle_command_update(handler, command):
assert command.id in handler.data['command_ids']
assert command.status == 'status'
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_update=handle_command_update, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['command_ids'] = send_data(
handler.data['device'], handler.data['command_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-u-c', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['command_names'][-1:],
subscribe_insert_commands=False,
subscribe_notifications=False)
test.run(plugin, handle_connect,
handle_command_update=handle_command_update, data=data)
plugin_api.remove_plugin(plugin['topicName'])
def test_subscribe_notifications(test):
test.only_admin_implementation()
plugin_api = test.plugin_api()
device_hive_api = test.device_hive_api()
def init_data():
net_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
net_description = '%s-description' % net_name
network = device_hive_api.create_network(net_name, net_description)
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id, network_id=network.id)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
return {'device': device,
'network': network,
'notification_names': notification_names}
def send_data(device, notification_names):
return [device.send_notification(name).id for name in
notification_names]
def handle_connect(handler):
handler.data['notification_ids'] = send_data(
handler.data['device'], handler.data['notification_names'])
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['device'].remove()
handler.disconnect()
data = init_data()
name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
subscribe_insert_commands=False,
subscribe_update_commands=False)
test.run(plugin, handle_connect,
handle_notification=handle_notification, data=data)
plugin_api.remove_plugin(plugin['topicName'])
# =========================================================================
def handle_connect(handler):
handler.data['notification_ids'] = send_data(
handler.data['device'], handler.data['notification_names'])[-1:]
data = init_data()
name = test.generate_id('n-s-n', test.PLUGIN_ENTITY)
description = '%s-description' % name
plugin = plugin_api.create_plugin(name, description,
network_ids=[data['network'].id],
names=data['notification_names'][-1:],
subscribe_insert_commands=False,
subscribe_update_commands=False)
test.run(plugin, handle_connect,
handle_notification=handle_notification, data=data)
plugin_api.remove_plugin(plugin['topicName'])
| en | 0.632191 | # Copyright (C) 2018 DataArt # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # ========================================================================= # ========================================================================= # ========================================================================= # ========================================================================= # ========================================================================= # ========================================================================= | 1.735289 | 2 |
filehandler.py | miciux/telegram-bot-admin | 1 | 8937 | <filename>filehandler.py
import logging
import abstracthandler
import os
class FileHandler(abstracthandler.AbstractHandler):
def __init__(self, conf, bot):
abstracthandler.AbstractHandler.__init__(self, 'file', conf, bot)
self.log = logging.getLogger(__name__)
self.commands={}
self.commands['list'] = self.get_file_list
def handle_message(self,cid, command, args):
try:
self.commands[command](cid,args)
except Exception as e:
self.send_formatted_message(cid,self.get_sorry_message())
self.log.error(e)
def get_file_list(self, cid, args):
if len(args) >= 1:
for folder in args:
self.send_formatted_message(cid,self.get_folder_content(folder))
else:
self.send_formatted_message(cid,'*file list* usage: file list _[DIRECTORY]_...')
def get_folder_content(self, folder):
        message = 'List of files in *%s*:\n_%s_'
files = '\n'.join(os.listdir(folder))
return message % (folder,files);
| <filename>filehandler.py
import logging
import abstracthandler
import os
class FileHandler(abstracthandler.AbstractHandler):
def __init__(self, conf, bot):
abstracthandler.AbstractHandler.__init__(self, 'file', conf, bot)
self.log = logging.getLogger(__name__)
self.commands={}
self.commands['list'] = self.get_file_list
def handle_message(self,cid, command, args):
try:
self.commands[command](cid,args)
except Exception as e:
self.send_formatted_message(cid,self.get_sorry_message())
self.log.error(e)
def get_file_list(self, cid, args):
if len(args) >= 1:
for folder in args:
self.send_formatted_message(cid,self.get_folder_content(folder))
else:
self.send_formatted_message(cid,'*file list* usage: file list _[DIRECTORY]_...')
def get_folder_content(self, folder):
        message = 'List of files in *%s*:\n_%s_'
files = '\n'.join(os.listdir(folder))
return message % (folder,files);
| none | 1 | 2.864282 | 3 |
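# --- Editorial sketch for filehandler.py above (not part of the file) ---
# The handler resolves sub-commands through a plain dict lookup; stripped of the
# Telegram plumbing, the dispatch pattern looks like this:
import os

commands = {"list": lambda args: "\n".join(os.listdir(args[0] if args else "."))}

def handle_message(command, args):
    try:
        return commands[command](args)
    except Exception as exc:            # unknown command or listing failure
        return "sorry, something went wrong: %s" % exc

print(handle_message("list", ["."]))
print(handle_message("delete", []))     # falls into the except branch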
|
tensorflow_federated/python/learning/federated_evaluation.py | Tensorflow-Devs/federated | 0 | 8938 | <reponame>Tensorflow-Devs/federated<filename>tensorflow_federated/python/learning/federated_evaluation.py
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation of federated evaluation."""
import collections
from typing import Callable, Optional
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import optimizer_utils
# Convenience aliases.
SequenceType = computation_types.SequenceType
def build_federated_evaluation(
model_fn: Callable[[], model_lib.Model],
broadcast_process: Optional[measured_process.MeasuredProcess] = None,
use_experimental_simulation_loop: bool = False,
) -> computation_base.Computation:
"""Builds the TFF computation for federated evaluation of the given model.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`. This method
must *not* capture TensorFlow tensors or variables and use them. The model
must be constructed entirely from scratch on each invocation, returning
the same pre-constructed model each call will result in an error.
broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the
model weights on the server to the clients. It must support the signature
`(input_values@SERVER -> output_values@CLIENTS)` and have empty state. If
set to default None, the server model is broadcast to the clients using
the default tff.federated_broadcast.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A federated computation (an instance of `tff.Computation`) that accepts
model parameters and federated data, and returns the evaluation metrics
as aggregated by `tff.learning.Model.federated_output_computation`.
"""
if broadcast_process is not None:
if not isinstance(broadcast_process, measured_process.MeasuredProcess):
raise ValueError('`broadcast_process` must be a `MeasuredProcess`, got '
f'{type(broadcast_process)}.')
if optimizer_utils.is_stateful_process(broadcast_process):
raise ValueError(
'Cannot create a federated evaluation with a stateful '
'broadcast process, must be stateless, has state: '
f'{broadcast_process.initialize.type_signature.result!r}')
# Construct the model first just to obtain the metadata and define all the
# types needed to define the computations that follow.
# TODO(b/124477628): Ideally replace the need for stamping throwaway models
# with some other mechanism.
with tf.Graph().as_default():
model = model_fn()
model_weights_type = model_utils.weights_type_from_model(model)
batch_type = computation_types.to_type(model.input_spec)
@computations.tf_computation(model_weights_type, SequenceType(batch_type))
@tf.function
def client_eval(incoming_model_weights, dataset):
"""Returns local outputs after evaluting `model_weights` on `dataset`."""
with tf.init_scope():
model = model_fn()
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
incoming_model_weights)
def reduce_fn(num_examples, batch):
model_output = model.forward_pass(batch, training=False)
if model_output.num_examples is None:
# Compute shape from the size of the predictions if model didn't use the
# batch size.
return num_examples + tf.shape(
model_output.predictions, out_type=tf.int64)[0]
else:
return num_examples + tf.cast(model_output.num_examples, tf.int64)
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
num_examples = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=lambda: tf.zeros([], dtype=tf.int64))
return collections.OrderedDict(
local_outputs=model.report_local_outputs(), num_examples=num_examples)
@computations.federated_computation(
computation_types.at_server(model_weights_type),
computation_types.at_clients(SequenceType(batch_type)))
def server_eval(server_model_weights, federated_dataset):
if broadcast_process is not None:
# TODO(b/179091838): Zip the measurements from the broadcast_process with
# the result of `model.federated_output_computation` below to avoid
# dropping these metrics.
broadcast_output = broadcast_process.next(broadcast_process.initialize(),
server_model_weights)
client_outputs = intrinsics.federated_map(
client_eval, (broadcast_output.result, federated_dataset))
else:
client_outputs = intrinsics.federated_map(client_eval, [
intrinsics.federated_broadcast(server_model_weights),
federated_dataset
])
model_metrics = model.federated_output_computation(
client_outputs.local_outputs)
statistics = collections.OrderedDict(
num_examples=intrinsics.federated_sum(client_outputs.num_examples))
return intrinsics.federated_zip(
collections.OrderedDict(eval=model_metrics, stat=statistics))
return server_eval
| # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation of federated evaluation."""
import collections
from typing import Callable, Optional
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import optimizer_utils
# Convenience aliases.
SequenceType = computation_types.SequenceType
def build_federated_evaluation(
model_fn: Callable[[], model_lib.Model],
broadcast_process: Optional[measured_process.MeasuredProcess] = None,
use_experimental_simulation_loop: bool = False,
) -> computation_base.Computation:
"""Builds the TFF computation for federated evaluation of the given model.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`. This method
must *not* capture TensorFlow tensors or variables and use them. The model
must be constructed entirely from scratch on each invocation, returning
the same pre-constructed model each call will result in an error.
broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the
model weights on the server to the clients. It must support the signature
`(input_values@SERVER -> output_values@CLIENTS)` and have empty state. If
set to default None, the server model is broadcast to the clients using
the default tff.federated_broadcast.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation.
Returns:
A federated computation (an instance of `tff.Computation`) that accepts
model parameters and federated data, and returns the evaluation metrics
as aggregated by `tff.learning.Model.federated_output_computation`.
"""
if broadcast_process is not None:
if not isinstance(broadcast_process, measured_process.MeasuredProcess):
raise ValueError('`broadcast_process` must be a `MeasuredProcess`, got '
f'{type(broadcast_process)}.')
if optimizer_utils.is_stateful_process(broadcast_process):
raise ValueError(
'Cannot create a federated evaluation with a stateful '
'broadcast process, must be stateless, has state: '
f'{broadcast_process.initialize.type_signature.result!r}')
# Construct the model first just to obtain the metadata and define all the
# types needed to define the computations that follow.
# TODO(b/124477628): Ideally replace the need for stamping throwaway models
# with some other mechanism.
with tf.Graph().as_default():
model = model_fn()
model_weights_type = model_utils.weights_type_from_model(model)
batch_type = computation_types.to_type(model.input_spec)
@computations.tf_computation(model_weights_type, SequenceType(batch_type))
@tf.function
def client_eval(incoming_model_weights, dataset):
"""Returns local outputs after evaluting `model_weights` on `dataset`."""
with tf.init_scope():
model = model_fn()
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights,
incoming_model_weights)
def reduce_fn(num_examples, batch):
model_output = model.forward_pass(batch, training=False)
if model_output.num_examples is None:
# Compute shape from the size of the predictions if model didn't use the
# batch size.
return num_examples + tf.shape(
model_output.predictions, out_type=tf.int64)[0]
else:
return num_examples + tf.cast(model_output.num_examples, tf.int64)
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
num_examples = dataset_reduce_fn(
reduce_fn=reduce_fn,
dataset=dataset,
initial_state_fn=lambda: tf.zeros([], dtype=tf.int64))
return collections.OrderedDict(
local_outputs=model.report_local_outputs(), num_examples=num_examples)
@computations.federated_computation(
computation_types.at_server(model_weights_type),
computation_types.at_clients(SequenceType(batch_type)))
def server_eval(server_model_weights, federated_dataset):
if broadcast_process is not None:
# TODO(b/179091838): Zip the measurements from the broadcast_process with
# the result of `model.federated_output_computation` below to avoid
# dropping these metrics.
broadcast_output = broadcast_process.next(broadcast_process.initialize(),
server_model_weights)
client_outputs = intrinsics.federated_map(
client_eval, (broadcast_output.result, federated_dataset))
else:
client_outputs = intrinsics.federated_map(client_eval, [
intrinsics.federated_broadcast(server_model_weights),
federated_dataset
])
model_metrics = model.federated_output_computation(
client_outputs.local_outputs)
statistics = collections.OrderedDict(
num_examples=intrinsics.federated_sum(client_outputs.num_examples))
return intrinsics.federated_zip(
collections.OrderedDict(eval=model_metrics, stat=statistics))
return server_eval | en | 0.814118 | # Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A simple implementation of federated evaluation. # Convenience aliases. Builds the TFF computation for federated evaluation of the given model. Args: model_fn: A no-arg function that returns a `tff.learning.Model`. This method must *not* capture TensorFlow tensors or variables and use them. The model must be constructed entirely from scratch on each invocation, returning the same pre-constructed model each call will result in an error. broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the model weights on the server to the clients. It must support the signature `(input_values@SERVER -> output_values@CLIENTS)` and have empty state. If set to default None, the server model is broadcast to the clients using the default tff.federated_broadcast. use_experimental_simulation_loop: Controls the reduce loop function for input dataset. An experimental reduce loop is used for simulation. Returns: A federated computation (an instance of `tff.Computation`) that accepts model parameters and federated data, and returns the evaluation metrics as aggregated by `tff.learning.Model.federated_output_computation`. # Construct the model first just to obtain the metadata and define all the # types needed to define the computations that follow. # TODO(b/124477628): Ideally replace the need for stamping throwaway models # with some other mechanism. Returns local outputs after evaluting `model_weights` on `dataset`. # Compute shape from the size of the predictions if model didn't use the # batch size. # TODO(b/179091838): Zip the measurements from the broadcast_process with # the result of `model.federated_output_computation` below to avoid # dropping these metrics. | 1.871156 | 2 |
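# --- Editorial usage sketch for federated_evaluation.py above (not part of the file) ---
# Rough illustration of calling build_federated_evaluation with a Keras-backed
# tff.learning.Model. API details (from_keras_model signature, how model weights are
# obtained) vary across TFF releases, so treat this as a sketch, not exact usage.
import collections
import tensorflow as tf
import tensorflow_federated as tff

def model_fn():
    keras_model = tf.keras.Sequential(
        [tf.keras.layers.Dense(10, activation="softmax", input_shape=(784,))])
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=collections.OrderedDict(
            x=tf.TensorSpec([None, 784], tf.float32),
            y=tf.TensorSpec([None, 1], tf.int32)),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

evaluation = build_federated_evaluation(model_fn)
# metrics = evaluation(server_model_weights, list_of_client_tf_datasets)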
pylibcontainer/image.py | joaompinto/pylibcontainer | 7 | 8939 | from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime
CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")
class Cache(object):
cache_dir = CACHE_PATH
""" Provides an image caching mechanism on disk """
def __init__(self):
if not exists(CACHE_PATH):
os.makedirs(CACHE_PATH, 0o700)
def get(self, cache_key, default=None):
""" return info for cached file """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
if exists(cache_fn):
file_stat = os.stat(cache_fn)
last_modified = datetime.fromtimestamp(file_stat.st_mtime)
file_size = file_stat.st_size
return cache_fn, cache_hash, last_modified, file_size
return default
def put(self, filename, cache_key):
""" put a file into cache """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
shutil.move(filename, cache_fn)
return cache_hash, cache_fn
def download(image_url):
""" Download image (if not found in cache) and return it's filename """
response = requests.head(image_url)
file_size = remote_file_size = int(response.headers.get("Content-Length"))
remote_last_modified = parsedate(response.headers.get("Last-Modified")).replace(
tzinfo=None
)
remote_is_valid = response.status_code == 200 and file_size and remote_last_modified
# Check if image is on cache
cache = Cache()
cached_image = cache.get(image_url)
if cached_image:
if remote_is_valid:
cache_fn, cache_hash, last_modified, file_size = cached_image
if remote_file_size == file_size and remote_last_modified < last_modified:
print_info("Using file from cache", CACHE_PATH)
return cache_hash, cache_fn
print_info("Downloading new remote file because an update was found")
else:
print_warn("Unable to check the status for " + image_url)
print_warn("Assuming local cache is valid")
# Not cached, and no valid remote information was found
if not remote_is_valid:
print_error(
"Unable to get file, http_code=%s, size=%s, last_modified=%s"
% (response.status_code, remote_file_size, remote_last_modified)
)
exit(2)
    # Download image
print_info(
"Downloading image... ",
"{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
)
remote_sha256 = hashlib.sha256()
response = requests.get(image_url, stream=True)
with NamedTemporaryFile(delete=False) as tmp_file:
for chunk in progress.bar(
response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
):
if chunk:
remote_sha256.update(chunk)
tmp_file.write(chunk)
tmp_file.flush()
# Verify image integrity
trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
if not trust_verify or not trust_verify.valid or not trust_verify.username:
print_error("Integrity/authenticity error - GPG signature mismatch!")
exit(3)
print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
return cache.put(tmp_file.name, image_url)
@click.command()
@click.argument("image_url")
@click.option("--as_root", is_flag=True)
@click.option("--overlay", "-o", multiple=True)
@click.argument("command", nargs=-1)
def run(image_url, command, as_root, overlay):
url = get_url(image_url)
image_url = url or image_url
if not image_url:
print_info("No index was found for image", image_url)
exit(5)
is_validate_only = False
if not command:
command = ["/bin/sh"]
image_protocol = image_url.split(":")[0].lower()
if image_protocol in ["http", "https"]:
_, image_fn = download(image_url)
else:
_, image_fn = sha256(image_url).hexdigest(), image_url
rootfs = extract_layer(image_fn)
if len(command) == 1 and command[0] == "-":
is_validate_only = True
print("Validating container setup with the rootfs")
else:
print_info("Executing", " ".join(command))
_, exit_code = container.runc(rootfs, command, as_root, overlay)
if exit_code != 0:
print_error("Last command returned an error")
elif is_validate_only:
print_success("OK")
| from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime
CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")
class Cache(object):
cache_dir = CACHE_PATH
""" Provides an image caching mechanism on disk """
def __init__(self):
if not exists(CACHE_PATH):
os.makedirs(CACHE_PATH, 0o700)
def get(self, cache_key, default=None):
""" return info for cached file """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
if exists(cache_fn):
file_stat = os.stat(cache_fn)
last_modified = datetime.fromtimestamp(file_stat.st_mtime)
file_size = file_stat.st_size
return cache_fn, cache_hash, last_modified, file_size
return default
def put(self, filename, cache_key):
""" put a file into cache """
cache_hash = sha256(cache_key.encode()).hexdigest()
cache_fn = join(CACHE_PATH, "url_" + cache_hash)
shutil.move(filename, cache_fn)
return cache_hash, cache_fn
def download(image_url):
""" Download image (if not found in cache) and return it's filename """
response = requests.head(image_url)
file_size = remote_file_size = int(response.headers.get("Content-Length"))
remote_last_modified = parsedate(response.headers.get("Last-Modified")).replace(
tzinfo=None
)
remote_is_valid = response.status_code == 200 and file_size and remote_last_modified
# Check if image is on cache
cache = Cache()
cached_image = cache.get(image_url)
if cached_image:
if remote_is_valid:
cache_fn, cache_hash, last_modified, file_size = cached_image
if remote_file_size == file_size and remote_last_modified < last_modified:
print_info("Using file from cache", CACHE_PATH)
return cache_hash, cache_fn
print_info("Downloading new remote file because an update was found")
else:
print_warn("Unable to check the status for " + image_url)
print_warn("Assuming local cache is valid")
# Not cached, and no valid remote information was found
if not remote_is_valid:
print_error(
"Unable to get file, http_code=%s, size=%s, last_modified=%s"
% (response.status_code, remote_file_size, remote_last_modified)
)
exit(2)
    # Download image
print_info(
"Downloading image... ",
"{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
)
remote_sha256 = hashlib.sha256()
response = requests.get(image_url, stream=True)
with NamedTemporaryFile(delete=False) as tmp_file:
for chunk in progress.bar(
response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
):
if chunk:
remote_sha256.update(chunk)
tmp_file.write(chunk)
tmp_file.flush()
# Verify image integrity
trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
if not trust_verify or not trust_verify.valid or not trust_verify.username:
print_error("Integrity/authenticity error - GPG signature mismatch!")
exit(3)
print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
return cache.put(tmp_file.name, image_url)
@click.command()
@click.argument("image_url")
@click.option("--as_root", is_flag=True)
@click.option("--overlay", "-o", multiple=True)
@click.argument("command", nargs=-1)
def run(image_url, command, as_root, overlay):
url = get_url(image_url)
image_url = url or image_url
if not image_url:
print_info("No index was found for image", image_url)
exit(5)
is_validate_only = False
if not command:
command = ["/bin/sh"]
image_protocol = image_url.split(":")[0].lower()
if image_protocol in ["http", "https"]:
_, image_fn = download(image_url)
else:
        _, image_fn = sha256(image_url.encode()).hexdigest(), image_url
rootfs = extract_layer(image_fn)
if len(command) == 1 and command[0] == "-":
is_validate_only = True
print("Validating container setup with the rootfs")
else:
print_info("Executing", " ".join(command))
_, exit_code = container.runc(rootfs, command, as_root, overlay)
if exit_code != 0:
print_error("Last command returned an error")
elif is_validate_only:
print_success("OK")
| en | 0.890305 | Provides an image caching mechanism on disk return info for cached file put a file into cache Download image (if not found in cache) and return it's filename # Check if image is on cache # Not cached, and no valid remote information was found # Dowload image # Verify image integrity | 2.370055 | 2 |
utest/test_compareimage.py | michel117/robotframework-doctestlibrary | 1 | 8940 | from DocTest.CompareImage import CompareImage
import pytest
from pathlib import Path
import numpy
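# Note: 'testdata_dir' is assumed to be a pytest fixture (e.g. defined in conftest.py)
# that points at the directory containing the sample files used below.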
def test_single_png(testdata_dir):
img = CompareImage(testdata_dir / 'text_big.png')
assert len(img.opencv_images)==1
assert type(img.opencv_images)==list
    assert type(img.opencv_images[0])==numpy.ndarray
def test_single_pdf(testdata_dir):
pass
def test_multipage_pdf(testdata_dir):
pass
def test_huge_pdf(testdata_dir):
pass
def test_image_text_content(testdata_dir):
pass
def test_pdf_text_content(testdata_dir):
pass
def test_non_existing_file(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'does_not_exist.png')
def test_corrupt_image(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'corrupt_image.png')
def test_corrupt_pdf(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'corrupt_pdf.pdf')
| from DocTest.CompareImage import CompareImage
import pytest
from pathlib import Path
import numpy
def test_single_png(testdata_dir):
img = CompareImage(testdata_dir / 'text_big.png')
assert len(img.opencv_images)==1
assert type(img.opencv_images)==list
    assert type(img.opencv_images[0])==numpy.ndarray
def test_single_pdf(testdata_dir):
pass
def test_multipage_pdf(testdata_dir):
pass
def test_huge_pdf(testdata_dir):
pass
def test_image_text_content(testdata_dir):
pass
def test_pdf_text_content(testdata_dir):
pass
def test_non_existing_file(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'does_not_exist.png')
def test_corrupt_image(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'corrupt_image.png')
def test_corrupt_pdf(testdata_dir):
with pytest.raises(AssertionError):
img = CompareImage(testdata_dir / 'corrupt_pdf.pdf')
| none | 1 | 2.301733 | 2 |
|
cvstudio/view/widgets/labels_tableview/__init__.py | haruiz/PytorchCvStudio | 32 | 8941 | <reponame>haruiz/PytorchCvStudio<gh_stars>10-100
from .labels_tableview import LabelsTableView
| from .labels_tableview import LabelsTableView | none | 1 | 1.087449 | 1 |
|
experiments/solve_different_methods.py | vishalbelsare/ags_nlp_solver | 8 | 8942 | import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools
from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo
from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
class AGSWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01, mixedFast=False):
params = self.class_name2params(class_name)
params.mixedFastMode = mixedFast
if dist_stop:
params.eps = 0
params.itersLimit = max_iters
self.solver = ags_solver.Solver()
self.solver.SetParameters(params)
self.dist_stop = dist_stop
self.eps = eps
def class_name2params(self, name):
params = ags_solver.Parameters()
if 'grish' in name:
params.r = 3
elif 'gklss2' in name:
params.r = 4.6
elif 'gklsh2' in name:
params.r = 6.5
elif 'gklss3' in name:
params.r = 3.7
elif 'gklsh3' in name:
params.r = 4.4
elif 'gklss4' in name:
params.r = 4.7
elif 'gklsh4' in name:
params.r = 4.9
elif 'gklss5' in name:
params.r = 4
params.evolventDensity = 10
elif 'gklsh5' in name:
params.r = 4
params.evolventDensity = 10
return params
def Solve(self, problem):
self.solver.SetProblem([lambda x: problem.Calculate(x)], *problem.GetBounds())
#self.solver.SetProblem(problem)
if not self.dist_stop:
point, val, idx = self.solver.Solve()
else:
opt_pt = np.array(problem.GetOptimumPoint())
point, val, idx = self.solver.Solve(lambda x: np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
#calcCounters = self.solver.GetCalculationsStatistics()
calcCounters = problem.GetCalculationsStatistics()
return point, val, calcCounters
class SDAWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
ret = sda(lambda x: problem.Calculate(x), None, bounds=list(zip(lb, ub)), \
seed=100, maxfun=self.max_iters, visit=2.72, maxiter=self.max_iters)
n_evals = problem.GetCalculationsStatistics()
return ret.x, ret.fun, n_evals
class SCBasinhoppingWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
#pop_size = self.class_name2params(self.class_name)
class MyBounds(object):
def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
x0 = [.5]*problem.GetDimension()
result = \
basinhopping(lambda x: problem.Calculate(x), x0, accept_test=MyBounds(ub, lb), seed=100, T=10, stepsize=0.3)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class SCDEWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
bounds = [(l, u) for l, u in zip(lb, ub)]
pop_size = self.class_name2params(self.class_name)
result = \
differential_evolution(
lambda x: problem.Calculate(x), bounds, mutation=(1.1,1.9),
tol=1e-12, maxiter=int(float(self.max_iters) / (pop_size*problem.GetDimension())), popsize=pop_size, disp=False, seed=100)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class PyEvolveWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
    def Solve(self, problem):
        # pyevolve is not imported at module level in this file; the lazy import
        # below is an assumption based on the classic pyevolve API.
        from pyevolve import G1DList, GSimpleGA, Selectors, Initializators, Mutators, Consts
lb, ub = problem.GetBounds()
# Genome instance
genome = G1DList.G1DList(2)
genome.setParams(rangemin=lb[0], rangemax=ub[0], bestRawScore=-100, roundDecimal=2)
genome.initializator.set(Initializators.G1DListInitializatorReal)
genome.mutator.set(Mutators.G1DListMutatorRealGaussian)
# The evaluator function (objective function)
genome.evaluator.set(lambda x: problem.Calculate(x) + 100)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
ga.selector.set(Selectors.GRouletteWheel)
ga.minimax = Consts.minimaxType["minimize"]
ga.setGenerations(5000)
ga.setMutationRate(0.05)
ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=100)
# Best individual
best = ga.bestIndividual()
print ("\nBest individual score: %.2f" % (best.score - 100,))
print (best)
from bayes_opt import BayesianOptimization
class BOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
bo = BayesianOptimization(lambda x, y: -problem.Calculate([x, y]),
{'x': (lb[0], ub[0]), 'y': (lb[1], ub[1])})
bo.maximize(init_points=5, n_iter=20, kappa=1.5)
n_evals = problem.GetCalculationsStatistics()
opt_val = -bo.res['max']['max_val']
opt_point = [bo.res['max']['max_params']['x'], bo.res['max']['max_params']['y']]
return opt_point, opt_val, n_evals
class SimpleWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.exploration = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
return 0.1
elif 'gklss2' in name:
return 0.15
elif 'gklsh2' in name:
return 0.15
elif 'gklss3' in name:
return 0.15
elif 'gklsh3' in name:
return 0.25
elif 'gklss4' in name:
return 0.2
elif 'gklsh4' in name:
return 0.25
def Solve(self, problem):
objective_function = lambda x: -problem.Calculate(x)
lb, ub = problem.GetBounds()
opt_pt = problem.GetOptimumPoint()
bounds = [[l, u] for l, u in zip(lb, ub)]
points = np.array([point for point in itertools.product(*bounds)])
tri = Delaunay(points)
optimization_domain_vertices = points[tri.simplices]
exploration = self.exploration # optional, default 0.15
tuner = SimpleTuner(optimization_domain_vertices, objective_function, \
exploration_preference=exploration,
stop_criterion=lambda x:np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
tuner.optimize(self.max_iters)
opt_val, opt_point = tuner.get_best()
#tuner.plot() # only works in 2D
n_evals = problem.GetCalculationsStatistics()
return opt_point, -opt_val, n_evals
class NLOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, method=nlopt.GD_STOGO, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.method = method
self.max_iters = max_iters
self.pop_size = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 150
elif 'gklss2' in name:
popsize = 200
elif 'gklsh2' in name:
popsize = 400
elif 'gklss3' in name:
popsize = 1000
elif 'gklsh3' in name:
popsize = 2000
elif 'gklss4' in name:
popsize = 8000
elif 'gklsh4' in name:
popsize = 16000
elif 'gklss5' in name:
popsize = 25000
elif 'gklsh5' in name:
popsize = 30000
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
self.opt = nlopt.opt(self.method, problem.GetDimension())
self.opt.set_local_optimizer(nlopt.opt(nlopt.LN_SBPLX, problem.GetDimension()))
self.opt.set_lower_bounds(lb)
self.opt.set_upper_bounds(ub)
self.opt.set_min_objective(lambda x, grad: problem.Calculate(x))
self.opt.set_maxeval(self.max_iters)
self.opt.set_xtol_rel(1e-13)
if self.method == nlopt.GN_CRS2_LM:
self.opt.set_population(self.pop_size)
x = self.opt.optimize([.5]*problem.GetDimension())
minf = self.opt.last_optimum_value()
n_evals = problem.GetCalculationsStatistics()
return x, minf, n_evals
class StochOpyWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.popsize = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
objective_function = lambda x: 50 + problem.Calculate(x)
lb, ub = problem.GetBounds()
ea = Evolutionary(objective_function, lower=lb, upper=ub, popsize=self.popsize, \
max_iter=int(self.max_iters/self.popsize), eps1=1e-16, eps2=1e-16)
xopt, gfit = ea.optimize(solver='cpso', sync=False, CR=0.4, F=0.5)
n_evals = problem.GetCalculationsStatistics()
return xopt, gfit, n_evals
class PyOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: [problem.Calculate(x), 0, 0]
lb, ub = problem.GetBounds()
opt_prob = pyOpt.Optimization('Problem', objective_function)
opt_prob.addObj('f')
for i in range(problem.GetDimension()):
opt_prob.addVar('x'+str(i),'c',lower=lb[i],upper=ub[i],value=(lb[i] + ub[i])/2.)
midaco_none = MIDACO(pll_type=None)
midaco_none.setOption('IPRINT',-1)
midaco_none.setOption('ISEED', 100)
midaco_none.setOption('MAXEVAL',self.max_iters)
midaco_none.setOption('FOCUS', -4)
fstr, xstr, inform = midaco_none(opt_prob)
n_evals = problem.GetCalculationsStatistics()
return xstr, fstr[0], n_evals
class SHGOWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: problem.Calculate(x)
        bounds = list(zip(*problem.GetBounds()))
opts = {'maxfev': self.max_iters}
result = shgo(objective_function, bounds, options=opts)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
'agsd': functools.partial(AGSWrapper, mixedFast=True),
'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
'sda': SDAWrapper, 'stochopy': StochOpyWrapper, 'shgo': SHGOWrapper,
'pyopt': PyOptWrapper}
algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT', 'agsd': 'AGSd',
'directl': 'DIRECTl', 'simple': 'Simple',
'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs':'CRS', 'scb': 'Scipy B-H',
'sda': 'SDA', 'stochopy': 'Stochopy', 'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}
serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1./4), 5: math.pow(1e-7, 1./5)}
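# Illustrative invocation (argument values are examples only):
#   python solve_different_methods.py --algo ags --problems_class gklss --problems_dim 2 --stats_fname ags_gklss2.json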
def main(args):
wrapper_class = algos[args.algo]
if args.problems_class == 'grish':
problems = GrishClass()
else:
assert args.problems_dim > 1 and args.problems_dim < 6
if args.problems_class == 'gklss':
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Simple)
else:
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Hard)
eps = 0.01
if args.serg_eps:
eps = serg_eps[args.problems_dim]
wrapper = wrapper_class(args.dist_stop, args.max_iters, args.problems_class+str(args.problems_dim), eps=0.01)
calc_stats, solved_status = solve_class(problems, wrapper, verbose=args.verbose, eps_check=eps)
stats = compute_stats(calc_stats, solved_status)
print('Problems solved: {}'.format(stats['num_solved']))
for i, avg in enumerate(stats['avg_calcs'][:-1]):
print('Average number of calculations of constraint #{}: {}'.format(i, avg))
print('Average number of calculations of objective: {}'.format(stats['avg_calcs'][-1]))
#plot_cmcs([stats['cmc']], captures=[algo2cature(args.algo)], show=True, filename='')
save_stats(stats, args.stats_fname, capture=algo2cature[args.algo])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sample for AGS solver')
parser.add_argument('--max_iters', type=int, default=10000, help='limit of iterations for the method')
parser.add_argument('--problems_class', type=str, choices=['grish','gklss','gklsh'], default='grish')
parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
parser.add_argument('--problems_dim', type=int, default=2)
parser.add_argument('--verbose', action='store_true', help='Print additional info to console')
parser.add_argument('--dist_stop', action='store_true', help='Stop algorithm then the next point is close enough to the optimum')
parser.add_argument('--serg_eps', action='store_true')
parser.add_argument('--stats_fname', type=str, default='')
main(parser.parse_args())
| import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools
from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo
from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
class AGSWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01, mixedFast=False):
params = self.class_name2params(class_name)
params.mixedFastMode = mixedFast
if dist_stop:
params.eps = 0
params.itersLimit = max_iters
self.solver = ags_solver.Solver()
self.solver.SetParameters(params)
self.dist_stop = dist_stop
self.eps = eps
def class_name2params(self, name):
params = ags_solver.Parameters()
if 'grish' in name:
params.r = 3
elif 'gklss2' in name:
params.r = 4.6
elif 'gklsh2' in name:
params.r = 6.5
elif 'gklss3' in name:
params.r = 3.7
elif 'gklsh3' in name:
params.r = 4.4
elif 'gklss4' in name:
params.r = 4.7
elif 'gklsh4' in name:
params.r = 4.9
elif 'gklss5' in name:
params.r = 4
params.evolventDensity = 10
elif 'gklsh5' in name:
params.r = 4
params.evolventDensity = 10
return params
def Solve(self, problem):
self.solver.SetProblem([lambda x: problem.Calculate(x)], *problem.GetBounds())
#self.solver.SetProblem(problem)
if not self.dist_stop:
point, val, idx = self.solver.Solve()
else:
opt_pt = np.array(problem.GetOptimumPoint())
point, val, idx = self.solver.Solve(lambda x: np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
#calcCounters = self.solver.GetCalculationsStatistics()
calcCounters = problem.GetCalculationsStatistics()
return point, val, calcCounters
class SDAWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
ret = sda(lambda x: problem.Calculate(x), None, bounds=list(zip(lb, ub)), \
seed=100, maxfun=self.max_iters, visit=2.72, maxiter=self.max_iters)
n_evals = problem.GetCalculationsStatistics()
return ret.x, ret.fun, n_evals
class SCBasinhoppingWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
#pop_size = self.class_name2params(self.class_name)
class MyBounds(object):
def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
x0 = [.5]*problem.GetDimension()
result = \
basinhopping(lambda x: problem.Calculate(x), x0, accept_test=MyBounds(ub, lb), seed=100, T=10, stepsize=0.3)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class SCDEWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
bounds = [(l, u) for l, u in zip(lb, ub)]
pop_size = self.class_name2params(self.class_name)
result = \
differential_evolution(
lambda x: problem.Calculate(x), bounds, mutation=(1.1,1.9),
tol=1e-12, maxiter=int(float(self.max_iters) / (pop_size*problem.GetDimension())), popsize=pop_size, disp=False, seed=100)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class PyEvolveWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
    def Solve(self, problem):
        # pyevolve is not imported at module level in this file; the lazy import
        # below is an assumption based on the classic pyevolve API.
        from pyevolve import G1DList, GSimpleGA, Selectors, Initializators, Mutators, Consts
lb, ub = problem.GetBounds()
# Genome instance
genome = G1DList.G1DList(2)
genome.setParams(rangemin=lb[0], rangemax=ub[0], bestRawScore=-100, roundDecimal=2)
genome.initializator.set(Initializators.G1DListInitializatorReal)
genome.mutator.set(Mutators.G1DListMutatorRealGaussian)
# The evaluator function (objective function)
genome.evaluator.set(lambda x: problem.Calculate(x) + 100)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
ga.selector.set(Selectors.GRouletteWheel)
ga.minimax = Consts.minimaxType["minimize"]
ga.setGenerations(5000)
ga.setMutationRate(0.05)
ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=100)
# Best individual
best = ga.bestIndividual()
print ("\nBest individual score: %.2f" % (best.score - 100,))
print (best)
from bayes_opt import BayesianOptimization
class BOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
bo = BayesianOptimization(lambda x, y: -problem.Calculate([x, y]),
{'x': (lb[0], ub[0]), 'y': (lb[1], ub[1])})
bo.maximize(init_points=5, n_iter=20, kappa=1.5)
n_evals = problem.GetCalculationsStatistics()
opt_val = -bo.res['max']['max_val']
opt_point = [bo.res['max']['max_params']['x'], bo.res['max']['max_params']['y']]
return opt_point, opt_val, n_evals
class SimpleWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.exploration = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
return 0.1
elif 'gklss2' in name:
return 0.15
elif 'gklsh2' in name:
return 0.15
elif 'gklss3' in name:
return 0.15
elif 'gklsh3' in name:
return 0.25
elif 'gklss4' in name:
return 0.2
elif 'gklsh4' in name:
return 0.25
def Solve(self, problem):
objective_function = lambda x: -problem.Calculate(x)
lb, ub = problem.GetBounds()
opt_pt = problem.GetOptimumPoint()
bounds = [[l, u] for l, u in zip(lb, ub)]
points = np.array([point for point in itertools.product(*bounds)])
tri = Delaunay(points)
optimization_domain_vertices = points[tri.simplices]
exploration = self.exploration # optional, default 0.15
tuner = SimpleTuner(optimization_domain_vertices, objective_function, \
exploration_preference=exploration,
stop_criterion=lambda x:np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
tuner.optimize(self.max_iters)
opt_val, opt_point = tuner.get_best()
#tuner.plot() # only works in 2D
n_evals = problem.GetCalculationsStatistics()
return opt_point, -opt_val, n_evals
class NLOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, method=nlopt.GD_STOGO, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.method = method
self.max_iters = max_iters
self.pop_size = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 150
elif 'gklss2' in name:
popsize = 200
elif 'gklsh2' in name:
popsize = 400
elif 'gklss3' in name:
popsize = 1000
elif 'gklsh3' in name:
popsize = 2000
elif 'gklss4' in name:
popsize = 8000
elif 'gklsh4' in name:
popsize = 16000
elif 'gklss5' in name:
popsize = 25000
elif 'gklsh5' in name:
popsize = 30000
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
self.opt = nlopt.opt(self.method, problem.GetDimension())
self.opt.set_local_optimizer(nlopt.opt(nlopt.LN_SBPLX, problem.GetDimension()))
self.opt.set_lower_bounds(lb)
self.opt.set_upper_bounds(ub)
self.opt.set_min_objective(lambda x, grad: problem.Calculate(x))
self.opt.set_maxeval(self.max_iters)
self.opt.set_xtol_rel(1e-13)
if self.method == nlopt.GN_CRS2_LM:
self.opt.set_population(self.pop_size)
x = self.opt.optimize([.5]*problem.GetDimension())
minf = self.opt.last_optimum_value()
n_evals = problem.GetCalculationsStatistics()
return x, minf, n_evals
class StochOpyWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.popsize = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
objective_function = lambda x: 50 + problem.Calculate(x)
lb, ub = problem.GetBounds()
ea = Evolutionary(objective_function, lower=lb, upper=ub, popsize=self.popsize, \
max_iter=int(self.max_iters/self.popsize), eps1=1e-16, eps2=1e-16)
xopt, gfit = ea.optimize(solver='cpso', sync=False, CR=0.4, F=0.5)
n_evals = problem.GetCalculationsStatistics()
return xopt, gfit, n_evals
class PyOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: [problem.Calculate(x), 0, 0]
lb, ub = problem.GetBounds()
opt_prob = pyOpt.Optimization('Problem', objective_function)
opt_prob.addObj('f')
for i in range(problem.GetDimension()):
opt_prob.addVar('x'+str(i),'c',lower=lb[i],upper=ub[i],value=(lb[i] + ub[i])/2.)
midaco_none = MIDACO(pll_type=None)
midaco_none.setOption('IPRINT',-1)
midaco_none.setOption('ISEED', 100)
midaco_none.setOption('MAXEVAL',self.max_iters)
midaco_none.setOption('FOCUS', -4)
fstr, xstr, inform = midaco_none(opt_prob)
n_evals = problem.GetCalculationsStatistics()
return xstr, fstr[0], n_evals
class SHGOWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: problem.Calculate(x)
        bounds = list(zip(*problem.GetBounds()))
opts = {'maxfev': self.max_iters}
result = shgo(objective_function, bounds, options=opts)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
'agsd': functools.partial(AGSWrapper, mixedFast=True),
'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
'sda': SDAWrapper, 'stochopy': StochOpyWrapper, 'shgo': SHGOWrapper,
'pyopt': PyOptWrapper}
algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT', 'agsd': 'AGSd',
'directl': 'DIRECTl', 'simple': 'Simple',
'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs':'CRS', 'scb': 'Scipy B-H',
'sda': 'SDA', 'stochopy': 'Stochopy', 'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}
serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1./4), 5: math.pow(1e-7, 1./5)}
def main(args):
wrapper_class = algos[args.algo]
if args.problems_class == 'grish':
problems = GrishClass()
else:
assert args.problems_dim > 1 and args.problems_dim < 6
if args.problems_class == 'gklss':
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Simple)
else:
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Hard)
eps = 0.01
if args.serg_eps:
eps = serg_eps[args.problems_dim]
wrapper = wrapper_class(args.dist_stop, args.max_iters, args.problems_class+str(args.problems_dim), eps=0.01)
calc_stats, solved_status = solve_class(problems, wrapper, verbose=args.verbose, eps_check=eps)
stats = compute_stats(calc_stats, solved_status)
print('Problems solved: {}'.format(stats['num_solved']))
for i, avg in enumerate(stats['avg_calcs'][:-1]):
print('Average number of calculations of constraint #{}: {}'.format(i, avg))
print('Average number of calculations of objective: {}'.format(stats['avg_calcs'][-1]))
#plot_cmcs([stats['cmc']], captures=[algo2cature(args.algo)], show=True, filename='')
save_stats(stats, args.stats_fname, capture=algo2cature[args.algo])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sample for AGS solver')
parser.add_argument('--max_iters', type=int, default=10000, help='limit of iterations for the method')
parser.add_argument('--problems_class', type=str, choices=['grish','gklss','gklsh'], default='grish')
parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
parser.add_argument('--problems_dim', type=int, default=2)
parser.add_argument('--verbose', action='store_true', help='Print additional info to console')
parser.add_argument('--dist_stop', action='store_true', help='Stop algorithm then the next point is close enough to the optimum')
parser.add_argument('--serg_eps', action='store_true')
parser.add_argument('--stats_fname', type=str, default='')
main(parser.parse_args())
| en | 0.417197 | #self.solver.SetProblem(problem) #calcCounters = self.solver.GetCalculationsStatistics() #pop_size = self.class_name2params(self.class_name) # Genome instance # The evaluator function (objective function) # Genetic Algorithm Instance # Do the evolution, with stats dump # frequency of 10 generations # Best individual # optional, default 0.15 #tuner.plot() # only works in 2D #{}: {}'.format(i, avg)) #plot_cmcs([stats['cmc']], captures=[algo2cature(args.algo)], show=True, filename='') | 1.87621 | 2 |
src/py/fc.py | mattyschell/geodatabase-toiler | 0 | 8943 | import arcpy
import logging
import pathlib
import subprocess
import gdb
import cx_sde
class Fc(object):
def __init__(self
,gdb
,name):
# gdb object
self.gdb = gdb
# ex BUILDING
self.name = name.upper()
# esri tools usually expect this C:/sdefiles/bldg.sde/BUILDING
# also acceptable: C:/sdefiles/bldg.sde/BLDG.BUILDING
self.featureclass = self.gdb.sdeconn + "/" + self.name
def getfields(self):
desc = arcpy.Describe(self.featureclass)
fields = desc.fields
fieldsameslist = []
for field in fields:
fieldsameslist.append(field.name)
return fieldsameslist
def exists(self):
return arcpy.Exists(self.featureclass)
def delete(self):
logging.info('deleting {0}'.format(self.name))
desc = arcpy.Describe(self.featureclass)
if desc.IsArchived == True:
# disable archving and axe the _H table
arcpy.DisableArchiving_management(self.featureclass,
'DELETE')
arcpy.Delete_management(self.featureclass)
def locksexist(self):
if arcpy.TestSchemaLock(self.featureclass):
# "True A schema lock can be applied to the dataset"
return False
else:
return True
def interpret(self
,resobject):
# could also work with resobject.status
output = 0
if 'succeeded' not in resobject.getMessages().lower():
output = 1
            logging.warning('response code is {0}'.format(resobject.status))
            logging.warning('response messages are {0}'.format(resobject.getMessages()))
return output
def version(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/register-as-versioned.htm
logging.info('versioning {0}'.format(self.name))
arcpy.RegisterAsVersioned_management(self.featureclass
,"NO_EDITS_TO_BASE")
# https://support.esri.com/en/technical-article/000023226
# When an ArcGIS 10.8 / ArcGIS Pro 2.5 (or newer) client connects to a
# 10.7.1, or earlier, release of an Enterprise geodatabase in Oracle,
# and registers the data as versioned, the versioned view is not created
# for the associated table or feature class.
# I cant get this shell out to python27 to work
# so like I dummy I'm gonna print it to the screen for now
# the test will fail until I (or esri) get it right, thats honest at least
py2versionedviews = pathlib.Path(__file__).parent.parent \
.joinpath('py27') \
.joinpath('create_versionedviews.py')
# see gdb class for this path, perhaps 'C:\Python27\ArcGIS10.6'
callcmd = r'{0} {1} {2}'.format(self.gdb.arcpy2path, py2versionedviews, self.name)
logging.info('YOU MUST CREATE versioned views from py27 using {0}'.format(callcmd))
logging.info('YOU YES YOU MUST call this: {0}'.format(callcmd))
# From a script run a postprocess something like:
# C:\Python27\ArcGIS10.6\python.exe C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py TOILERTESTFC
# exit_code = subprocess.call(callcmd,shell=True)
# exit_code = subprocess.run([self.gdb.arcpy2path, 'C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py'])
# subprocess.Popen(["virtualenv1/bin/python", "my_script.py"])
# attempts above yield
# File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\Lib\site.py", line 177
#file=sys.stderr)
# ^
# SyntaxError: invalid syntax
def trackedits(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/enable-editor-tracking.htm
# this will create fields only if they dont exist
# I am gonna fix the field names here. Reminder that our goal is to
# be opinionated and consistent across anything we manage
logging.info('enabling editor tracking on {0}'.format(self.name))
return self.interpret(arcpy.EnableEditorTracking_management(self.featureclass
,'CREATED_USER'
,'CREATED_DATE'
,'LAST_EDITED_USER'
,'LAST_EDITED_DATE'
,'NO_ADD_FIELDS'
,'UTC'))
def grantprivileges(self
,user
,edits='GRANT'): # or AS_IS
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/change-privileges.htm
        # callers should know who the editors are; we don't concern ourselves with that here
        # always grant select; edits are GRANT, or AS_IS for grant select only
        # The knobs and dials on this tool are confounding
logging.info('granting privileges on {0} to {1}'.format(self.name
,user))
return self.interpret(arcpy.ChangePrivileges_management(self.featureclass
,user
,'GRANT'
,edits))
def index(self
,column):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/add-attribute-index.htm
# unique indexes cant be specified for multiversioned tables
logging.info('indexing column {0} on {1}'.format(column
,self.name))
# BUILDINGBINIX
# BUILDING_HISTORICDOITT_IDIX = 27 careful friend
return self.interpret(arcpy.AddIndex_management(self.featureclass
,column
,'{0}{1}{2}'.format(self.name
,column
,'IX')))
def analyze(self
,components=['BUSINESS','ADDS','DELETES']):
return self.interpret(arcpy.Analyze_management(self.featureclass
,components))
def rebuildindexes(self):
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/rebuild-indexes.htm
return self.interpret(arcpy.RebuildIndexes_management(self.gdb.sdeconn
,'NO_SYSTEM'
,self.name
,'ALL'))
def enablearchiving(self):
desc = arcpy.Describe(self.featureclass)
if desc.IsArchived == False:
return self.interpret(arcpy.EnableArchiving_management(self.featureclass))
else:
return 0
def exporttoshp(self
,outputdir
,outputname):
# print('fc2fc {0} {1} {2}'.format(self.featureclass, outputdir, outputname))
arcpy.FeatureClassToFeatureClass_conversion(self.featureclass
,outputdir
,outputname)
# TODO exportogeopackage if ESRI ever fills in some functionality in
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/conversion/an-overview-of-the-to-geopackage-toolset.htm
# TODO exportogeojson if ESRI tool does something other than error 99999 (guess: sdo_geometry not supported)
# For now export to shp, then ogr2ogr to other formats. Classic
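# Minimal usage sketch (assumes a gdb wrapper object exposing .sdeconn and .arcpy2path,
# as referenced above; the names below are placeholders):
#   fc = Fc(somegdb, 'BUILDING')
#   if fc.exists() and not fc.locksexist():
#       fc.version()
#       fc.trackedits()
#       fc.grantprivileges('BLDG_EDITOR')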
| import arcpy
import logging
import pathlib
import subprocess
import gdb
import cx_sde
class Fc(object):
def __init__(self
,gdb
,name):
# gdb object
self.gdb = gdb
# ex BUILDING
self.name = name.upper()
# esri tools usually expect this C:/sdefiles/bldg.sde/BUILDING
# also acceptable: C:/sdefiles/bldg.sde/BLDG.BUILDING
self.featureclass = self.gdb.sdeconn + "/" + self.name
def getfields(self):
desc = arcpy.Describe(self.featureclass)
fields = desc.fields
fieldsameslist = []
for field in fields:
fieldsameslist.append(field.name)
return fieldsameslist
def exists(self):
return arcpy.Exists(self.featureclass)
def delete(self):
logging.info('deleting {0}'.format(self.name))
desc = arcpy.Describe(self.featureclass)
if desc.IsArchived == True:
# disable archving and axe the _H table
arcpy.DisableArchiving_management(self.featureclass,
'DELETE')
arcpy.Delete_management(self.featureclass)
def locksexist(self):
if arcpy.TestSchemaLock(self.featureclass):
# "True A schema lock can be applied to the dataset"
return False
else:
return True
def interpret(self
,resobject):
# could also work with resobject.status
output = 0
if 'succeeded' not in resobject.getMessages().lower():
output = 1
            logging.warning('response code is {0}'.format(resobject.status))
            logging.warning('response messages are {0}'.format(resobject.getMessages()))
return output
def version(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/register-as-versioned.htm
logging.info('versioning {0}'.format(self.name))
arcpy.RegisterAsVersioned_management(self.featureclass
,"NO_EDITS_TO_BASE")
# https://support.esri.com/en/technical-article/000023226
# When an ArcGIS 10.8 / ArcGIS Pro 2.5 (or newer) client connects to a
# 10.7.1, or earlier, release of an Enterprise geodatabase in Oracle,
# and registers the data as versioned, the versioned view is not created
# for the associated table or feature class.
# I cant get this shell out to python27 to work
# so like I dummy I'm gonna print it to the screen for now
# the test will fail until I (or esri) get it right, thats honest at least
py2versionedviews = pathlib.Path(__file__).parent.parent \
.joinpath('py27') \
.joinpath('create_versionedviews.py')
# see gdb class for this path, perhaps 'C:\Python27\ArcGIS10.6'
callcmd = r'{0} {1} {2}'.format(self.gdb.arcpy2path, py2versionedviews, self.name)
logging.info('YOU MUST CREATE versioned views from py27 using {0}'.format(callcmd))
logging.info('YOU YES YOU MUST call this: {0}'.format(callcmd))
# From a script run a postprocess something like:
# C:\Python27\ArcGIS10.6\python.exe C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py TOILERTESTFC
# exit_code = subprocess.call(callcmd,shell=True)
# exit_code = subprocess.run([self.gdb.arcpy2path, 'C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py'])
# subprocess.Popen(["virtualenv1/bin/python", "my_script.py"])
# attempts above yield
# File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\Lib\site.py", line 177
#file=sys.stderr)
# ^
# SyntaxError: invalid syntax
def trackedits(self):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/enable-editor-tracking.htm
# this will create fields only if they dont exist
# I am gonna fix the field names here. Reminder that our goal is to
# be opinionated and consistent across anything we manage
logging.info('enabling editor tracking on {0}'.format(self.name))
return self.interpret(arcpy.EnableEditorTracking_management(self.featureclass
,'CREATED_USER'
,'CREATED_DATE'
,'LAST_EDITED_USER'
,'LAST_EDITED_DATE'
,'NO_ADD_FIELDS'
,'UTC'))
def grantprivileges(self
,user
,edits='GRANT'): # or AS_IS
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/change-privileges.htm
        # callers should know who the editors are; we don't concern ourselves with that here
        # always grant select; edits are GRANT, or AS_IS for grant select only
        # The knobs and dials on this tool are confounding
logging.info('granting privileges on {0} to {1}'.format(self.name
,user))
return self.interpret(arcpy.ChangePrivileges_management(self.featureclass
,user
,'GRANT'
,edits))
def index(self
,column):
# https://pro.arcgis.com/en/pro-app/tool-reference/data-management/add-attribute-index.htm
# unique indexes cant be specified for multiversioned tables
logging.info('indexing column {0} on {1}'.format(column
,self.name))
# BUILDINGBINIX
# BUILDING_HISTORICDOITT_IDIX = 27 careful friend
return self.interpret(arcpy.AddIndex_management(self.featureclass
,column
,'{0}{1}{2}'.format(self.name
,column
,'IX')))
def analyze(self
,components=['BUSINESS','ADDS','DELETES']):
return self.interpret(arcpy.Analyze_management(self.featureclass
,components))
def rebuildindexes(self):
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/rebuild-indexes.htm
return self.interpret(arcpy.RebuildIndexes_management(self.gdb.sdeconn
,'NO_SYSTEM'
,self.name
,'ALL'))
def enablearchiving(self):
desc = arcpy.Describe(self.featureclass)
if desc.IsArchived == False:
return self.interpret(arcpy.EnableArchiving_management(self.featureclass))
else:
return 0
def exporttoshp(self
,outputdir
,outputname):
# print('fc2fc {0} {1} {2}'.format(self.featureclass, outputdir, outputname))
arcpy.FeatureClassToFeatureClass_conversion(self.featureclass
,outputdir
,outputname)
# TODO exportogeopackage if ESRI ever fills in some functionality in
# https://pro.arcgis.com/en/pro-app/latest/tool-reference/conversion/an-overview-of-the-to-geopackage-toolset.htm
# TODO exportogeojson if ESRI tool does something other than error 99999 (guess: sdo_geometry not supported)
# For now export to shp, then ogr2ogr to other formats. Classic
| en | 0.676589 | # gdb object # ex BUILDING # esri tools usually expect this C:/sdefiles/bldg.sde/BUILDING # also acceptable: C:/sdefiles/bldg.sde/BLDG.BUILDING # disable archving and axe the _H table # "True A schema lock can be applied to the dataset" # could also work with resobject.status # https://pro.arcgis.com/en/pro-app/tool-reference/data-management/register-as-versioned.htm # https://support.esri.com/en/technical-article/000023226 # When an ArcGIS 10.8 / ArcGIS Pro 2.5 (or newer) client connects to a # 10.7.1, or earlier, release of an Enterprise geodatabase in Oracle, # and registers the data as versioned, the versioned view is not created # for the associated table or feature class. # I cant get this shell out to python27 to work # so like I dummy I'm gonna print it to the screen for now # the test will fail until I (or esri) get it right, thats honest at least # see gdb class for this path, perhaps 'C:\Python27\ArcGIS10.6' # From a script run a postprocess something like: # C:\Python27\ArcGIS10.6\python.exe C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py TOILERTESTFC # exit_code = subprocess.call(callcmd,shell=True) # exit_code = subprocess.run([self.gdb.arcpy2path, 'C:\matt_projects\geodatabase-toiler\src\py27\create_versionedviews.py']) # subprocess.Popen(["virtualenv1/bin/python", "my_script.py"]) # attempts above yield # File "C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3\Lib\site.py", line 177 #file=sys.stderr) # ^ # SyntaxError: invalid syntax # https://pro.arcgis.com/en/pro-app/tool-reference/data-management/enable-editor-tracking.htm # this will create fields only if they dont exist # I am gonna fix the field names here. Reminder that our goal is to # be opinionated and consistent across anything we manage # or AS_IS # https://pro.arcgis.com/en/pro-app/tool-reference/data-management/change-privileges.htm # caller should know who editors are we dont concern ourselves here # always grant select, edits are GRANT or AS_IS for grant select only # The nobs and dials on this tool are confounding # https://pro.arcgis.com/en/pro-app/tool-reference/data-management/add-attribute-index.htm # unique indexes cant be specified for multiversioned tables # BUILDINGBINIX # BUILDING_HISTORICDOITT_IDIX = 27 careful friend # https://pro.arcgis.com/en/pro-app/latest/tool-reference/data-management/rebuild-indexes.htm # print('fc2fc {0} {1} {2}'.format(self.featureclass, outputdir, outputname)) # TODO exportogeopackage if ESRI ever fills in some functionality in # https://pro.arcgis.com/en/pro-app/latest/tool-reference/conversion/an-overview-of-the-to-geopackage-toolset.htm # TODO exportogeojson if ESRI tool does something other than error 99999 (guess: sdo_geometry not supported) # For now export to shp, then ogr2ogr to other formats. Classic | 2.013435 | 2 |
desafiosCursoEmVideo/ex004.py | gomesGabriel/Pythonicos | 1 | 8944 | <reponame>gomesGabriel/Pythonicos
n = input('Digite algo: ')
print('O tipo primitivo da variável é: ', type(n))
print('O que foi digitado é alfa numérico? ', n.isalnum())
print('O que foi digitado é alfabético? ', n.isalpha())
print('O que foi digitado é um decimal? ', n.isdecimal())
print('O que foi digitado é minúsculo? ', n.islower())
print('O que foi digitado é numérico? ', n.isnumeric())
print('O que foi digitado pode ser impresso? ', n.isprintable())
print('O que foi digitado é apenas espaço? ', n.isspace())
print('O que foi digitado está capitalizada? ', n.istitle())
print('O que foi digitado é maiúsculo? ', n.isupper())
| n = input('Digite algo: ')
print('O tipo primitivo da variável é: ', type(n))
print('O que foi digitado é alfa numérico? ', n.isalnum())
print('O que foi digitado é alfabético? ', n.isalpha())
print('O que foi digitado é um decimal? ', n.isdecimal())
print('O que foi digitado é minúsculo? ', n.islower())
print('O que foi digitado é numérico? ', n.isnumeric())
print('O que foi digitado pode ser impresso? ', n.isprintable())
print('O que foi digitado é apenas espaço? ', n.isspace())
print('O que foi digitado está capitalizada? ', n.istitle())
print('O que foi digitado é maiúsculo? ', n.isupper()) | none | 1 | 3.960602 | 4 |
|
Machine learning book/3 - MultiLayer Perceptron/test_regression.py | dalmia/Lisa-Lab-Tutorials | 25 | 8945 | <reponame>dalmia/Lisa-Lab-Tutorials
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
from mlp import mlp
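# Note: 'mlp' is assumed to be the locally provided multi-layer perceptron module
# (mlp.py alongside this script), not a package from PyPI.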
x = ones((1, 40)) * linspace(0, 1, 40)
t = sin(2 * pi * x) + cos(2 * pi * x) + np.random.randn(40) * 0.2
x = transpose(x)
t = transpose(t)
n_hidden = 3
eta = 0.25
n_iterations = 101
plt.plot(x, t, '.')
plt.show()
train = x[0::2, :]
test = x[1::4, :]
valid = x[3::4, :]
train_targets = t[0::2, :]
test_targets = t[1::4, :]
valid_targets = t[3::4, :]
net = mlp(train, train_targets, n_hidden, out_type='linear')
net.mlptrain(train, train_targets, eta, n_iterations)
best_err = net.earlystopping(train, train_targets, valid, valid_targets, eta)
| from numpy import *
import numpy as np
import matplotlib.pyplot as plt
from mlp import mlp
x = ones((1, 40)) * linspace(0, 1, 40)
t = sin(2 * pi * x) + cos(2 * pi * x) + np.random.randn(40) * 0.2
x = transpose(x)
t = transpose(t)
n_hidden = 3
eta = 0.25
n_iterations = 101
plt.plot(x, t, '.')
plt.show()
train = x[0::2, :]
test = x[1::4, :]
valid = x[3::4, :]
train_targets = t[0::2, :]
test_targets = t[1::4, :]
valid_targets = t[3::4, :]
net = mlp(train, train_targets, n_hidden, out_type='linear')
net.mlptrain(train, train_targets, eta, n_iterations)
best_err = net.earlystopping(train, train_targets, valid, valid_targets, eta) | none | 1 | 3.054425 | 3 |
|
gomoku/networks/__init__.py | IllIIIllll/reinforcement-learning-omok | 1 | 8946 | # © 2020 지성. all rights reserved.
# <<EMAIL>>
# Apache License 2.0
from .small import *
from .medium import *
from .large import * | # © 2020 지성. all rights reserved.
# <<EMAIL>>
# Apache License 2.0
from .small import *
from .medium import *
from .large import * | en | 0.684195 | # © 2020 지성. all rights reserved. # <<EMAIL>> # Apache License 2.0 | 1.168448 | 1 |
alipay/aop/api/domain/AlipayEbppInvoiceAuthSignModel.py | snowxmas/alipay-sdk-python-all | 1 | 8947 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceAuthSignModel(object):
def __init__(self):
self._authorization_type = None
self._m_short_name = None
self._user_id = None
@property
def authorization_type(self):
return self._authorization_type
@authorization_type.setter
def authorization_type(self, value):
self._authorization_type = value
@property
def m_short_name(self):
return self._m_short_name
@m_short_name.setter
def m_short_name(self, value):
self._m_short_name = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.authorization_type:
if hasattr(self.authorization_type, 'to_alipay_dict'):
params['authorization_type'] = self.authorization_type.to_alipay_dict()
else:
params['authorization_type'] = self.authorization_type
if self.m_short_name:
if hasattr(self.m_short_name, 'to_alipay_dict'):
params['m_short_name'] = self.m_short_name.to_alipay_dict()
else:
params['m_short_name'] = self.m_short_name
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceAuthSignModel()
if 'authorization_type' in d:
o.authorization_type = d['authorization_type']
if 'm_short_name' in d:
o.m_short_name = d['m_short_name']
if 'user_id' in d:
o.user_id = d['user_id']
return o
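# Illustrative round trip (field values are invented):
#   m = AlipayEbppInvoiceAuthSignModel.from_alipay_dict({'authorization_type': 'FULL',
#                                                        'm_short_name': 'DEMO',
#                                                        'user_id': '2088000000000000'})
#   params = m.to_alipay_dict()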
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEbppInvoiceAuthSignModel(object):
def __init__(self):
self._authorization_type = None
self._m_short_name = None
self._user_id = None
@property
def authorization_type(self):
return self._authorization_type
@authorization_type.setter
def authorization_type(self, value):
self._authorization_type = value
@property
def m_short_name(self):
return self._m_short_name
@m_short_name.setter
def m_short_name(self, value):
self._m_short_name = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.authorization_type:
if hasattr(self.authorization_type, 'to_alipay_dict'):
params['authorization_type'] = self.authorization_type.to_alipay_dict()
else:
params['authorization_type'] = self.authorization_type
if self.m_short_name:
if hasattr(self.m_short_name, 'to_alipay_dict'):
params['m_short_name'] = self.m_short_name.to_alipay_dict()
else:
params['m_short_name'] = self.m_short_name
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppInvoiceAuthSignModel()
if 'authorization_type' in d:
o.authorization_type = d['authorization_type']
if 'm_short_name' in d:
o.m_short_name = d['m_short_name']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.112913 | 2 |
sdk/python/tekton_pipeline/models/v1beta1_embedded_task.py | jmcshane/experimental | 0 | 8948 | # Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1EmbeddedTask(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'str',
'metadata': 'V1beta1PipelineTaskMetadata',
'params': 'list[V1beta1ParamSpec]',
'resources': 'V1beta1TaskResources',
'results': 'list[V1beta1TaskResult]',
'sidecars': 'list[V1beta1Sidecar]',
'step_template': 'V1Container',
'steps': 'list[V1beta1Step]',
'volumes': 'list[V1Volume]',
'workspaces': 'list[V1beta1WorkspaceDeclaration]'
}
attribute_map = {
'description': 'description',
'metadata': 'metadata',
'params': 'params',
'resources': 'resources',
'results': 'results',
'sidecars': 'sidecars',
'step_template': 'stepTemplate',
'steps': 'steps',
'volumes': 'volumes',
'workspaces': 'workspaces'
}
def __init__(self, description=None, metadata=None, params=None, resources=None, results=None, sidecars=None, step_template=None, steps=None, volumes=None, workspaces=None, local_vars_configuration=None): # noqa: E501
"""V1beta1EmbeddedTask - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._metadata = None
self._params = None
self._resources = None
self._results = None
self._sidecars = None
self._step_template = None
self._steps = None
self._volumes = None
self._workspaces = None
self.discriminator = None
if description is not None:
self.description = description
if metadata is not None:
self.metadata = metadata
if params is not None:
self.params = params
if resources is not None:
self.resources = resources
if results is not None:
self.results = results
if sidecars is not None:
self.sidecars = sidecars
if step_template is not None:
self.step_template = step_template
if steps is not None:
self.steps = steps
if volumes is not None:
self.volumes = volumes
if workspaces is not None:
self.workspaces = workspaces
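    # Illustrative construction (values are invented; assumes V1beta1Step accepts
    # container-style fields such as name/image):
    #   task = V1beta1EmbeddedTask(steps=[V1beta1Step(name='build', image='alpine')])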
@property
def description(self):
"""Gets the description of this V1beta1EmbeddedTask. # noqa: E501
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:return: The description of this V1beta1EmbeddedTask. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1beta1EmbeddedTask.
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:param description: The description of this V1beta1EmbeddedTask. # noqa: E501
:type: str
"""
self._description = description
@property
def metadata(self):
"""Gets the metadata of this V1beta1EmbeddedTask. # noqa: E501
:return: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1PipelineTaskMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1EmbeddedTask.
:param metadata: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1PipelineTaskMetadata
"""
self._metadata = metadata
@property
def params(self):
"""Gets the params of this V1beta1EmbeddedTask. # noqa: E501
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:return: The params of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1ParamSpec]
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1beta1EmbeddedTask.
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:param params: The params of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1ParamSpec]
"""
self._params = params
@property
def resources(self):
"""Gets the resources of this V1beta1EmbeddedTask. # noqa: E501
:return: The resources of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1TaskResources
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1EmbeddedTask.
:param resources: The resources of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1TaskResources
"""
self._resources = resources
@property
def results(self):
"""Gets the results of this V1beta1EmbeddedTask. # noqa: E501
Results are values that this Task can output # noqa: E501
:return: The results of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1TaskResult]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1beta1EmbeddedTask.
Results are values that this Task can output # noqa: E501
:param results: The results of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1TaskResult]
"""
self._results = results
@property
def sidecars(self):
"""Gets the sidecars of this V1beta1EmbeddedTask. # noqa: E501
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:return: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Sidecar]
"""
return self._sidecars
@sidecars.setter
def sidecars(self, sidecars):
"""Sets the sidecars of this V1beta1EmbeddedTask.
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:param sidecars: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Sidecar]
"""
self._sidecars = sidecars
@property
def step_template(self):
"""Gets the step_template of this V1beta1EmbeddedTask. # noqa: E501
:return: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1Container
"""
return self._step_template
@step_template.setter
def step_template(self, step_template):
"""Sets the step_template of this V1beta1EmbeddedTask.
:param step_template: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:type: V1Container
"""
self._step_template = step_template
@property
def steps(self):
"""Gets the steps of this V1beta1EmbeddedTask. # noqa: E501
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:return: The steps of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Step]
"""
return self._steps
@steps.setter
def steps(self, steps):
"""Sets the steps of this V1beta1EmbeddedTask.
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:param steps: The steps of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Step]
"""
self._steps = steps
@property
def volumes(self):
"""Gets the volumes of this V1beta1EmbeddedTask. # noqa: E501
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:return: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this V1beta1EmbeddedTask.
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:param volumes: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
@property
def workspaces(self):
"""Gets the workspaces of this V1beta1EmbeddedTask. # noqa: E501
Workspaces are the volumes that this Task requires. # noqa: E501
:return: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1WorkspaceDeclaration]
"""
return self._workspaces
@workspaces.setter
def workspaces(self, workspaces):
"""Sets the workspaces of this V1beta1EmbeddedTask.
Workspaces are the volumes that this Task requires. # noqa: E501
:param workspaces: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1WorkspaceDeclaration]
"""
self._workspaces = workspaces
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return True
return self.to_dict() != other.to_dict()
| # Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1EmbeddedTask(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'description': 'str',
'metadata': 'V1beta1PipelineTaskMetadata',
'params': 'list[V1beta1ParamSpec]',
'resources': 'V1beta1TaskResources',
'results': 'list[V1beta1TaskResult]',
'sidecars': 'list[V1beta1Sidecar]',
'step_template': 'V1Container',
'steps': 'list[V1beta1Step]',
'volumes': 'list[V1Volume]',
'workspaces': 'list[V1beta1WorkspaceDeclaration]'
}
attribute_map = {
'description': 'description',
'metadata': 'metadata',
'params': 'params',
'resources': 'resources',
'results': 'results',
'sidecars': 'sidecars',
'step_template': 'stepTemplate',
'steps': 'steps',
'volumes': 'volumes',
'workspaces': 'workspaces'
}
def __init__(self, description=None, metadata=None, params=None, resources=None, results=None, sidecars=None, step_template=None, steps=None, volumes=None, workspaces=None, local_vars_configuration=None): # noqa: E501
"""V1beta1EmbeddedTask - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._description = None
self._metadata = None
self._params = None
self._resources = None
self._results = None
self._sidecars = None
self._step_template = None
self._steps = None
self._volumes = None
self._workspaces = None
self.discriminator = None
if description is not None:
self.description = description
if metadata is not None:
self.metadata = metadata
if params is not None:
self.params = params
if resources is not None:
self.resources = resources
if results is not None:
self.results = results
if sidecars is not None:
self.sidecars = sidecars
if step_template is not None:
self.step_template = step_template
if steps is not None:
self.steps = steps
if volumes is not None:
self.volumes = volumes
if workspaces is not None:
self.workspaces = workspaces
@property
def description(self):
"""Gets the description of this V1beta1EmbeddedTask. # noqa: E501
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:return: The description of this V1beta1EmbeddedTask. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1beta1EmbeddedTask.
Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501
:param description: The description of this V1beta1EmbeddedTask. # noqa: E501
:type: str
"""
self._description = description
@property
def metadata(self):
"""Gets the metadata of this V1beta1EmbeddedTask. # noqa: E501
:return: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1PipelineTaskMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1EmbeddedTask.
:param metadata: The metadata of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1PipelineTaskMetadata
"""
self._metadata = metadata
@property
def params(self):
"""Gets the params of this V1beta1EmbeddedTask. # noqa: E501
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:return: The params of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1ParamSpec]
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this V1beta1EmbeddedTask.
Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501
:param params: The params of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1ParamSpec]
"""
self._params = params
@property
def resources(self):
"""Gets the resources of this V1beta1EmbeddedTask. # noqa: E501
:return: The resources of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1beta1TaskResources
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1EmbeddedTask.
:param resources: The resources of this V1beta1EmbeddedTask. # noqa: E501
:type: V1beta1TaskResources
"""
self._resources = resources
@property
def results(self):
"""Gets the results of this V1beta1EmbeddedTask. # noqa: E501
Results are values that this Task can output # noqa: E501
:return: The results of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1TaskResult]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1beta1EmbeddedTask.
Results are values that this Task can output # noqa: E501
:param results: The results of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1TaskResult]
"""
self._results = results
@property
def sidecars(self):
"""Gets the sidecars of this V1beta1EmbeddedTask. # noqa: E501
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:return: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Sidecar]
"""
return self._sidecars
@sidecars.setter
def sidecars(self, sidecars):
"""Sets the sidecars of this V1beta1EmbeddedTask.
Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501
:param sidecars: The sidecars of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Sidecar]
"""
self._sidecars = sidecars
@property
def step_template(self):
"""Gets the step_template of this V1beta1EmbeddedTask. # noqa: E501
:return: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:rtype: V1Container
"""
return self._step_template
@step_template.setter
def step_template(self, step_template):
"""Sets the step_template of this V1beta1EmbeddedTask.
:param step_template: The step_template of this V1beta1EmbeddedTask. # noqa: E501
:type: V1Container
"""
self._step_template = step_template
@property
def steps(self):
"""Gets the steps of this V1beta1EmbeddedTask. # noqa: E501
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:return: The steps of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1Step]
"""
return self._steps
@steps.setter
def steps(self, steps):
"""Sets the steps of this V1beta1EmbeddedTask.
Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501
:param steps: The steps of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1Step]
"""
self._steps = steps
@property
def volumes(self):
"""Gets the volumes of this V1beta1EmbeddedTask. # noqa: E501
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:return: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this V1beta1EmbeddedTask.
Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501
:param volumes: The volumes of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
@property
def workspaces(self):
"""Gets the workspaces of this V1beta1EmbeddedTask. # noqa: E501
Workspaces are the volumes that this Task requires. # noqa: E501
:return: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:rtype: list[V1beta1WorkspaceDeclaration]
"""
return self._workspaces
@workspaces.setter
def workspaces(self, workspaces):
"""Sets the workspaces of this V1beta1EmbeddedTask.
Workspaces are the volumes that this Task requires. # noqa: E501
:param workspaces: The workspaces of this V1beta1EmbeddedTask. # noqa: E501
:type: list[V1beta1WorkspaceDeclaration]
"""
self._workspaces = workspaces
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EmbeddedTask):
return True
return self.to_dict() != other.to_dict()
| en | 0.654171 | # Copyright 2020 The Tekton Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 Tekton Tekton Pipeline # noqa: E501 The version of the OpenAPI document: v0.17.2 Generated by: https://openapi-generator.tech # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 V1beta1EmbeddedTask - a model defined in OpenAPI # noqa: E501 Gets the description of this V1beta1EmbeddedTask. # noqa: E501 Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501 :return: The description of this V1beta1EmbeddedTask. # noqa: E501 :rtype: str Sets the description of this V1beta1EmbeddedTask. Description is a user-facing description of the task that may be used to populate a UI. # noqa: E501 :param description: The description of this V1beta1EmbeddedTask. # noqa: E501 :type: str Gets the metadata of this V1beta1EmbeddedTask. # noqa: E501 :return: The metadata of this V1beta1EmbeddedTask. # noqa: E501 :rtype: V1beta1PipelineTaskMetadata Sets the metadata of this V1beta1EmbeddedTask. :param metadata: The metadata of this V1beta1EmbeddedTask. # noqa: E501 :type: V1beta1PipelineTaskMetadata Gets the params of this V1beta1EmbeddedTask. # noqa: E501 Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501 :return: The params of this V1beta1EmbeddedTask. # noqa: E501 :rtype: list[V1beta1ParamSpec] Sets the params of this V1beta1EmbeddedTask. Params is a list of input parameters required to run the task. Params must be supplied as inputs in TaskRuns unless they declare a default value. # noqa: E501 :param params: The params of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1beta1ParamSpec] Gets the resources of this V1beta1EmbeddedTask. # noqa: E501 :return: The resources of this V1beta1EmbeddedTask. # noqa: E501 :rtype: V1beta1TaskResources Sets the resources of this V1beta1EmbeddedTask. :param resources: The resources of this V1beta1EmbeddedTask. # noqa: E501 :type: V1beta1TaskResources Gets the results of this V1beta1EmbeddedTask. # noqa: E501 Results are values that this Task can output # noqa: E501 :return: The results of this V1beta1EmbeddedTask. # noqa: E501 :rtype: list[V1beta1TaskResult] Sets the results of this V1beta1EmbeddedTask. Results are values that this Task can output # noqa: E501 :param results: The results of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1beta1TaskResult] Gets the sidecars of this V1beta1EmbeddedTask. # noqa: E501 Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501 :return: The sidecars of this V1beta1EmbeddedTask. 
# noqa: E501 :rtype: list[V1beta1Sidecar] Sets the sidecars of this V1beta1EmbeddedTask. Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete. # noqa: E501 :param sidecars: The sidecars of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1beta1Sidecar] Gets the step_template of this V1beta1EmbeddedTask. # noqa: E501 :return: The step_template of this V1beta1EmbeddedTask. # noqa: E501 :rtype: V1Container Sets the step_template of this V1beta1EmbeddedTask. :param step_template: The step_template of this V1beta1EmbeddedTask. # noqa: E501 :type: V1Container Gets the steps of this V1beta1EmbeddedTask. # noqa: E501 Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501 :return: The steps of this V1beta1EmbeddedTask. # noqa: E501 :rtype: list[V1beta1Step] Sets the steps of this V1beta1EmbeddedTask. Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace. # noqa: E501 :param steps: The steps of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1beta1Step] Gets the volumes of this V1beta1EmbeddedTask. # noqa: E501 Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501 :return: The volumes of this V1beta1EmbeddedTask. # noqa: E501 :rtype: list[V1Volume] Sets the volumes of this V1beta1EmbeddedTask. Volumes is a collection of volumes that are available to mount into the steps of the build. # noqa: E501 :param volumes: The volumes of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1Volume] Gets the workspaces of this V1beta1EmbeddedTask. # noqa: E501 Workspaces are the volumes that this Task requires. # noqa: E501 :return: The workspaces of this V1beta1EmbeddedTask. # noqa: E501 :rtype: list[V1beta1WorkspaceDeclaration] Sets the workspaces of this V1beta1EmbeddedTask. Workspaces are the volumes that this Task requires. # noqa: E501 :param workspaces: The workspaces of this V1beta1EmbeddedTask. # noqa: E501 :type: list[V1beta1WorkspaceDeclaration] Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.564201 | 2 |
tzp.py | gmlunesa/zhat | 1 | 8949 | <filename>tzp.py
import zmq
import curses
import argparse
import configparser
import threading
import time
from curses import wrapper
from client import Client
from ui import UI
def parse_args():
parser = argparse.ArgumentParser(description='Client for teezeepee')
# Please specify your username
parser.add_argument('username',
type=str,
help='Specified username')
parser.add_argument('--config-file',
type=str,
help='Default path for configuration file.')
return parser.parse_args()
def display_section(window, display):
window_lines, window_cols = window.getmaxyx()
bottom_line = window_lines - 1
window.bkgd(curses.A_NORMAL)
window.scrollok(1)
while True:
window.addstr(bottom_line, 1, display.recv_string())
window.move(bottom_line, 1)
window.scroll(1)
window.refresh()
def input_section(window, chat_sender):
window.bkgd(curses.A_NORMAL)
window.clear()
window.box()
window.refresh()
while True:
window.clear()
window.box()
window.refresh()
s = window.getstr(1, 1).decode('utf-8')
if s is not None and s != "":
chat_sender.send_string(s)
# Short pause
time.sleep(0.01)
def main(stdscr):
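    # Two in-process ZeroMQ PAIR socket pairs decouple the curses threads from the
    # network client: "clientchat" carries outgoing input, "clientdisplay" carries
    # incoming messages to render.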
config_file = args.config_file if args.config_file is not None else 'tzp.cfg'
config = configparser.ConfigParser()
config.read(config_file)
config = config['default']
receiver = zmq.Context().instance().socket(zmq.PAIR)
receiver.bind("inproc://clientchat")
sender = zmq.Context().instance().socket(zmq.PAIR)
sender.connect("inproc://clientchat")
client = Client(args.username, config['server_host'],
config['chat_port'], receiver)
client.run()
show_receiver = zmq.Context().instance().socket(zmq.PAIR)
show_receiver.bind("inproc://clientdisplay")
show_sender = zmq.Context().instance().socket(zmq.PAIR)
show_sender.connect("inproc://clientdisplay")
ui = UI(config['server_host'], config['display_port'], show_sender)
ui.run()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.echo()
curses.curs_set(0)
window_height = curses.LINES
window_width = curses.COLS
divider = int(window_height * 0.5)
history_screen = stdscr.subpad(divider, window_width, 0, 0)
input_screen = stdscr.subpad(window_height - divider, window_width, divider, 0)
history_thread = threading.Thread(target=display_section, args=(history_screen, show_receiver))
history_thread.daemon = True
history_thread.start()
input_thread = threading.Thread(target=input_section, args=(input_screen, sender))
input_thread.daemon = True
input_thread.start()
history_thread.join()
input_thread.join()
if '__main__' == __name__:
try:
args = parse_args()
wrapper(main)
except KeyboardInterrupt as e:
pass
except:
raise
| <filename>tzp.py
import zmq
import curses
import argparse
import configparser
import threading
import time
from curses import wrapper
from client import Client
from ui import UI
def parse_args():
parser = argparse.ArgumentParser(description='Client for teezeepee')
# Please specify your username
parser.add_argument('username',
type=str,
help='Specified username')
parser.add_argument('--config-file',
type=str,
help='Default path for configuration file.')
return parser.parse_args()
def display_section(window, display):
window_lines, window_cols = window.getmaxyx()
bottom_line = window_lines - 1
window.bkgd(curses.A_NORMAL)
window.scrollok(1)
while True:
window.addstr(bottom_line, 1, display.recv_string())
window.move(bottom_line, 1)
window.scroll(1)
window.refresh()
def input_section(window, chat_sender):
window.bkgd(curses.A_NORMAL)
window.clear()
window.box()
window.refresh()
while True:
window.clear()
window.box()
window.refresh()
s = window.getstr(1, 1).decode('utf-8')
if s is not None and s != "":
chat_sender.send_string(s)
# Short pause
time.sleep(0.01)
def main(stdscr):
config_file = args.config_file if args.config_file is not None else 'tzp.cfg'
config = configparser.ConfigParser()
config.read(config_file)
config = config['default']
receiver = zmq.Context().instance().socket(zmq.PAIR)
receiver.bind("inproc://clientchat")
sender = zmq.Context().instance().socket(zmq.PAIR)
sender.connect("inproc://clientchat")
client = Client(args.username, config['server_host'],
config['chat_port'], receiver)
client.run()
show_receiver = zmq.Context().instance().socket(zmq.PAIR)
show_receiver.bind("inproc://clientdisplay")
show_sender = zmq.Context().instance().socket(zmq.PAIR)
show_sender.connect("inproc://clientdisplay")
ui = UI(config['server_host'], config['display_port'], show_sender)
ui.run()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.echo()
curses.curs_set(0)
window_height = curses.LINES
window_width = curses.COLS
divider = int(window_height * 0.5)
history_screen = stdscr.subpad(divider, window_width, 0, 0)
input_screen = stdscr.subpad(window_height - divider, window_width, divider, 0)
history_thread = threading.Thread(target=display_section, args=(history_screen, show_receiver))
history_thread.daemon = True
history_thread.start()
input_thread = threading.Thread(target=input_section, args=(input_screen, sender))
input_thread.daemon = True
input_thread.start()
history_thread.join()
input_thread.join()
if '__main__' == __name__:
try:
args = parse_args()
wrapper(main)
except KeyboardInterrupt as e:
pass
except:
raise
| en | 0.192563 | # Please specify your username # Short pause | 2.421252 | 2 |
anonlink-entity-service/backend/entityservice/tasks/solver.py | Sam-Gresh/linkage-agent-tools | 1 | 8950 | <gh_stars>1-10
import anonlink
from anonlink.candidate_generation import _merge_similarities
from entityservice.object_store import connect_to_object_store
from entityservice.async_worker import celery, logger
from entityservice.settings import Config as config
from entityservice.tasks.base_task import TracedTask
from entityservice.tasks.permutation import save_and_permute
@celery.task(base=TracedTask, ignore_result=True, args_as_tags=('project_id', 'run_id'))
def solver_task(similarity_scores_filename, project_id, run_id, dataset_sizes, parent_span):
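    # Loads serialized candidate pairs from the object store, merges duplicate
    # similarity entries, greedily groups the records, and hands the result to the
    # save_and_permute task.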
log = logger.bind(pid=project_id, run_id=run_id)
mc = connect_to_object_store()
solver_task.span.log_kv({'datasetSizes': dataset_sizes,
'filename': similarity_scores_filename})
score_file = mc.get_object(config.MINIO_BUCKET, similarity_scores_filename)
log.debug("Creating python sparse matrix from bytes data")
candidate_pairs_with_duplicates = anonlink.serialization.load_candidate_pairs(score_file)
similarity_scores, (dset_is0, dset_is1), (rec_is0, rec_is1) = candidate_pairs_with_duplicates
log.info(f"Number of candidate pairs before deduplication: {len(candidate_pairs_with_duplicates[0])}")
if len(candidate_pairs_with_duplicates[0]) > 0:
# TODO use public interface when available
# https://github.com/data61/anonlink/issues/271
candidate_pairs = _merge_similarities([zip(similarity_scores, dset_is0, dset_is1, rec_is0, rec_is1)], k=None)
log.info(f"Number of candidate pairs after deduplication: {len(candidate_pairs[0])}")
log.info("Calculating the optimal mapping from similarity matrix")
groups = anonlink.solving.greedy_solve(candidate_pairs)
else:
groups = []
log.info("Entity groups have been computed")
res = {
"groups": groups,
"datasetSizes": dataset_sizes
}
save_and_permute.delay(res, project_id, run_id, solver_task.get_serialized_span())
| import anonlink
from anonlink.candidate_generation import _merge_similarities
from entityservice.object_store import connect_to_object_store
from entityservice.async_worker import celery, logger
from entityservice.settings import Config as config
from entityservice.tasks.base_task import TracedTask
from entityservice.tasks.permutation import save_and_permute
@celery.task(base=TracedTask, ignore_result=True, args_as_tags=('project_id', 'run_id'))
def solver_task(similarity_scores_filename, project_id, run_id, dataset_sizes, parent_span):
log = logger.bind(pid=project_id, run_id=run_id)
mc = connect_to_object_store()
solver_task.span.log_kv({'datasetSizes': dataset_sizes,
'filename': similarity_scores_filename})
score_file = mc.get_object(config.MINIO_BUCKET, similarity_scores_filename)
log.debug("Creating python sparse matrix from bytes data")
candidate_pairs_with_duplicates = anonlink.serialization.load_candidate_pairs(score_file)
similarity_scores, (dset_is0, dset_is1), (rec_is0, rec_is1) = candidate_pairs_with_duplicates
log.info(f"Number of candidate pairs before deduplication: {len(candidate_pairs_with_duplicates[0])}")
if len(candidate_pairs_with_duplicates[0]) > 0:
# TODO use public interface when available
# https://github.com/data61/anonlink/issues/271
candidate_pairs = _merge_similarities([zip(similarity_scores, dset_is0, dset_is1, rec_is0, rec_is1)], k=None)
log.info(f"Number of candidate pairs after deduplication: {len(candidate_pairs[0])}")
log.info("Calculating the optimal mapping from similarity matrix")
groups = anonlink.solving.greedy_solve(candidate_pairs)
else:
groups = []
log.info("Entity groups have been computed")
res = {
"groups": groups,
"datasetSizes": dataset_sizes
}
save_and_permute.delay(res, project_id, run_id, solver_task.get_serialized_span()) | en | 0.574558 | # TODO use public interface when available # https://github.com/data61/anonlink/issues/271 | 2.03597 | 2 |
portal/migrations/0007_auto_20170824_1341.py | nickmvincent/ugc-val-est | 2 | 8951 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-24 13:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal', '0006_auto_20170824_0950'),
]
operations = [
migrations.AddField(
model_name='sampledstackoverflowpost',
name='num_question_comments',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='question_score',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title',
field=models.CharField(default='', max_length=1182),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_coleman_liau_index',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_length',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_lexicon_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_punctuation',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_spaces',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_uppercase',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_sentence_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_starts_capitalized',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='sampledredditthread',
name='title',
field=models.CharField(default='', max_length=1182),
),
migrations.AlterField(
model_name='stackoverflowanswer',
name='owner_user_id',
field=models.IntegerField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='stackoverflowanswer',
name='parent_id',
field=models.IntegerField(db_index=True),
),
migrations.AlterField(
model_name='stackoverflowquestion',
name='accepted_answer_id',
field=models.IntegerField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='stackoverflowquestion',
name='owner_user_id',
field=models.IntegerField(db_index=True),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-24 13:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal', '0006_auto_20170824_0950'),
]
operations = [
migrations.AddField(
model_name='sampledstackoverflowpost',
name='num_question_comments',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='question_score',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title',
field=models.CharField(default='', max_length=1182),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_coleman_liau_index',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_length',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_lexicon_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_punctuation',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_spaces',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_percent_uppercase',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_sentence_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='sampledstackoverflowpost',
name='title_starts_capitalized',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='sampledredditthread',
name='title',
field=models.CharField(default='', max_length=1182),
),
migrations.AlterField(
model_name='stackoverflowanswer',
name='owner_user_id',
field=models.IntegerField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='stackoverflowanswer',
name='parent_id',
field=models.IntegerField(db_index=True),
),
migrations.AlterField(
model_name='stackoverflowquestion',
name='accepted_answer_id',
field=models.IntegerField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='stackoverflowquestion',
name='owner_user_id',
field=models.IntegerField(db_index=True),
),
]
| en | 0.755624 | # -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-08-24 13:41 | 1.735333 | 2 |
exercise-09/programming_assignment/hopfield.py | AleRiccardi/technical-neural-network-course | 0 | 8952 | <filename>exercise-09/programming_assignment/hopfield.py
import numpy as np
import random
letter_C = np.array([
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_C = np.array([
[1, 1, 1, 1, 1],
[0, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 1],
])
letter_I = np.array([
[0, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_I = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 1, 1],
])
letter_T = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
])
noisy_T = np.array([
[1, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
])
class HopfieldNet:
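    # Discrete Hopfield network over +/-1 states: patterns are stored with a Hebbian
    # rule in fit() and recalled by iterating predict() until the state stops changing.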
def __init__(self, num_neurons, threshold=None):
assert num_neurons <= 1000
        self.weights = np.zeros((num_neurons, num_neurons)).astype(int)
self.state = np.array((1, num_neurons))
if threshold:
            self.thresholds = np.array([threshold for _ in range(num_neurons)])
else:
self.thresholds = np.zeros((num_neurons,))
def fit(self, X):
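        # Hebbian storage: each pattern adds the outer product of itself with itself
        # to the weight matrix, with the diagonal zeroed so no neuron feeds back onto itself.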
num_p = X.shape[0]
num_k = X.shape[1]
        # capacity check: a Hopfield net can reliably store only about 0.138 * K random patterns
assert num_p < num_k * 0.138
num_k = X.shape[1]
for p in range(X.shape[0]):
X_p = X[p, :].reshape((1, num_k))
            matrix_lr = np.dot(X_p.T, X_p).astype(int)
np.fill_diagonal(matrix_lr, 0)
self.weights += matrix_lr
def predict(self, X, show_energy=False, show_char=False):
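        # Recall: neurons are updated one at a time (asynchronously) and the sweep is
        # repeated until the state vector reaches a fixed point.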
num_k = X.shape[1]
X_pred = X.copy()
# loop per every pattern
for p in range(X_pred.shape[0]):
differ = True
time_s = 0
# loop until the state
# stay the same
while differ:
X_prev = X_pred[p].copy()
# print energy
if show_energy:
self.print_energy(X_pred[p], p, time_s)
# print char
if show_char and num_k <= 100:
self.print_char(X_pred[p], p, time_s)
# loop per every neuron
for k in range(num_k):
val = np.dot(X_pred[p], self.weights[:, k])
val_thres = 1 if val > self.thresholds[k] else -1
X_pred[p, k] = val_thres
# check if the new state differs from the previous one
differ = False if np.array_equal(X_pred[p], X_prev) else True
time_s += 1
return X_pred
def print_energy(self, state, num_p, time_s):
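        # Hopfield energy: E = -0.5 * sum_ij w_ij * s_i * s_j + sum_k theta_k * s_k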
first_term = 0
second_term = 0
for i in range(state.shape[0]):
for j in range(state.shape[0]):
first_term += self.weights[i, j] * state[i] * state[j]
for k in range(state.shape[0]):
second_term += self.thresholds[k] * state[k]
energy = -0.5 * first_term + second_term
print('Pattern: {}\t||\tTime stamp: {}\t||\tEnergy: {:7.0f}'.format(num_p, time_s, energy))
return energy
def print_char(self, sequence, num_p, time_s):
sqrtK = np.sqrt(sequence.shape[0])
# check if correct sequence
assert sqrtK % 1 == 0
print('Pattern: {}\t||\tTime stamp: {}'.format(num_p, time_s))
for y in range(int(sqrtK)):
for x in range(int(sqrtK)):
idx = int(y * sqrtK + x)
val = '*' if sequence[idx] > 0 else ' '
print(val, end=' ')
print('', sep='', end='\n')
print('', sep='', end='\n')
def test_w_less_101():
print('\n================')
print('K < 101')
print('================\n')
X = np.array([
letter_C.flatten(),
letter_I.flatten(),
letter_T.flatten(),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
noisy_C.flatten(),
noisy_I.flatten(),
noisy_T.flatten(),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_char=True)
def test_w_more_100():
print('\n================')
print('K > 100')
print('================\n')
num_k = random.randint(101, 1000)
binary = 2
X = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_energy=True)
if __name__ == '__main__':
test_w_less_101()
test_w_more_100()
| <filename>exercise-09/programming_assignment/hopfield.py
import numpy as np
import random
letter_C = np.array([
[1, 1, 1, 1, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_C = np.array([
[1, 1, 1, 1, 1],
[0, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 1, 1, 1],
])
letter_I = np.array([
[0, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
])
noisy_I = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 1, 1],
])
letter_T = np.array([
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
])
noisy_T = np.array([
[1, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
])
class HopfieldNet:
def __init__(self, num_neurons, threshold=None):
assert num_neurons <= 1000
        self.weights = np.zeros((num_neurons, num_neurons)).astype(int)
self.state = np.array((1, num_neurons))
if threshold:
            self.thresholds = np.array([threshold for _ in range(num_neurons)])
else:
self.thresholds = np.zeros((num_neurons,))
def fit(self, X):
num_p = X.shape[0]
num_k = X.shape[1]
        # capacity check: a Hopfield net can reliably store only about 0.138 * K random patterns
assert num_p < num_k * 0.138
num_k = X.shape[1]
for p in range(X.shape[0]):
X_p = X[p, :].reshape((1, num_k))
            matrix_lr = np.dot(X_p.T, X_p).astype(int)
np.fill_diagonal(matrix_lr, 0)
self.weights += matrix_lr
def predict(self, X, show_energy=False, show_char=False):
num_k = X.shape[1]
X_pred = X.copy()
# loop per every pattern
for p in range(X_pred.shape[0]):
differ = True
time_s = 0
# loop until the state
# stay the same
while differ:
X_prev = X_pred[p].copy()
# print energy
if show_energy:
self.print_energy(X_pred[p], p, time_s)
# print char
if show_char and num_k <= 100:
self.print_char(X_pred[p], p, time_s)
# loop per every neuron
for k in range(num_k):
val = np.dot(X_pred[p], self.weights[:, k])
val_thres = 1 if val > self.thresholds[k] else -1
X_pred[p, k] = val_thres
# check if the new state differs from the previous one
differ = False if np.array_equal(X_pred[p], X_prev) else True
time_s += 1
return X_pred
def print_energy(self, state, num_p, time_s):
first_term = 0
second_term = 0
for i in range(state.shape[0]):
for j in range(state.shape[0]):
first_term += self.weights[i, j] * state[i] * state[j]
for k in range(state.shape[0]):
second_term += self.thresholds[k] * state[k]
energy = -0.5 * first_term + second_term
print('Pattern: {}\t||\tTime stamp: {}\t||\tEnergy: {:7.0f}'.format(num_p, time_s, energy))
return energy
def print_char(self, sequence, num_p, time_s):
sqrtK = np.sqrt(sequence.shape[0])
# check if correct sequence
assert sqrtK % 1 == 0
print('Pattern: {}\t||\tTime stamp: {}'.format(num_p, time_s))
for y in range(int(sqrtK)):
for x in range(int(sqrtK)):
idx = int(y * sqrtK + x)
val = '*' if sequence[idx] > 0 else ' '
print(val, end=' ')
print('', sep='', end='\n')
print('', sep='', end='\n')
def test_w_less_101():
print('\n================')
print('K < 101')
print('================\n')
X = np.array([
letter_C.flatten(),
letter_I.flatten(),
letter_T.flatten(),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
noisy_C.flatten(),
noisy_I.flatten(),
noisy_T.flatten(),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_char=True)
def test_w_more_100():
print('\n================')
print('K > 100')
print('================\n')
num_k = random.randint(101, 1000)
binary = 2
X = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X = np.where(X > 0, 1, -1)
net = HopfieldNet(X.shape[1])
net.fit(X)
X_test = np.array([
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
np.random.randint(binary, size=num_k),
])
X_test = np.where(X_test > 0, 1, -1)
_ = net.predict(X_test, show_energy=True)
if __name__ == '__main__':
test_w_less_101()
test_w_more_100()
| en | 0.707752 | # check right number of pattern # loop per every pattern # loop until the state # stay the same # print energy # print char # loop per every neuron # check if the new state differs from the previous one # check if correct sequence | 3.266651 | 3 |
util/infoclient/test_infoclient.py | cdla/murfi2 | 7 | 8953 |
from infoclientLib import InfoClient
ic = InfoClient('localhost', 15002, 'localhost', 15003)
ic.add('roi-weightedave', 'active')
ic.start()
|
from infoclientLib import InfoClient
ic = InfoClient('localhost', 15002, 'localhost', 15003)
ic.add('roi-weightedave', 'active')
ic.start()
| none | 1 | 1.617871 | 2 |
|
lrtc_lib/experiment_runners/experiment_runner.py | MovestaDev/low-resource-text-classification-framework | 57 | 8954 | <gh_stars>10-100
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import abc
import logging
import time
from collections import defaultdict
from typing import List
import numpy as np
from dataclasses import dataclass
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
import lrtc_lib.data_access.data_access_factory as data_access_factory
import lrtc_lib.experiment_runners.experiments_results_handler as res_handler
from lrtc_lib.oracle_data_access import oracle_data_access_api
from lrtc_lib.active_learning.diversity_calculator import DiversityCalculator
from lrtc_lib.active_learning.knn_outlier_calculator import KnnOutlierCalculator
from lrtc_lib.active_learning.strategies import ActiveLearningStrategies
from lrtc_lib.data_access.core.data_structs import TextElement
from lrtc_lib.data_access.data_access_api import DataAccessApi
from lrtc_lib.data_access.data_access_factory import get_data_access
from lrtc_lib.orchestrator import orchestrator_api
from lrtc_lib.orchestrator.orchestrator_api import DeleteModels
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.training_set_selector.train_and_dev_set_selector_api import TrainingSetSelectionStrategy
@dataclass
class ExperimentParams:
experiment_name: str
train_dataset_name: str
dev_dataset_name: str
test_dataset_name: str
category_name: str
workspace_id: str
model: ModelType
active_learning_strategies: list
repeat_id: int
train_params: dict
def compute_batch_scores(config, elements):
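    # Scores an active-learning batch against the unlabeled pool in embedding space:
    # representativeness is the inverse of the batch's kNN outlier score and diversity
    # comes from DiversityCalculator over the same embeddings.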
data_access = get_data_access()
unlabeled = data_access.sample_unlabeled_text_elements(config.workspace_id, config.train_dataset_name,
config.category_name, 10 ** 6)["results"]
unlabeled_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, unlabeled)["embeddings"])
batch_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, elements)["embeddings"])
outlier_calculator = KnnOutlierCalculator(unlabeled_emb)
outlier_value = outlier_calculator.compute_batch_score(batch_emb)
representativeness_value = 1 / outlier_value
diversity_calculator = DiversityCalculator(unlabeled_emb)
diversity_value = diversity_calculator.compute_batch_score(batch_emb)
return diversity_value, representativeness_value
class ExperimentRunner(object, metaclass=abc.ABCMeta):
NO_AL = 'no_active_learning'
def __init__(self, first_model_positives_num: int, first_model_negatives_num: int,
active_learning_suggestions_num: int):
"""
        Init the ExperimentRunner
        :param first_model_positives_num: the number of positive instances to provide for the first model.
:param first_model_negatives_num: the number of negative instances to provide for the first model.
:param active_learning_suggestions_num: the number of instances to be suggested by the active learning strategy
for the training of the second model.
"""
self.first_model_positives_num = first_model_positives_num
self.first_model_negatives_num = first_model_negatives_num
self.active_learning_suggestions_num = active_learning_suggestions_num
self.data_access: DataAccessApi = data_access_factory.get_data_access()
self.cached_first_model_scores = False
orchestrator_api.set_training_set_selection_strategy(TrainingSetSelectionStrategy.ALL_LABELED)
def run(self, config: ExperimentParams, active_learning_iterations_num: int, results_file_path: str,
delete_workspaces: bool = True):
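        # Flow: train the first model once, then for every compatible active-learning
        # strategy copy the workspace and run the requested number of AL iterations,
        # saving results after each one.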
        # key: active learning name, value: list of results over iterations (first model has no iterations)
results_per_active_learning = defaultdict(dict)
# train first model
iteration = 0
res_dict = self.train_first_model(config=config)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[self.NO_AL][iteration] = res_dict
original_workspace_id = config.workspace_id
for al in config.active_learning_strategies:
orchestrator_api.set_active_learning_strategy(al)
if not orchestrator_api.is_model_compatible_with_active_learning(al, config.model):
logging.info(f'skipping active learning strategy {al.name} for model {config.model.name} '
f'since the strategy does not support this model.')
continue
al_workspace_id = original_workspace_id + "-" + al.name
if orchestrator_api.workspace_exists(al_workspace_id):
orchestrator_api.delete_workspace(al_workspace_id)
orchestrator_api.copy_workspace(original_workspace_id, al_workspace_id)
config.workspace_id = al_workspace_id
for iteration in range(1, active_learning_iterations_num + 1):
logging.info(f'Run AL strategy: {al.name}, iteration num: {iteration}, repeat num: {config.repeat_id}\t'
f'workspace: {config.workspace_id}')
res_dict, train_id = self.run_active_learning_iteration(config, al, iteration)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[al.name][iteration] = res_dict
if delete_workspaces:
orchestrator_api.delete_workspace(config.workspace_id, DeleteModels.ALL_BUT_FIRST_MODEL)
if delete_workspaces:
orchestrator_api.delete_workspace(original_workspace_id)
return results_per_active_learning
def train_first_model(self, config: ExperimentParams):
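        # Creates a fresh workspace, applies gold labels to the dev set (when available),
        # seeds the category with the configured positive/negative examples, then trains
        # and evaluates the first model.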
if orchestrator_api.workspace_exists(config.workspace_id):
orchestrator_api.delete_workspace(config.workspace_id)
orchestrator_api.create_workspace(config.workspace_id, config.train_dataset_name,
dev_dataset_name=config.dev_dataset_name)
orchestrator_api.create_new_category(config.workspace_id, config.category_name, "No description for you")
dev_text_elements_uris = orchestrator_api.get_all_text_elements_uris(config.dev_dataset_name)
dev_text_elements_and_labels = oracle_data_access_api.get_gold_labels(config.dev_dataset_name,
dev_text_elements_uris)
if dev_text_elements_and_labels is not None:
orchestrator_api.set_labels(config.workspace_id, dev_text_elements_and_labels)
random_seed = sum([ord(c) for c in config.workspace_id])
logging.info(str(config))
logging.info(f'random seed: {random_seed}')
self.set_first_model_positives(config, random_seed)
self.set_first_model_negatives(config, random_seed)
# train first model
logging.info(f'Starting first model training (model: {config.model.name})\tworkspace: {config.workspace_id}')
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception(f'a new model was not trained\tworkspace: {config.workspace_id}')
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al=self.NO_AL, iteration=0, eval_dataset=eval_dataset)
res_dict.update(self.generate_al_batch_dict(config)) # ensures AL-related keys are in the results dictionary
logging.info(f'Evaluation on dataset: {eval_dataset}, iteration: 0, first model (id: {new_model_id}) '
f'repeat: {config.repeat_id}, is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict
def run_active_learning_iteration(self, config: ExperimentParams, al, iteration):
# get suggested elements for labeling (and their gold labels)
suggested_text_elements, suggested_uris_and_gold_labels = \
self.get_suggested_elements_and_gold_labels(config, al)
# calculate metrics for the batch suggested by the active learning strategy
al_batch_dict = self.generate_al_batch_dict(config, suggested_text_elements)
# set gold labels as the user-provided labels of the elements suggested by the active learning strategy
orchestrator_api.set_labels(config.workspace_id, suggested_uris_and_gold_labels)
# train a new model with the additional elements suggested by the active learning strategy
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception('New model was not trained')
# evaluate the new model
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al.name, iteration, eval_dataset, suggested_text_elements)
res_dict.update(al_batch_dict)
logging.info(f'Evaluation on dataset: {eval_dataset}, with AL: {al.name}, iteration: {iteration}, '
f'repeat: {config.repeat_id}, model (id: {new_model_id}) is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict, new_model_id
def get_suggested_elements_and_gold_labels(self, config, al):
start = time.time()
suggested_text_elements_for_labeling = \
orchestrator_api.get_elements_to_label(config.workspace_id, config.category_name,
self.active_learning_suggestions_num)
end = time.time()
logging.info(f'{len(suggested_text_elements_for_labeling)} instances '
f'suggested by active learning strategy: {al.name} '
f'for dataset: {config.train_dataset_name} and category: {config.category_name}.\t'
f'runtime: {end - start}\tworkspace: {config.workspace_id}')
uris_for_labeling = [elem.uri for elem in suggested_text_elements_for_labeling]
uris_and_gold_labels = oracle_data_access_api.get_gold_labels(config.train_dataset_name, uris_for_labeling,
config.category_name)
return suggested_text_elements_for_labeling, uris_and_gold_labels
def evaluate(self, config: ExperimentParams, al, iteration, eval_dataset,
suggested_text_elements_for_labeling=None):
metadata_dict = res_handler.generate_metadata_dict(config, eval_dataset, al, iteration)
labels_counts_dict = res_handler.generate_train_labels_counts_dict(config)
performance_dict = res_handler.generate_performance_metrics_dict(config, eval_dataset)
experiment_specific_metrics_dict = \
self.generate_additional_metrics_dict(config, suggested_text_elements_for_labeling)
res_dict = {**metadata_dict, **labels_counts_dict, **performance_dict, **experiment_specific_metrics_dict}
return res_dict
@abc.abstractmethod
def set_first_model_positives(self, config, random_seed) -> List[TextElement]:
"""
Set the positive instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
:return: a list of TextElements and a log message
"""
func_name = self.set_first_model_positives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@abc.abstractmethod
def set_first_model_negatives(self, config, random_seed) -> List[TextElement]:
"""
Set the negative instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
:return: a list of TextElements and a log message
"""
func_name = self.set_first_model_negatives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@staticmethod
def generate_al_batch_dict(config, batch_elements=None):
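        # Batch diversity/representativeness are only computed when the model supports
        # embeddings (checked via DAL compatibility); otherwise "NA" is reported.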
batch_dict = {}
model_supports_embeddings = \
orchestrator_api.is_model_compatible_with_active_learning(ActiveLearningStrategies.DAL, config.model)
if batch_elements is not None and model_supports_embeddings:
diversity_value, representativeness_value = compute_batch_scores(config, batch_elements)
batch_dict["diversity"] = diversity_value
batch_dict["representativeness"] = representativeness_value
else:
batch_dict["diversity"] = "NA"
batch_dict["representativeness"] = "NA"
return batch_dict
def generate_additional_metrics_dict(self, config, suggested_text_elements_for_labeling):
return {}
| # (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import abc
import logging
import time
from collections import defaultdict
from typing import List
import numpy as np
from dataclasses import dataclass
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
import lrtc_lib.data_access.data_access_factory as data_access_factory
import lrtc_lib.experiment_runners.experiments_results_handler as res_handler
from lrtc_lib.oracle_data_access import oracle_data_access_api
from lrtc_lib.active_learning.diversity_calculator import DiversityCalculator
from lrtc_lib.active_learning.knn_outlier_calculator import KnnOutlierCalculator
from lrtc_lib.active_learning.strategies import ActiveLearningStrategies
from lrtc_lib.data_access.core.data_structs import TextElement
from lrtc_lib.data_access.data_access_api import DataAccessApi
from lrtc_lib.data_access.data_access_factory import get_data_access
from lrtc_lib.orchestrator import orchestrator_api
from lrtc_lib.orchestrator.orchestrator_api import DeleteModels
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.training_set_selector.train_and_dev_set_selector_api import TrainingSetSelectionStrategy
@dataclass
class ExperimentParams:
experiment_name: str
train_dataset_name: str
dev_dataset_name: str
test_dataset_name: str
category_name: str
workspace_id: str
model: ModelType
active_learning_strategies: list
repeat_id: int
train_params: dict
def compute_batch_scores(config, elements):
data_access = get_data_access()
unlabeled = data_access.sample_unlabeled_text_elements(config.workspace_id, config.train_dataset_name,
config.category_name, 10 ** 6)["results"]
unlabeled_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, unlabeled)["embeddings"])
batch_emb = np.array(orchestrator_api.infer(config.workspace_id, config.category_name, elements)["embeddings"])
outlier_calculator = KnnOutlierCalculator(unlabeled_emb)
outlier_value = outlier_calculator.compute_batch_score(batch_emb)
representativeness_value = 1 / outlier_value
diversity_calculator = DiversityCalculator(unlabeled_emb)
diversity_value = diversity_calculator.compute_batch_score(batch_emb)
return diversity_value, representativeness_value
class ExperimentRunner(object, metaclass=abc.ABCMeta):
NO_AL = 'no_active_learning'
def __init__(self, first_model_positives_num: int, first_model_negatives_num: int,
active_learning_suggestions_num: int):
"""
Init the ExperimentRunner
:param first_model_positives_num: the number of positive instances to provide for the first model.
:param first_model_negatives_num: the number of negative instances to provide for the first model.
:param active_learning_suggestions_num: the number of instances to be suggested by the active learning strategy
for the training of the second model.
"""
self.first_model_positives_num = first_model_positives_num
self.first_model_negatives_num = first_model_negatives_num
self.active_learning_suggestions_num = active_learning_suggestions_num
self.data_access: DataAccessApi = data_access_factory.get_data_access()
self.cached_first_model_scores = False
orchestrator_api.set_training_set_selection_strategy(TrainingSetSelectionStrategy.ALL_LABELED)
def run(self, config: ExperimentParams, active_learning_iterations_num: int, results_file_path: str,
delete_workspaces: bool = True):
# key: active learning name, value: list of results over iterations (first model has no iterations)
results_per_active_learning = defaultdict(dict)
# train first model
iteration = 0
res_dict = self.train_first_model(config=config)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[self.NO_AL][iteration] = res_dict
original_workspace_id = config.workspace_id
for al in config.active_learning_strategies:
orchestrator_api.set_active_learning_strategy(al)
if not orchestrator_api.is_model_compatible_with_active_learning(al, config.model):
logging.info(f'skipping active learning strategy {al.name} for model {config.model.name} '
f'since the strategy does not support this model.')
continue
al_workspace_id = original_workspace_id + "-" + al.name
if orchestrator_api.workspace_exists(al_workspace_id):
orchestrator_api.delete_workspace(al_workspace_id)
orchestrator_api.copy_workspace(original_workspace_id, al_workspace_id)
config.workspace_id = al_workspace_id
for iteration in range(1, active_learning_iterations_num + 1):
logging.info(f'Run AL strategy: {al.name}, iteration num: {iteration}, repeat num: {config.repeat_id}\t'
f'workspace: {config.workspace_id}')
res_dict, train_id = self.run_active_learning_iteration(config, al, iteration)
res_handler.save_results(results_file_path, [res_dict])
results_per_active_learning[al.name][iteration] = res_dict
if delete_workspaces:
orchestrator_api.delete_workspace(config.workspace_id, DeleteModels.ALL_BUT_FIRST_MODEL)
if delete_workspaces:
orchestrator_api.delete_workspace(original_workspace_id)
return results_per_active_learning
def train_first_model(self, config: ExperimentParams):
if orchestrator_api.workspace_exists(config.workspace_id):
orchestrator_api.delete_workspace(config.workspace_id)
orchestrator_api.create_workspace(config.workspace_id, config.train_dataset_name,
dev_dataset_name=config.dev_dataset_name)
orchestrator_api.create_new_category(config.workspace_id, config.category_name, "No description for you")
dev_text_elements_uris = orchestrator_api.get_all_text_elements_uris(config.dev_dataset_name)
dev_text_elements_and_labels = oracle_data_access_api.get_gold_labels(config.dev_dataset_name,
dev_text_elements_uris)
if dev_text_elements_and_labels is not None:
orchestrator_api.set_labels(config.workspace_id, dev_text_elements_and_labels)
random_seed = sum([ord(c) for c in config.workspace_id])
logging.info(str(config))
logging.info(f'random seed: {random_seed}')
self.set_first_model_positives(config, random_seed)
self.set_first_model_negatives(config, random_seed)
# train first model
logging.info(f'Starting first model training (model: {config.model.name})\tworkspace: {config.workspace_id}')
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception(f'a new model was not trained\tworkspace: {config.workspace_id}')
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al=self.NO_AL, iteration=0, eval_dataset=eval_dataset)
res_dict.update(self.generate_al_batch_dict(config)) # ensures AL-related keys are in the results dictionary
logging.info(f'Evaluation on dataset: {eval_dataset}, iteration: 0, first model (id: {new_model_id}) '
f'repeat: {config.repeat_id}, is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict
def run_active_learning_iteration(self, config: ExperimentParams, al, iteration):
# get suggested elements for labeling (and their gold labels)
suggested_text_elements, suggested_uris_and_gold_labels = \
self.get_suggested_elements_and_gold_labels(config, al)
# calculate metrics for the batch suggested by the active learning strategy
al_batch_dict = self.generate_al_batch_dict(config, suggested_text_elements)
# set gold labels as the user-provided labels of the elements suggested by the active learning strategy
orchestrator_api.set_labels(config.workspace_id, suggested_uris_and_gold_labels)
# train a new model with the additional elements suggested by the active learning strategy
new_model_id = orchestrator_api.train(config.workspace_id, config.category_name, config.model, train_params=config.train_params)
if new_model_id is None:
raise Exception('New model was not trained')
# evaluate the new model
eval_dataset = config.test_dataset_name
res_dict = self.evaluate(config, al.name, iteration, eval_dataset, suggested_text_elements)
res_dict.update(al_batch_dict)
logging.info(f'Evaluation on dataset: {eval_dataset}, with AL: {al.name}, iteration: {iteration}, '
f'repeat: {config.repeat_id}, model (id: {new_model_id}) is: {res_dict}\t'
f'workspace: {config.workspace_id}')
return res_dict, new_model_id
def get_suggested_elements_and_gold_labels(self, config, al):
start = time.time()
suggested_text_elements_for_labeling = \
orchestrator_api.get_elements_to_label(config.workspace_id, config.category_name,
self.active_learning_suggestions_num)
end = time.time()
logging.info(f'{len(suggested_text_elements_for_labeling)} instances '
f'suggested by active learning strategy: {al.name} '
f'for dataset: {config.train_dataset_name} and category: {config.category_name}.\t'
f'runtime: {end - start}\tworkspace: {config.workspace_id}')
uris_for_labeling = [elem.uri for elem in suggested_text_elements_for_labeling]
uris_and_gold_labels = oracle_data_access_api.get_gold_labels(config.train_dataset_name, uris_for_labeling,
config.category_name)
return suggested_text_elements_for_labeling, uris_and_gold_labels
def evaluate(self, config: ExperimentParams, al, iteration, eval_dataset,
suggested_text_elements_for_labeling=None):
metadata_dict = res_handler.generate_metadata_dict(config, eval_dataset, al, iteration)
labels_counts_dict = res_handler.generate_train_labels_counts_dict(config)
performance_dict = res_handler.generate_performance_metrics_dict(config, eval_dataset)
experiment_specific_metrics_dict = \
self.generate_additional_metrics_dict(config, suggested_text_elements_for_labeling)
res_dict = {**metadata_dict, **labels_counts_dict, **performance_dict, **experiment_specific_metrics_dict}
return res_dict
@abc.abstractmethod
def set_first_model_positives(self, config, random_seed) -> List[TextElement]:
"""
Set the positive instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
:return: a list of TextElements and a log message
"""
func_name = self.set_first_model_positives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@abc.abstractmethod
def set_first_model_negatives(self, config, random_seed) -> List[TextElement]:
"""
Set the negative instances for the training of the first model.
:param config: experiment config for this run
:param random_seed: a seed for the Random being used for sampling
:return: a list of TextElements and a log message
"""
func_name = self.set_first_model_negatives.__name__
raise NotImplementedError('users must define ' + func_name + ' to use this base class')
@staticmethod
def generate_al_batch_dict(config, batch_elements=None):
batch_dict = {}
model_supports_embeddings = \
orchestrator_api.is_model_compatible_with_active_learning(ActiveLearningStrategies.DAL, config.model)
if batch_elements is not None and model_supports_embeddings:
diversity_value, representativeness_value = compute_batch_scores(config, batch_elements)
batch_dict["diversity"] = diversity_value
batch_dict["representativeness"] = representativeness_value
else:
batch_dict["diversity"] = "NA"
batch_dict["representativeness"] = "NA"
return batch_dict
def generate_additional_metrics_dict(self, config, suggested_text_elements_for_labeling):
return {} | en | 0.820455 | # (c) Copyright IBM Corporation 2020. # LICENSE: Apache License 2.0 (Apache-2.0) # http://www.apache.org/licenses/LICENSE-2.0 Init the ExperimentsRunner :param first_model_positives_num: the number of positives instances to provide for the first model. :param first_model_negatives_num: the number of negative instances to provide for the first model. :param active_learning_suggestions_num: the number of instances to be suggested by the active learning strategy for the training of the second model. # key: active learning name, value: list of results oevr iterations (first model has no iterations) # train first model # train first model # ensures AL-related keys are in the results dictionary # get suggested elements for labeling (and their gold labels) # calculate metrics for the batch suggested by the active learning strategy # set gold labels as the user-provided labels of the elements suggested by the active learning strategy # train a new model with the additional elements suggested by the active learning strategy # evaluate the new model Set the positive instances for the training of the first model. :param config: experiment config for this run :param random_seed: a seed for the Random being used for sampling :return: a list of TextElements and a log message Set the negative instances for the training of the first model. :param config: experiment config for this run :param random_seed: a seed for the Random being used for sampling :return: a list of TextElements and a log message | 1.591558 | 2 |
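The two abstract hooks above are all a concrete experiment has to supply; workspace handling, the active-learning loop and evaluation are inherited. A minimal sketch of a subclass is given below — it reuses only the oracle_data_access_api and orchestrator_api calls already present in this module, and the fixed URI lists are assumptions made purely for illustration.
# Hypothetical concrete runner; the fixed URI lists stand in for whatever
# selection logic a real experiment would use.
class StaticSeedExperimentRunner(ExperimentRunner):
    def __init__(self, positive_uris, negative_uris, **kwargs):
        super().__init__(**kwargs)
        self.positive_uris = positive_uris
        self.negative_uris = negative_uris

    def set_first_model_positives(self, config, random_seed):
        uris = self.positive_uris[:self.first_model_positives_num]
        uris_and_labels = oracle_data_access_api.get_gold_labels(
            config.train_dataset_name, uris, config.category_name)
        orchestrator_api.set_labels(config.workspace_id, uris_and_labels)
        return uris_and_labels  # train_first_model above ignores the return value

    def set_first_model_negatives(self, config, random_seed):
        uris = self.negative_uris[:self.first_model_negatives_num]
        uris_and_labels = oracle_data_access_api.get_gold_labels(
            config.train_dataset_name, uris, config.category_name)
        orchestrator_api.set_labels(config.workspace_id, uris_and_labels)
        return uris_and_labels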
contrast/environment/data.py | alexbjorling/acquisition-framework | 0 | 8955 | <gh_stars>0
try:
from tango import DeviceProxy, DevError
except ModuleNotFoundError:
pass
class PathFixer(object):
"""
Basic pathfixer which takes a path manually.
"""
def __init__(self):
self.directory = None
class SdmPathFixer(object):
"""
MAX IV pathfixer which takes a path from a Tango device.
"""
def __init__(self, sdm_device):
self.device = DeviceProxy(sdm_device)
self.TRIALS = 10
self.cache = None
@property
def directory(self):
for trial in range(self.TRIALS):
try:
val = self.device.SamplePath
self.cache = val
return val
except DevError:
print('Failed in getting SDM path from Tango. Trying again...')
print('Failed %u times, using cached value: %s'
% (self.TRIALS, self.cache))
return self.cache
| try:
from tango import DeviceProxy, DevError
except ModuleNotFoundError:
pass
class PathFixer(object):
"""
Basic pathfixer which takes a path manually.
"""
def __init__(self):
self.directory = None
class SdmPathFixer(object):
"""
MAX IV pathfixer which takes a path from a Tango device.
"""
def __init__(self, sdm_device):
self.device = DeviceProxy(sdm_device)
self.TRIALS = 10
self.cache = None
@property
def directory(self):
for trial in range(self.TRIALS):
try:
val = self.device.SamplePath
self.cache = val
return val
except DevError:
print('Failed in getting SDM path from Tango. Trying again...')
print('Failed %u times, using cached value: %s'
% (self.TRIALS, self.cache))
return self.cache | en | 0.938478 | Basic pathfixer which takes a path manually. MAX IV pathfixer which takes a path from a Tango device. | 2.505715 | 3 |
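Both fixers expose the same read-only attribute, directory, so downstream code does not need to know whether the path was set by hand or read from the SDM Tango device. A small sketch of a consumer follows; the helper function and fallback path are assumptions, not part of this package.
# Illustrative consumer; resolve_output_file and the fallback path are assumptions.
import os

def resolve_output_file(pathfixer, filename, fallback="/tmp/contrast_data"):
    directory = pathfixer.directory or fallback
    return os.path.join(directory, filename)

fixer = PathFixer()
fixer.directory = "/data/visit_2024/sample_a"
print(resolve_output_file(fixer, "scan_0001.h5"))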
game_2048/views.py | fung04/csrw_game | 0 | 8956 | import json
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import redirect, render
from .models import Game2048
# Create your views here.
# test_user
# 8!S#5RP!WVMACg
def game(request):
return render(request, 'game_2048/index.html')
def set_result(request):
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'POST':
# Get the game state from the POST request
game_state = request.body
obj = Game2048.objects.get(user=user)
# Check if the game state is identical to the server game state
if game_state != obj.game_state:
# convert the string to a JSON object
json_game_state = json.loads(game_state)
# extract the value of 'best' from the JSON object
obj.best_score = json_game_state['best']
obj.game_state = json_game_state # save JSON object to game_state
obj.save()
else:
return redirect('game_2048:game')
return JsonResponse("", safe=False)
def get_result(request):
# Check if user is logged in if not set user to test_user
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'GET':
obj, created = Game2048.objects.get_or_create(user=user)
game_state = obj.game_state
return JsonResponse(game_state, safe=False)
| import json
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import redirect, render
from .models import Game2048
# Create your views here.
# test_user
# 8!S#5RP!WVMACg
def game(request):
return render(request, 'game_2048/index.html')
def set_result(request):
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'POST':
# Get the game state from the POST request
game_state = request.body
obj = Game2048.objects.get(user=user)
# Check if the game state is identical to the server game state
if game_state != obj.game_state:
# convert the string to a JSON object
json_game_state = json.loads(game_state)
# extract the value of 'best' from the JSON object
obj.best_score = json_game_state['best']
obj.game_state = json_game_state # save JSON object to game_state
obj.save()
else:
return redirect('game_2048:game')
return JsonResponse("", safe=False)
def get_result(request):
# Check if user is logged in if not set user to test_user
user = request.user if str(
request.user) != "AnonymousUser" else User.objects.get(username='test_user')
if request.method == 'GET':
obj, created = Game2048.objects.get_or_create(user=user)
game_state = obj.game_state
return JsonResponse(game_state, safe=False)
| en | 0.751943 | # Create your views here. # test_user # 8!S#5RP!WVMACg # Get the game state from the POST request # Check if the game state idendical to the server game state # let string to JSON object # extract value of best from JSON objest # save JSON object to game_state # Check if user is logged in if not set user to test_user | 2.396149 | 2 |
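For these views to be reachable they need a URLconf under the game_2048 namespace that redirect('game_2048:game') assumes, and the JSON body posted to set_result must carry at least the best key read above. A sketch of the routing is shown below; the concrete paths are assumptions.
# Hypothetical game_2048/urls.py wiring for the views above; paths are placeholders.
from django.urls import path
from . import views

app_name = 'game_2048'
urlpatterns = [
    path('', views.game, name='game'),
    path('set_result/', views.set_result, name='set_result'),
    path('get_result/', views.get_result, name='get_result'),
]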
distdeepq/__init__.py | Silvicek/distributional-dqn | 131 | 8957 | from distdeepq import models # noqa
from distdeepq.build_graph import build_act, build_train # noqa
from distdeepq.simple import learn, load, make_session # noqa
from distdeepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
from distdeepq.static import *
from distdeepq.plots import PlotMachine
| from distdeepq import models # noqa
from distdeepq.build_graph import build_act, build_train # noqa
from distdeepq.simple import learn, load, make_session # noqa
from distdeepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
from distdeepq.static import *
from distdeepq.plots import PlotMachine
| uz | 0.447735 | # noqa # noqa # noqa # noqa | 1.200001 | 1 |
python/10.Authentication-&-API-Keys.py | 17nikhil/codecademy | 0 | 8958 | # Authentication & API Keys
# Many APIs require an API key. Just as a real-world key allows you to access something, an API key grants you access to a particular API. Moreover, an API key identifies you to the API, which helps the API provider keep track of how their service is used and prevent unauthorized or malicious activity.
#
# Some APIs require authentication using a protocol called OAuth. We won't get into the details, but if you've ever been redirected to a page asking for permission to link an application with your account, you've probably used OAuth.
#
# API keys are often long alphanumeric strings. We've made one up in the editor to the right! (It won't actually work on anything, but when you receive your own API keys in future projects, they'll look a lot like this.)
api_key = "string"
| # Authentication & API Keys
# Many APIs require an API key. Just as a real-world key allows you to access something, an API key grants you access to a particular API. Moreover, an API key identifies you to the API, which helps the API provider keep track of how their service is used and prevent unauthorized or malicious activity.
#
# Some APIs require authentication using a protocol called OAuth. We won't get into the details, but if you've ever been redirected to a page asking for permission to link an application with your account, you've probably used OAuth.
#
# API keys are often long alphanumeric strings. We've made one up in the editor to the right! (It won't actually work on anything, but when you receive your own API keys in future projects, they'll look a lot like this.)
api_key = "string"
| en | 0.954527 | # Authentication & API Keys # Many APIs require an API key. Just as a real-world key allows you to access something, an API key grants you access to a particular API. Moreover, an API key identifies you to the API, which helps the API provider keep track of how their service is used and prevent unauthorized or malicious activity. # # Some APIs require authentication using a protocol called OAuth. We won't get into the details, but if you've ever been redirected to a page asking for permission to link an application with your account, you've probably used OAuth. # # API keys are often long alphanumeric strings. We've made one up in the editor to the right! (It won't actually work on anything, but when you receive your own API keys in future projects, they'll look a lot like this.) | 2.135496 | 2 |
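The lesson stops at assigning the key to a variable; in practice the key is attached to every request, commonly as a header or query parameter. A short illustration follows — the endpoint URL and header scheme are made up.
# Hypothetical usage of an API key; the URL and header below are placeholders.
import requests

api_key = "string"
response = requests.get(
    "https://api.example.com/v1/resources",
    headers={"Authorization": f"Bearer {api_key}"},
    timeout=10,
)
print(response.status_code)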
plucker/__init__.py | takkaria/json-plucker | 0 | 8959 | <filename>plucker/__init__.py
from .plucker import pluck, Path
from .exceptions import PluckError
__all__ = ["pluck", "Path", "PluckError"]
| <filename>plucker/__init__.py
from .plucker import pluck, Path
from .exceptions import PluckError
__all__ = ["pluck", "Path", "PluckError"]
| none | 1 | 1.680164 | 2 |
|
arviz/plots/pairplot.py | gimbo/arviz | 0 | 8960 | <reponame>gimbo/arviz<filename>arviz/plots/pairplot.py<gh_stars>0
"""Plot a scatter or hexbin of sampled parameters."""
import warnings
import numpy as np
from ..data import convert_to_dataset, convert_to_inference_data
from .plot_utils import xarray_to_ndarray, get_coords, get_plotting_function
from ..utils import _var_names
def plot_pair(
data,
group="posterior",
var_names=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
divergences=False,
colorbar=False,
ax=None,
divergences_kwargs=None,
plot_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin matrix of the sampled parameters.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
group : str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names : list of variable names
Variables to be plotted, if None all variable are plotted
coords : mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
figsize : figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str
Type of plot to display (scatter, kde or hexbin)
gridsize : int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences : Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar : bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs : dicts, optional
Additional keywords passed to ax.scatter for divergences
plot_kwargs : dicts, optional
Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['theta', 'mu', 'tau'],
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
if coords is None:
coords = {}
if plot_kwargs is None:
plot_kwargs = {}
if kind == "scatter":
plot_kwargs.setdefault("marker", ".")
plot_kwargs.setdefault("lw", 0)
if divergences_kwargs is None:
divergences_kwargs = {}
divergences_kwargs.setdefault("marker", "o")
divergences_kwargs.setdefault("markeredgecolor", "k")
divergences_kwargs.setdefault("color", "C1")
divergences_kwargs.setdefault("lw", 0)
# Get posterior draws and combine chains
data = convert_to_inference_data(data)
grouped_data = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, grouped_data)
flat_var_names, infdata_group = xarray_to_ndarray(
get_coords(grouped_data, coords), var_names=var_names, combined=True
)
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
SyntaxWarning,
)
if gridsize == "auto":
gridsize = int(len(infdata_group[0]) ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise Exception("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
infdata_group=infdata_group,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
plot_kwargs=plot_kwargs,
contour=contour,
fill_last=fill_last,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
pairplot_kwargs.pop("gridsize", None)
pairplot_kwargs.pop("colorbar", None)
pairplot_kwargs.pop("divergences_kwargs", None)
pairplot_kwargs.pop("hexbin_values", None)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
| """Plot a scatter or hexbin of sampled parameters."""
import warnings
import numpy as np
from ..data import convert_to_dataset, convert_to_inference_data
from .plot_utils import xarray_to_ndarray, get_coords, get_plotting_function
from ..utils import _var_names
def plot_pair(
data,
group="posterior",
var_names=None,
coords=None,
figsize=None,
textsize=None,
kind="scatter",
gridsize="auto",
contour=True,
fill_last=True,
divergences=False,
colorbar=False,
ax=None,
divergences_kwargs=None,
plot_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot a scatter or hexbin matrix of the sampled parameters.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
group : str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names : list of variable names
Variables to be plotted, if None all variable are plotted
coords : mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
figsize : figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str
Type of plot to display (scatter, kde or hexbin)
gridsize : int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences : Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar : bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs : dicts, optional
Additional keywords passed to ax.scatter for divergences
plot_kwargs : dicts, optional
Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['theta', 'mu', 'tau'],
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
if kind not in valid_kinds:
raise ValueError(
("Plot type {} not recognized." "Plot type must be in {}").format(kind, valid_kinds)
)
if coords is None:
coords = {}
if plot_kwargs is None:
plot_kwargs = {}
if kind == "scatter":
plot_kwargs.setdefault("marker", ".")
plot_kwargs.setdefault("lw", 0)
if divergences_kwargs is None:
divergences_kwargs = {}
divergences_kwargs.setdefault("marker", "o")
divergences_kwargs.setdefault("markeredgecolor", "k")
divergences_kwargs.setdefault("color", "C1")
divergences_kwargs.setdefault("lw", 0)
# Get posterior draws and combine chains
data = convert_to_inference_data(data)
grouped_data = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, grouped_data)
flat_var_names, infdata_group = xarray_to_ndarray(
get_coords(grouped_data, coords), var_names=var_names, combined=True
)
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
SyntaxWarning,
)
if gridsize == "auto":
gridsize = int(len(infdata_group[0]) ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise Exception("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
infdata_group=infdata_group,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
plot_kwargs=plot_kwargs,
contour=contour,
fill_last=fill_last,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
pairplot_kwargs.pop("gridsize", None)
pairplot_kwargs.pop("colorbar", None)
pairplot_kwargs.pop("divergences_kwargs", None)
pairplot_kwargs.pop("hexbin_values", None)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax | en | 0.494177 | Plot a scatter or hexbin of sampled parameters. Plot a scatter or hexbin matrix of the sampled parameters. Parameters ---------- data : obj Any object that can be converted to an az.InferenceData object Refer to documentation of az.convert_to_dataset for details group : str, optional Specifies which InferenceData group should be plotted. Defaults to 'posterior'. var_names : list of variable names Variables to be plotted, if None all variable are plotted coords : mapping, optional Coordinates of var_names to be plotted. Passed to `Dataset.sel` figsize : figure size tuple If None, size is (8 + numvars, 8 + numvars) textsize: int Text size for labels. If None it will be autoscaled based on figsize. kind : str Type of plot to display (scatter, kde or hexbin) gridsize : int or (int, int), optional Only works for kind=hexbin. The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. contour : bool If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True. fill_last : bool If True fill the last contour of the 2D KDE plot. Defaults to True. divergences : Boolean If True divergences will be plotted in a different color, only if group is either 'prior' or 'posterior'. colorbar : bool If True a colorbar will be included as part of the plot (Defaults to False). Only works when kind=hexbin ax: axes, optional Matplotlib axes or bokeh figures. divergences_kwargs : dicts, optional Additional keywords passed to ax.scatter for divergences plot_kwargs : dicts, optional Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin backend: str, optional Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib". backend_kwargs: bool, optional These are kwargs specific to the backend being used. For additional documentation check the plotting method of the backend. show : bool, optional Call backend show function. Returns ------- axes : matplotlib axes or bokeh figures Examples -------- KDE Pair Plot .. plot:: :context: close-figs >>> import arviz as az >>> centered = az.load_arviz_data('centered_eight') >>> coords = {'school': ['Choate', 'Deerfield']} >>> az.plot_pair(centered, >>> var_names=['theta', 'mu', 'tau'], >>> kind='kde', >>> coords=coords, >>> divergences=True, >>> textsize=18) Hexbin pair plot .. plot:: :context: close-figs >>> az.plot_pair(centered, >>> var_names=['theta', 'mu'], >>> coords=coords, >>> textsize=18, >>> kind='hexbin') Pair plot showing divergences .. plot:: :context: close-figs >>> az.plot_pair(centered, ... var_names=['theta', 'mu', 'tau'], ... coords=coords, ... divergences=True, ... textsize=18) # Get posterior draws and combine chains # Assigning divergence group based on group param # Get diverging draws and combine chains # TODO: Add backend kwargs | 3.027842 | 3 |
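One combination the docstring examples above do not show is kind='hexbin' together with its colorbar, which is only honoured for that kind. A brief sketch reusing the docstring's own example dataset:
# Minimal hexbin + colorbar example, reusing the centered_eight data from the docstring.
import arviz as az

centered = az.load_arviz_data('centered_eight')
az.plot_pair(
    centered,
    var_names=['mu', 'tau'],
    kind='hexbin',
    colorbar=True,   # only takes effect when kind='hexbin'
    gridsize=25,
)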
cuestionario/formularios.py | LisandroCanteros/Grupo2_COM06_Info2021 | 0 | 8961 | from django.forms import ModelForm
from .models import Cuestionario, Categoria
from preguntas.models import Pregunta, Respuesta
class CuestionarioForm(ModelForm):
class Meta:
model = Cuestionario
fields = '__all__'
class PreguntaForm(ModelForm):
class Meta:
model = Pregunta
fields = '__all__'
class RespuestaForm(ModelForm):
class Meta:
model = Respuesta
fields = '__all__'
class CategoriaForm(ModelForm):
class Meta:
model = Categoria
fields = '__all__'
| from django.forms import ModelForm
from .models import Cuestionario, Categoria
from preguntas.models import Pregunta, Respuesta
class CuestionarioForm(ModelForm):
class Meta:
model = Cuestionario
fields = '__all__'
class PreguntaForm(ModelForm):
class Meta:
model = Pregunta
fields = '__all__'
class RespuestaForm(ModelForm):
class Meta:
model = Respuesta
fields = '__all__'
class CategoriaForm(ModelForm):
class Meta:
model = Categoria
fields = '__all__'
| none | 1 | 2.267855 | 2 |
|
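These ModelForms are typically consumed from a view that validates and saves them; a minimal sketch is below, where the template name and redirect target are assumptions.
# Hypothetical view using CuestionarioForm; template and redirect names are placeholders.
from django.shortcuts import render, redirect
from .formularios import CuestionarioForm

def crear_cuestionario(request):
    form = CuestionarioForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('cuestionario:listar')
    return render(request, 'cuestionario/crear.html', {'form': form})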
vitcloud/views.py | biocross/VITCloud | 2 | 8962 | <filename>vitcloud/views.py
from django.views.generic import View
from django.http import HttpResponse
import os, json, datetime
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from vitcloud.models import File
from django.views.decorators.csrf import csrf_exempt
from listingapikeys import findResult
import sys # sys.setdefaultencoding is cancelled by site.py
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
#Custom Functions:
def repeated(fname, fsize, fblock, froom):
if(File.objects.filter(name=fname, size=fsize, block=fblock, room=froom)):
return True
else:
return False
#**Not for Production** Views
def clean(request):
q = File.objects.filter(block__iexact="L")
q.delete()
#Views:
def home(request):
no_of_files = len(File.objects.values_list('name').distinct())
no_of_blocks = len(File.objects.values_list('block').distinct())
no_of_rooms = len(File.objects.values_list('room').distinct())
file_sizes = File.objects.all()
total_file_size = 0
for x in file_sizes:
total_file_size = total_file_size + int(x.size)
total_file_size = (total_file_size/1024)
return render_to_response('home/home.htm' , {'x' : no_of_files, 'y' : no_of_blocks, 'z' : no_of_rooms, 'w' : total_file_size })
def pageSearch(request):
return render_to_response('home/search.htm')
def pageHowitworks(request):
return render_to_response('home/howitworks.htm')
def pageTopsharers(request):
return render_to_response('home/topsharers.htm')
def pageGettheapp(request):
return render_to_response('home/gettheapp.htm')
def search(request):
if request.method == "GET":
if (request.GET['thesearchbox'] == ""):
if ('latest' in request.GET):
results = File.objects.all().order_by("-id")
no = len(results)
paragraph = True
return render_to_response('home/results.htm', {'results' : results, 'paragraph' : paragraph, 'no' : no })
else:
return redirect('/blockwise')
else:
filename = str(request.GET['thesearchbox'])
paragraph = False
results = File.objects.filter(name__icontains=filename).order_by("-id")
no = len(results)
return render_to_response('home/results.htm', {'results' : results, 'paragraph': paragraph, 'no' : no })
def blockwise(request):
blockNames = File.objects.values_list('block').distinct()
for x in blockNames:
print str(x[0])
return render_to_response('home/blockwise.htm', {'blocks' : blockNames})
def blockwiseFeeder(request):
if request.method == "GET":
block = request.GET['block']
blockFiles = File.objects.filter(block__iexact=block).order_by("-id")
return render_to_response('home/blockwiseFeeder.htm', {'block': block, 'results': blockFiles})
def suggestions(request):
if request.method == "GET":
filename = str(request.GET['q'])
results = File.objects.filter(name__icontains=filename)
length = len(results)
suggestions = [filename, []]
for x in range(0, length, 1):
suggestions[1].append(results[x].name)
return HttpResponse(json.dumps(suggestions))
class Block:
name = ""
total = ""
def statistics(request):
finalArray = []
blockNames = []
blockSizes = []
blocks = File.objects.values_list('block').distinct()
for x in blocks:
blockName = str(str(x[0]).upper())
blockName = blockName + " Block"
blockNames.append(str(blockName).encode('utf-8'))
blockFiles = File.objects.filter(block__iexact=x[0])
totalSize = 0
for y in blockFiles:
totalSize = totalSize + int(y.size)
blockSizes.append(totalSize/1024)
return render_to_response('home/stats.htm', { 'blockNames' : blockNames, 'blockSizes' : blockSizes })
def apiFeed(request):
if request.method == "GET":
if("q" in request.GET):
filename = str(request.GET['q'])
result = findResult(filename)
return HttpResponse(json.dumps(result))
else:
return HttpResponse("Need The Required Parameters to work!")
def fileDetails(request):
if request.method == "GET":
filename = str(request.GET['q'])
results = File.objects.filter(name__icontains=filename)
filen = "NOTFOUND.404"
for x in results:
filen = x.name
return render_to_response('home/file.htm', {'results' : results, 'filen': filen })
def submitOne(request):
error = False
if 'filename' in request.GET:
filename = request.GET['filename']
filesize = 100000
fileblock = "MN"
fileroom = "447"
if not filename:
error = True
else:
now = datetime.datetime.now()
p1 = File.objects.create(name=filename, size = filesize, block = fileblock, room = fileroom, date = now)
results = File.objects.all()
return render_to_response('home/success.htm', { 'results': results })
return render_to_response('home/submitone.htm', { 'error': error })
@csrf_exempt
def interface(request):
if request.method == "POST":
data = json.loads(request.body)
currentBlock = str(data['Block'])
currentRoom = str(data['Room'])
currentHostelType = str(data['Hostel'])
no = len(data['Files'])
inserted = 0
data=data['Files']
for x in range(0, no, 2):
data[x+1] = int(data[x+1])
data[x+1] = (data[x+1]/1048576)
if not repeated(fname = data[x], fsize = str(data[x+1]), fblock=currentBlock, froom = currentRoom):
now = datetime.datetime.now()
temp = File.objects.create(name=data[x], size=str(data[x+1]), block = currentBlock, room = currentRoom, date = now)
inserted = (inserted + 1)
files_inserted = inserted
result = "inserted files: \n\n" + str(files_inserted)
return HttpResponse(result)
else:
return HttpResponse("<h2>VITCloud</h2> <h4>Desktop App Interface</h4><br/><br/><strong>Current Status:</strong> Listening at /interface...<br/><br/>Copyright 2012-2013<br/><NAME><br/><NAME>")
| <filename>vitcloud/views.py
from django.views.generic import View
from django.http import HttpResponse
import os, json, datetime
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from vitcloud.models import File
from django.views.decorators.csrf import csrf_exempt
from listingapikeys import findResult
import sys # sys.setdefaultencoding is cancelled by site.py
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
#Custom Functions:
def repeated(fname, fsize, fblock, froom):
if(File.objects.filter(name=fname, size=fsize, block=fblock, room=froom)):
return True
else:
return False
#**Not for Production** Views
def clean(request):
q = File.objects.filter(block__iexact="L")
q.delete()
#Views:
def home(request):
no_of_files = len(File.objects.values_list('name').distinct())
no_of_blocks = len(File.objects.values_list('block').distinct())
no_of_rooms = len(File.objects.values_list('room').distinct())
file_sizes = File.objects.all()
total_file_size = 0
for x in file_sizes:
total_file_size = total_file_size + int(x.size)
total_file_size = (total_file_size/1024)
return render_to_response('home/home.htm' , {'x' : no_of_files, 'y' : no_of_blocks, 'z' : no_of_rooms, 'w' : total_file_size })
def pageSearch(request):
return render_to_response('home/search.htm')
def pageHowitworks(request):
return render_to_response('home/howitworks.htm')
def pageTopsharers(request):
return render_to_response('home/topsharers.htm')
def pageGettheapp(request):
return render_to_response('home/gettheapp.htm')
def search(request):
if request.method == "GET":
if (request.GET['thesearchbox'] == ""):
if ('latest' in request.GET):
results = File.objects.all().order_by("-id")
no = len(results)
paragraph = True
return render_to_response('home/results.htm', {'results' : results, 'paragraph' : paragraph, 'no' : no })
else:
return redirect('/blockwise')
else:
filename = str(request.GET['thesearchbox'])
paragraph = False
results = File.objects.filter(name__icontains=filename).order_by("-id")
no = len(results)
return render_to_response('home/results.htm', {'results' : results, 'paragraph': paragraph, 'no' : no })
def blockwise(request):
blockNames = File.objects.values_list('block').distinct()
for x in blockNames:
print str(x[0])
return render_to_response('home/blockwise.htm', {'blocks' : blockNames})
def blockwiseFeeder(request):
if request.method == "GET":
block = request.GET['block']
blockFiles = File.objects.filter(block__iexact=block).order_by("-id")
return render_to_response('home/blockwiseFeeder.htm', {'block': block, 'results': blockFiles})
def suggestions(request):
if request.method == "GET":
filename = str(request.GET['q'])
results = File.objects.filter(name__icontains=filename)
length = len(results)
suggestions = [filename, []]
for x in range(0, length, 1):
suggestions[1].append(results[x].name)
return HttpResponse(json.dumps(suggestions))
class Block:
name = ""
total = ""
def statistics(request):
finalArray = []
blockNames = []
blockSizes = []
blocks = File.objects.values_list('block').distinct()
for x in blocks:
blockName = str(str(x[0]).upper())
blockName = blockName + " Block"
blockNames.append(str(blockName).encode('utf-8'))
blockFiles = File.objects.filter(block__iexact=x[0])
totalSize = 0
for y in blockFiles:
totalSize = totalSize + int(y.size)
blockSizes.append(totalSize/1024)
return render_to_response('home/stats.htm', { 'blockNames' : blockNames, 'blockSizes' : blockSizes })
def apiFeed(request):
if request.method == "GET":
if("q" in request.GET):
filename = str(request.GET['q'])
result = findResult(filename)
return HttpResponse(json.dumps(result))
else:
return HttpResponse("Need The Required Parameters to work!")
def fileDetails(request):
if request.method == "GET":
filename = str(request.GET['q'])
results = File.objects.filter(name__icontains=filename)
filen = "NOTFOUND.404"
for x in results:
filen = x.name
return render_to_response('home/file.htm', {'results' : results, 'filen': filen })
def submitOne(request):
error = False
if 'filename' in request.GET:
filename = request.GET['filename']
filesize = 100000
fileblock = "MN"
fileroom = "447"
if not filename:
error = True
else:
now = datetime.datetime.now()
p1 = File.objects.create(name=filename, size = filesize, block = fileblock, room = fileroom, date = now)
results = File.objects.all()
return render_to_response('home/success.htm', { 'results': results })
return render_to_response('home/submitone.htm', { 'error': error })
@csrf_exempt
def interface(request):
if request.method == "POST":
data = json.loads(request.body)
currentBlock = str(data['Block'])
currentRoom = str(data['Room'])
currentHostelType = str(data['Hostel'])
no = len(data['Files'])
inserted = 0
data=data['Files']
for x in range(0, no, 2):
data[x+1] = int(data[x+1])
data[x+1] = (data[x+1]/1048576)
if not repeated(fname = data[x], fsize = str(data[x+1]), fblock=currentBlock, froom = currentRoom):
now = datetime.datetime.now()
temp = File.objects.create(name=data[x], size=str(data[x+1]), block = currentBlock, room = currentRoom, date = now)
inserted = (inserted + 1)
files_inserted = inserted
result = "inserted files: \n\n" + str(files_inserted)
return HttpResponse(result)
else:
return HttpResponse("<h2>VITCloud</h2> <h4>Desktop App Interface</h4><br/><br/><strong>Current Status:</strong> Listening at /interface...<br/><br/>Copyright 2012-2013<br/><NAME><br/><NAME>")
| en | 0.60277 | # sys.setdefaultencoding is cancelled by site.py # to re-enable sys.setdefaultencoding() #Custom Functions: #**Not for Production** Views #Views: | 1.88197 | 2 |
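The /interface endpoint expects a JSON body with the room metadata plus a flat Files list that alternates file name and size in bytes (each size is divided by 1048576 before the duplicate check). A possible client payload is sketched below; host, port and values are made up.
# Illustrative desktop-app request; host, port and file data are placeholders.
import requests

payload = {
    "Block": "MN",
    "Room": "447",
    "Hostel": "Mens",
    "Files": ["lecture_notes.pdf", 5242880,        # name followed by size in bytes
              "holiday_pictures.zip", 104857600],
}
response = requests.post("http://localhost:8000/interface", json=payload)
print(response.text)   # "inserted files: <count>" on success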
blurple/ui/base.py | jeremytiki/blurple.py | 4 | 8963 | <reponame>jeremytiki/blurple.py
from abc import ABC
import discord
class Base(discord.Embed, ABC):
async def send(self, client: discord.abc.Messageable):
""" Send the component as a message in discord.
:param client: The client used, usually a :class:`discord.abc.Messageable`. Must have implemented :func:`.send`
:returns: :class:`discord.Message`
"""
return await client.send(embed=self)
| from abc import ABC
import discord
class Base(discord.Embed, ABC):
async def send(self, client: discord.abc.Messageable):
""" Send the component as a message in discord.
:param client: The client used, usually a :class:`discord.abc.Messageable`. Must have implemented :func:`.send`
:returns: :class:`discord.Message`
"""
return await client.send(embed=self) | en | 0.572518 | Send the component as a message in discord. :param client: The client used, usually a :class:`discord.abc.Messageable`. Must have implemented :func:`.send` :returns: :class:`discord.Message` | 3.066206 | 3 |
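Base is meant to be subclassed by concrete UI components that style the embed in __init__ and simply inherit send. A minimal sketch follows; the colour and wording are arbitrary.
# Hypothetical component built on Base; styling choices are illustrative only.
import discord

class Alert(Base):
    def __init__(self, message: str):
        super().__init__(title="Alert", description=message,
                         color=discord.Color.red())

# inside an async command handler one would then write:
#     await Alert("Something went wrong").send(ctx)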
migrations/versions/e86dd3bc539c_change_admin_to_boolean.py | jonzxz/project-piscator | 0 | 8964 | """change admin to boolean
Revision ID: e86dd3bc539c
Revises: <KEY>
Create Date: 2020-11-11 22:32:00.707936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e86dd3bc539c'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('email_address', sa.Column('active', sa.Boolean(), nullable=False))
op.add_column('email_address', sa.Column('email_password', sa.String(length=255), nullable=False))
op.add_column('email_address', sa.Column('last_mailbox_size', sa.Integer(), nullable=True))
op.add_column('email_address', sa.Column('last_updated', sa.DateTime(), nullable=True))
op.add_column('email_address', sa.Column('phishing_mail_detected', sa.Integer(), nullable=True))
op.add_column('user', sa.Column('is_active', sa.Boolean(), nullable=False))
op.add_column('user', sa.Column('is_admin', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('last_logged_in', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_logged_in')
op.drop_column('user', 'is_admin')
op.drop_column('user', 'is_active')
op.drop_column('email_address', 'phishing_mail_detected')
op.drop_column('email_address', 'last_updated')
op.drop_column('email_address', 'last_mailbox_size')
op.drop_column('email_address', 'email_password')
op.drop_column('email_address', 'active')
# ### end Alembic commands ###
| """change admin to boolean
Revision ID: e86dd3bc539c
Revises: <KEY>
Create Date: 2020-11-11 22:32:00.707936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e86dd3bc539c'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('email_address', sa.Column('active', sa.Boolean(), nullable=False))
op.add_column('email_address', sa.Column('email_password', sa.String(length=255), nullable=False))
op.add_column('email_address', sa.Column('last_mailbox_size', sa.Integer(), nullable=True))
op.add_column('email_address', sa.Column('last_updated', sa.DateTime(), nullable=True))
op.add_column('email_address', sa.Column('phishing_mail_detected', sa.Integer(), nullable=True))
op.add_column('user', sa.Column('is_active', sa.Boolean(), nullable=False))
op.add_column('user', sa.Column('is_admin', sa.Boolean(), nullable=True))
op.add_column('user', sa.Column('last_logged_in', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_logged_in')
op.drop_column('user', 'is_admin')
op.drop_column('user', 'is_active')
op.drop_column('email_address', 'phishing_mail_detected')
op.drop_column('email_address', 'last_updated')
op.drop_column('email_address', 'last_mailbox_size')
op.drop_column('email_address', 'email_password')
op.drop_column('email_address', 'active')
# ### end Alembic commands ###
| en | 0.52941 | change admin to boolean Revision ID: e86dd3bc539c Revises: <KEY> Create Date: 2020-11-11 22:32:00.707936 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.528658 | 2 |
school/migrations/0010_alter_sala_unique_together.py | adrianomqsmts/django-escola | 0 | 8965 | <reponame>adrianomqsmts/django-escola
# Generated by Django 4.0.3 on 2022-03-16 03:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('school', '0009_rename_periodo_semestre_alter_semestre_options_and_more'),
]
operations = [
migrations.AlterUniqueTogether(
name='sala',
unique_together={('porta', 'predio')},
),
]
| # Generated by Django 4.0.3 on 2022-03-16 03:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('school', '0009_rename_periodo_semestre_alter_semestre_options_and_more'),
]
operations = [
migrations.AlterUniqueTogether(
name='sala',
unique_together={('porta', 'predio')},
),
] | en | 0.786966 | # Generated by Django 4.0.3 on 2022-03-16 03:09 | 1.548085 | 2 |
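The AlterUniqueTogether operation mirrors a unique_together declaration on the Sala model's Meta; a sketch of the model state it encodes is below — the field names come from the migration, the field types are assumptions.
# Hypothetical model state matching the migration; field types are assumptions.
from django.db import models

class Sala(models.Model):
    porta = models.CharField(max_length=10)
    predio = models.CharField(max_length=50)

    class Meta:
        unique_together = [('porta', 'predio')]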
code_trunk/trainer/abc.py | chris4540/DD2430-ds-proj | 0 | 8966 | """
Abstract training class
"""
from abc import ABC as AbstractBaseClass
from abc import abstractmethod
class AdstractTrainer(AbstractBaseClass):
@abstractmethod
def run(self):
pass
@abstractmethod
def prepare_data_loaders(self):
"""
For preparing data loaders and save them as instance attributes
"""
pass
@abstractmethod
def prepare_exp_settings(self):
"""
Define the components that are set up before the actual run. For example:
- Optimizer
- Model
"""
pass
@abstractmethod
def prepare_logging(self):
pass
| """
Abstract training class
"""
from abc import ABC as AbstractBaseClass
from abc import abstractmethod
class AdstractTrainer(AbstractBaseClass):
@abstractmethod
def run(self):
pass
@abstractmethod
def prepare_data_loaders(self):
"""
For preparing data loaders and save them as instance attributes
"""
pass
@abstractmethod
def prepare_exp_settings(self):
"""
Define the components that are set up before the actual run. For example:
- Optimizer
- Model
"""
pass
@abstractmethod
def prepare_logging(self):
pass
| en | 0.841932 | Abstract training class For preparing data loaders and save them as instance attributes Define stuff which are before the actual run. For example: - Optimizer - Model | 3.231257 | 3 |
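A concrete trainer only has to fill in the four hooks; the sketch below is hypothetical and mainly illustrates a sensible call order inside run.
# Hypothetical concrete trainer; the attributes created in each hook are assumptions.
class ClassifierTrainer(AdstractTrainer):
    def prepare_data_loaders(self):
        self.train_loader = ...   # build and store the data loaders here
        self.val_loader = ...

    def prepare_exp_settings(self):
        self.model = ...          # model, optimizer, schedulers, ...
        self.optimizer = ...

    def prepare_logging(self):
        self.logger = ...         # e.g. a TensorBoard or CSV writer

    def run(self):
        self.prepare_data_loaders()
        self.prepare_exp_settings()
        self.prepare_logging()
        # the actual training loop would go here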
quadpy/triangle/cools_haegemans.py | melvyniandrag/quadpy | 1 | 8967 | <reponame>melvyniandrag/quadpy
# -*- coding: utf-8 -*-
#
from mpmath import mp
from .helpers import untangle2
class CoolsHaegemans(object):
"""
<NAME>, <NAME>,
Construction of minimal cubature formulae for the square and the triangle
using invariant theory,
Department of Computer Science, K.U.Leuven,
TW Reports vol:TW96, Sept. 1987,
<https://lirias.kuleuven.be/handle/123456789/131869>.
"""
def __init__(self, index, mpmath=False):
self.name = "CoolsHaegemans({})".format(index)
assert index == 1
self.degree = 8
flt = mp.mpf if mpmath else float
mp.dps = 20
data = {
"rot": [
[
flt("0.16058343856681218798E-09"),
flt("0.34579201116826902882E+00"),
flt("0.36231682215692616667E+01"),
],
[
flt("0.26530624434780379347E-01"),
flt("0.65101993458939166328E-01"),
flt("0.87016510156356306078E+00"),
],
[
flt("0.29285717640155892159E-01"),
flt("0.65177530364879570754E+00"),
flt("0.31347788752373300717E+00"),
],
[
flt("0.43909556791220782402E-01"),
flt("0.31325121067172530696E+00"),
flt("0.63062143431895614010E+00"),
],
[
flt("0.66940767639916174192E-01"),
flt("0.51334692063945414949E+00"),
flt("0.28104124731511039057E+00"),
],
]
}
# elif index == 2:
# self.degree = 10
# data = [
# (0.15319130036758557631E-06_r3(+0.58469201683584513031E-01, -0.54887778772527519316E+00)),
# (0.13260526227928785221E-01_r3(0.50849285064031410705E-01, 0.90799059794957813439E+00)),
# (0.15646439344539042136E-01_r3(0.51586732419949574487E+00, 0.46312452842927062902E+00)),
# (0.21704258224807323311E-01_r3(0.24311033191739048230E+00, 0.72180595182371959467E-00)),
# (0.21797613600129922367E-01_r3(0.75397765920922660134E-00, 0.20647569839132397633E+00)),
# (0.38587913508193459468E-01_r3(0.42209207910846960294E-00, 0.12689533413411127327E+00)),
# (0.39699584282594413022E-01_r3(0.19823878346663354068E+00, 0.62124412566393319745E+00)),
# (0.47910534861520060665E-01numpy.array([[1.0/3.0, 1.0/3.0, 1.0/3.0]])
# ]
self.bary, self.weights = untangle2(data)
self.points = self.bary[:, 1:]
self.weights *= 2
return
| # -*- coding: utf-8 -*-
#
from mpmath import mp
from .helpers import untangle2
class CoolsHaegemans(object):
"""
<NAME>, <NAME>,
Construction of minimal cubature formulae for the square and the triangle
using invariant theory,
Department of Computer Science, K.U.Leuven,
TW Reports vol:TW96, Sept. 1987,
<https://lirias.kuleuven.be/handle/123456789/131869>.
"""
def __init__(self, index, mpmath=False):
self.name = "CoolsHaegemans({})".format(index)
assert index == 1
self.degree = 8
flt = mp.mpf if mpmath else float
mp.dps = 20
data = {
"rot": [
[
flt("0.16058343856681218798E-09"),
flt("0.34579201116826902882E+00"),
flt("0.36231682215692616667E+01"),
],
[
flt("0.26530624434780379347E-01"),
flt("0.65101993458939166328E-01"),
flt("0.87016510156356306078E+00"),
],
[
flt("0.29285717640155892159E-01"),
flt("0.65177530364879570754E+00"),
flt("0.31347788752373300717E+00"),
],
[
flt("0.43909556791220782402E-01"),
flt("0.31325121067172530696E+00"),
flt("0.63062143431895614010E+00"),
],
[
flt("0.66940767639916174192E-01"),
flt("0.51334692063945414949E+00"),
flt("0.28104124731511039057E+00"),
],
]
}
# elif index == 2:
# self.degree = 10
# data = [
# (0.15319130036758557631E-06_r3(+0.58469201683584513031E-01, -0.54887778772527519316E+00)),
# (0.13260526227928785221E-01_r3(0.50849285064031410705E-01, 0.90799059794957813439E+00)),
# (0.15646439344539042136E-01_r3(0.51586732419949574487E+00, 0.46312452842927062902E+00)),
# (0.21704258224807323311E-01_r3(0.24311033191739048230E+00, 0.72180595182371959467E-00)),
# (0.21797613600129922367E-01_r3(0.75397765920922660134E-00, 0.20647569839132397633E+00)),
# (0.38587913508193459468E-01_r3(0.42209207910846960294E-00, 0.12689533413411127327E+00)),
# (0.39699584282594413022E-01_r3(0.19823878346663354068E+00, 0.62124412566393319745E+00)),
# (0.47910534861520060665E-01numpy.array([[1.0/3.0, 1.0/3.0, 1.0/3.0]])
# ]
self.bary, self.weights = untangle2(data)
self.points = self.bary[:, 1:]
self.weights *= 2
return | en | 0.512785 | # -*- coding: utf-8 -*- # <NAME>, <NAME>, Construction of minimal cubature formulae for the square and the triangle using invariant theory, Department of Computer Science, K.U.Leuven, TW Reports vol:TW96, Sept. 1987, <https://lirias.kuleuven.be/handle/123456789/131869>. # elif index == 2: # self.degree = 10 # data = [ # (0.15319130036758557631E-06_r3(+0.58469201683584513031E-01, -0.54887778772527519316E+00)), # (0.13260526227928785221E-01_r3(0.50849285064031410705E-01, 0.90799059794957813439E+00)), # (0.15646439344539042136E-01_r3(0.51586732419949574487E+00, 0.46312452842927062902E+00)), # (0.21704258224807323311E-01_r3(0.24311033191739048230E+00, 0.72180595182371959467E-00)), # (0.21797613600129922367E-01_r3(0.75397765920922660134E-00, 0.20647569839132397633E+00)), # (0.38587913508193459468E-01_r3(0.42209207910846960294E-00, 0.12689533413411127327E+00)), # (0.39699584282594413022E-01_r3(0.19823878346663354068E+00, 0.62124412566393319745E+00)), # (0.47910534861520060665E-01numpy.array([[1.0/3.0, 1.0/3.0, 1.0/3.0]]) # ] | 2.69609 | 3 |
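After the final weights *= 2 the stored weights sum (up to rounding) to one, so integrating over a physical triangle reduces to the triangle's area times a weighted sum of function values at the mapped barycentric points. The helper below sketches that; it is not part of the quadpy API.
# Illustrative helper, not the quadpy API: integrates f over a triangle using the
# barycentric points and weights stored on a CoolsHaegemans instance.
import numpy

def integrate_on_triangle(f, triangle, scheme):
    triangle = numpy.asarray(triangle, dtype=float)          # shape (3, 2)
    points = scheme.bary @ triangle                          # barycentric -> physical
    area = 0.5 * abs(numpy.linalg.det(triangle[1:] - triangle[0]))
    return area * numpy.dot(scheme.weights, [f(p) for p in points])

scheme = CoolsHaegemans(1)
value = integrate_on_triangle(lambda p: p[0] * p[1],
                              [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]], scheme)
print(value)   # exact value for x*y on the unit triangle is 1/24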
account/admin.py | RichardLeeH/invoce_sys | 0 | 8968 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from account.models import Profile
admin.site.site_header = 'invoce'
class TokenAdmin(admin.ModelAdmin):
list_display = ('key', 'uid', 'user', 'created')
fields = ('user',)
ordering = ('-created',)
def uid(self, obj):
return obj.user.id
    uid.short_description = u'User ID'
admin.site.unregister(Token)
admin.site.register(Token, TokenAdmin)
class ProfileInline(admin.StackedInline):
model = Profile
class UserCustomAdmin(UserAdmin):
list_display = ('id', 'username', 'email', 'is_active', 'is_staff')
inlines = (ProfileInline, )
ordering = ('-id', )
admin.site.unregister(User)
admin.site.register(User, UserCustomAdmin)
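# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file).  ProfileInline above
# assumes a Profile model related to User; the real fields of
# account.models.Profile are not visible here, so the minimal model below is
# an assumption shown purely for illustration.
#
# from django.db import models
#
# class Profile(models.Model):
#     user = models.OneToOneField(User, on_delete=models.CASCADE)
#     nickname = models.CharField(max_length=64, blank=True)  # hypothetical field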
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from account.models import Profile
admin.site.site_header = 'invoce'
class TokenAdmin(admin.ModelAdmin):
list_display = ('key', 'uid', 'user', 'created')
fields = ('user',)
ordering = ('-created',)
def uid(self, obj):
return obj.user.id
    uid.short_description = u'User ID'
admin.site.unregister(Token)
admin.site.register(Token, TokenAdmin)
class ProfileInline(admin.StackedInline):
model = Profile
class UserCustomAdmin(UserAdmin):
list_display = ('id', 'username', 'email', 'is_active', 'is_staff')
inlines = (ProfileInline, )
ordering = ('-id', )
admin.site.unregister(User)
admin.site.register(User, UserCustomAdmin) | en | 0.769321 | # -*- coding: utf-8 -*- | 1.918512 | 2 |
oops/#016exceptions.py | krishankansal/PythonPrograms | 0 | 8969 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 08:40:11 2020
@author: krishan
"""
def funny_division2(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except (ZeroDivisionError, TypeError):
return "Enter a number other than zero"
def funny_division3(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except ZeroDivisionError:
return "Enter a number other than zero"
except TypeError:
return "Enter a numerical value"
except ValueError as e:
print("The exception arguments were",e.args)
#raise
for val in (0, "hello", 50.0, 13):
print(f"Testing {val}:", funny_division3(val))
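# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): if the commented-out
# `raise` in the ValueError handler were enabled, funny_division3 would print
# the exception arguments and then re-raise, letting the caller handle it too:
#
# try:
#     funny_division3(13)
# except ValueError as exc:
#     print("caught re-raised exception:", exc)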
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 08:40:11 2020
@author: krishan
"""
def funny_division2(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except (ZeroDivisionError, TypeError):
return "Enter a number other than zero"
def funny_division3(anumber):
try:
if anumber == 13:
raise ValueError("13 is an unlucky number")
return 100 / anumber
except ZeroDivisionError:
return "Enter a number other than zero"
except TypeError:
return "Enter a numerical value"
except ValueError as e:
print("The exception arguments were",e.args)
#raise
for val in (0, "hello", 50.0, 13):
print(f"Testing {val}:", funny_division3(val)) | en | 0.555924 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Jun 18 08:40:11 2020 @author: krishan #raise | 3.892881 | 4 |
config/simclr_config.py | denn-s/SimCLR | 5 | 8970 | <filename>config/simclr_config.py
import os
from datetime import datetime
import torch
from dataclasses import dataclass
class SimCLRConfig:
@dataclass()
class Base:
output_dir_path: str
log_dir_path: str
log_file_path: str
device: object
num_gpu: int
logger_name: str
@dataclass()
class Train:
# batch_size as usual. examples: 16,32,..
batch_size: int
# number of workers to be used for data loading. examples: 2,4,...
num_workers: int
# start training with this epoch. most likely: 0
start_epoch: int
# in case of restart this is where the saved model is expected to be located
restart_log_dir_path: str
# end training with this epoch. examples: 10, 100,...
epochs: int
# directory where the datasets are located. example: "/home/USER_NAME/Data"
data_dir_path: str
# dataset name. options: ["CIFAR10", "STL10", "iNaturalist2019", "ImageNet"]
dataset: str
# save trained model every n epochs. examples: 1,5,10,...
save_num_epochs: int
# image size obtained from last data preparation step
img_size: int
# name of the optimizer. options: ["Adam", "LARS"]
# TODO: implement LARS ptimizer
optimizer: str
weight_decay: float
temperature: float
global_step: int
current_epoch: int
@dataclass()
class Model:
# model architecture. options: ["resnet18", "resnet50"]
resnet: str
normalize: bool
projection_dim: int
@dataclass()
class SimCLR:
train: object
model: object
@dataclass()
class LogisticRegression:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
model_path: str
epoch_num: int
@dataclass()
class FineTuning:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
save_num_epochs: int
# decay "learning_rate" by a factor of "gamma" every "step_size" epochs
gamma: float
step_size: int
model_path: str
epoch_num: int
@dataclass()
class ONNX:
batch_size: int
img_size: int
model_path: str
epoch_num: int
def __init__(self, config):
global_step = 0
current_epoch = 0
simclr_train = SimCLRConfig.Train(**config['simclr']['train'], global_step=global_step,
current_epoch=current_epoch)
simclr_model = SimCLRConfig.Model(**config['simclr']['model'])
self.simclr = SimCLRConfig.SimCLR(simclr_train, simclr_model)
model_path = None
epoch_num = None
self.logistic_regression = SimCLRConfig.LogisticRegression(**config['logistic_regression'],
model_path=model_path, epoch_num=epoch_num)
model_path = None
epoch_num = None
self.fine_tuning = SimCLRConfig.FineTuning(**config['fine_tuning'], model_path=model_path,
epoch_num=epoch_num)
model_path = None
epoch_num = None
self.onnx = SimCLRConfig.ONNX(**config['onnx'], model_path=model_path, epoch_num=epoch_num)
logger_name = config['logger_name']
output_dir_path = 'output'
now = datetime.now()
dt_string: str = now.strftime("%Y_%m_%d_%H_%M_%S")
log_dir_name = dt_string + '_' + logger_name + '_' + self.simclr.train.dataset.lower()
log_dir_path = os.path.join(output_dir_path, log_dir_name)
log_file_path = os.path.join(log_dir_path, 'log.txt')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_gpu = torch.cuda.device_count()
self.base = SimCLRConfig.Base(output_dir_path, log_dir_path, log_file_path, device, num_gpu, logger_name)
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
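# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file).  The key names
# below mirror the dataclass fields defined above; every value is a made-up
# placeholder chosen only to show how the expected config dict is assembled.
if __name__ == "__main__":
    _example_config = {
        "logger_name": "simclr",
        "simclr": {
            "train": {
                "batch_size": 256, "num_workers": 4, "start_epoch": 0,
                "restart_log_dir_path": "", "epochs": 100,
                "data_dir_path": "/tmp/data", "dataset": "CIFAR10",
                "save_num_epochs": 10, "img_size": 32, "optimizer": "Adam",
                "weight_decay": 1e-6, "temperature": 0.5,
            },
            "model": {"resnet": "resnet18", "normalize": True,
                      "projection_dim": 64},
        },
        "logistic_regression": {"epochs": 100, "batch_size": 256,
                                "learning_rate": 1e-3, "momentum": 0.9,
                                "img_size": 32},
        "fine_tuning": {"epochs": 20, "batch_size": 64, "learning_rate": 1e-3,
                        "momentum": 0.9, "img_size": 32, "save_num_epochs": 5,
                        "gamma": 0.1, "step_size": 10},
        "onnx": {"batch_size": 1, "img_size": 32},
    }
    print(SimCLRConfig(_example_config))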
| <filename>config/simclr_config.py
import os
from datetime import datetime
import torch
from dataclasses import dataclass
class SimCLRConfig:
@dataclass()
class Base:
output_dir_path: str
log_dir_path: str
log_file_path: str
device: object
num_gpu: int
logger_name: str
@dataclass()
class Train:
# batch_size as usual. examples: 16,32,..
batch_size: int
# number of workers to be used for data loading. examples: 2,4,...
num_workers: int
# start training with this epoch. most likely: 0
start_epoch: int
# in case of restart this is where the saved model is expected to be located
restart_log_dir_path: str
# end training with this epoch. examples: 10, 100,...
epochs: int
# directory where the datasets are located. example: "/home/USER_NAME/Data"
data_dir_path: str
# dataset name. options: ["CIFAR10", "STL10", "iNaturalist2019", "ImageNet"]
dataset: str
# save trained model every n epochs. examples: 1,5,10,...
save_num_epochs: int
# image size obtained from last data preparation step
img_size: int
# name of the optimizer. options: ["Adam", "LARS"]
# TODO: implement LARS ptimizer
optimizer: str
weight_decay: float
temperature: float
global_step: int
current_epoch: int
@dataclass()
class Model:
# model architecture. options: ["resnet18", "resnet50"]
resnet: str
normalize: bool
projection_dim: int
@dataclass()
class SimCLR:
train: object
model: object
@dataclass()
class LogisticRegression:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
model_path: str
epoch_num: int
@dataclass()
class FineTuning:
epochs: int
batch_size: int
learning_rate: float
momentum: float
img_size: int
save_num_epochs: int
# decay "learning_rate" by a factor of "gamma" every "step_size" epochs
gamma: float
step_size: int
model_path: str
epoch_num: int
@dataclass()
class ONNX:
batch_size: int
img_size: int
model_path: str
epoch_num: int
def __init__(self, config):
global_step = 0
current_epoch = 0
simclr_train = SimCLRConfig.Train(**config['simclr']['train'], global_step=global_step,
current_epoch=current_epoch)
simclr_model = SimCLRConfig.Model(**config['simclr']['model'])
self.simclr = SimCLRConfig.SimCLR(simclr_train, simclr_model)
model_path = None
epoch_num = None
self.logistic_regression = SimCLRConfig.LogisticRegression(**config['logistic_regression'],
model_path=model_path, epoch_num=epoch_num)
model_path = None
epoch_num = None
self.fine_tuning = SimCLRConfig.FineTuning(**config['fine_tuning'], model_path=model_path,
epoch_num=epoch_num)
model_path = None
epoch_num = None
self.onnx = SimCLRConfig.ONNX(**config['onnx'], model_path=model_path, epoch_num=epoch_num)
logger_name = config['logger_name']
output_dir_path = 'output'
now = datetime.now()
dt_string: str = now.strftime("%Y_%m_%d_%H_%M_%S")
log_dir_name = dt_string + '_' + logger_name + '_' + self.simclr.train.dataset.lower()
log_dir_path = os.path.join(output_dir_path, log_dir_name)
log_file_path = os.path.join(log_dir_path, 'log.txt')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_gpu = torch.cuda.device_count()
self.base = SimCLRConfig.Base(output_dir_path, log_dir_path, log_file_path, device, num_gpu, logger_name)
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
| en | 0.843267 | # batch_size as usual. examples: 16,32,.. # number of workers to be used for data loading. examples: 2,4,... # start training with this epoch. most likely: 0 # in case of restart this is where the saved model is expected to be located # end training with this epoch. examples: 10, 100,... # directory where the datasets are located. example: "/home/USER_NAME/Data" # dataset name. options: ["CIFAR10", "STL10", "iNaturalist2019", "ImageNet"] # save trained model every n epochs. examples: 1,5,10,... # image size obtained from last data preparation step # name of the optimizer. options: ["Adam", "LARS"] # TODO: implement LARS ptimizer # model architecture. options: ["resnet18", "resnet50"] # decay "learning_rate" by a factor of "gamma" every "step_size" epochs | 2.546368 | 3 |
test/unit/common/middleware/s3api/test_obj.py | Priyanka-Askani/swift | 1 | 8971 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import hashlib
import os
from os.path import join
import time
from mock import patch
from swift.common import swob
from swift.common.swob import Request
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
from swift.common.middleware.s3api.subresource import ACL, User, encode_acl, \
Owner, Grant
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import mktime, S3Timestamp
from test.unit.common.middleware.s3api.helpers import FakeSwift
def _wrap_fake_auth_middleware(org_func):
def fake_fake_auth_middleware(self, env):
org_func(env)
if 'swift.authorize_override' in env:
return
if 'HTTP_AUTHORIZATION' not in env:
return
_, authorization = env['HTTP_AUTHORIZATION'].split(' ')
tenant_user, sign = authorization.rsplit(':', 1)
tenant, user = tenant_user.rsplit(':', 1)
env['HTTP_X_TENANT_NAME'] = tenant
env['HTTP_X_USER_NAME'] = user
return fake_fake_auth_middleware
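# Illustrative note (not part of the original file): for the header value
# 'AWS test:tester:hmac', the parsing above yields sign='hmac',
# tenant='test' and user='tester', which are then exposed through the
# keystone-style HTTP_X_TENANT_NAME / HTTP_X_USER_NAME environ keys.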
class TestS3ApiObj(S3ApiTestCase):
def setUp(self):
super(TestS3ApiObj, self).setUp()
self.object_body = 'hello'
self.etag = hashlib.md5(self.object_body).hexdigest()
self.last_modified = 'Fri, 01 Apr 2014 12:00:00 GMT'
self.response_headers = {'Content-Type': 'text/html',
'Content-Length': len(self.object_body),
'Content-Disposition': 'inline',
'Content-Language': 'en',
'x-object-meta-test': 'swift',
'etag': self.etag,
'last-modified': self.last_modified,
'expires': 'Mon, 21 Sep 2015 12:00:00 GMT',
'x-robots-tag': 'nofollow',
'cache-control': 'private'}
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPCreated,
{'etag': self.etag,
'last-modified': self.last_modified,
'x-object-meta-something': 'oh hai'},
None)
def _test_object_GETorHEAD(self, method):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
unexpected_headers = []
for key, val in self.response_headers.iteritems():
if key in ('Content-Length', 'Content-Type', 'content-encoding',
'last-modified', 'cache-control', 'Content-Disposition',
'Content-Language', 'expires', 'x-robots-tag'):
self.assertIn(key, headers)
self.assertEqual(headers[key], str(val))
elif key == 'etag':
self.assertEqual(headers[key], '"%s"' % val)
elif key.startswith('x-object-meta-'):
self.assertIn('x-amz-meta-' + key[14:], headers)
self.assertEqual(headers['x-amz-meta-' + key[14:]], val)
else:
unexpected_headers.append((key, val))
if unexpected_headers:
self.fail('unexpected headers: %r' % unexpected_headers)
self.assertEqual(headers['etag'],
'"%s"' % self.response_headers['etag'])
if method == 'GET':
self.assertEqual(body, self.object_body)
@s3acl
def test_object_HEAD_error(self):
        # HEAD does not return a body, even for an error response, per the
        # REST API specification.
        # So, only the response code is checked in these HEAD error tests.
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPUnauthorized, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPForbidden, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '404')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '412')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServerError, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServiceUnavailable, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
def test_object_HEAD(self):
self._test_object_GETorHEAD('HEAD')
def _test_object_HEAD_Range(self, range_value):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': range_value,
'Date': self.get_date_header()})
return self.call_s3api(req)
@s3acl
def test_object_HEAD_Range_with_invalid_value(self):
range_value = ''
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'hoge'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes='
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-10'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '416')
@s3acl
def test_object_HEAD_Range(self):
# update response headers
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
range_value = 'bytes=0-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=3-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '1')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 3-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=1-'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 1-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '3')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 2-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
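    # Worked example for the range assertions above (illustrative note, not
    # part of the original file): the object body is 'hello' (5 bytes,
    # indices 0-4), so 'bytes=1-' covers bytes 1-4 (length 4) and the suffix
    # range 'bytes=-3' covers the last three bytes, i.e. bytes 2-4 (length 3).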
@s3acl
def test_object_GET_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'PreconditionFailed')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
@s3acl
def test_object_GET(self):
self._test_object_GETorHEAD('GET')
@s3acl(s3acl_only=True)
def test_object_GET_with_s3acl_and_keystone(self):
        # for passing the keystone authentication route
fake_auth = self.swift._fake_auth_middleware
with patch.object(FakeSwift, '_fake_auth_middleware',
_wrap_fake_auth_middleware(fake_auth)):
self._test_object_GETorHEAD('GET')
_, _, headers = self.swift.calls_with_headers[-1]
self.assertNotIn('Authorization', headers)
_, _, headers = self.swift.calls_with_headers[0]
self.assertNotIn('Authorization', headers)
@s3acl
def test_object_GET_Range(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': 'bytes=0-3',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
@s3acl
def test_object_GET_Range_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPRequestedRangeNotSatisfiable)
self.assertEqual(code, 'InvalidRange')
@s3acl
def test_object_GET_Response(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING':
'response-content-type=%s&'
'response-content-language=%s&'
'response-expires=%s&'
'response-cache-control=%s&'
'response-content-disposition=%s&'
'response-content-encoding=%s&'
% ('text/plain', 'en',
'Fri, 01 Apr 2014 12:00:00 GMT',
'no-cache',
'attachment',
'gzip')},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-type' in headers)
self.assertEqual(headers['content-type'], 'text/plain')
self.assertTrue('content-language' in headers)
self.assertEqual(headers['content-language'], 'en')
self.assertTrue('expires' in headers)
self.assertEqual(headers['expires'], 'Fri, 01 Apr 2014 12:00:00 GMT')
self.assertTrue('cache-control' in headers)
self.assertEqual(headers['cache-control'], 'no-cache')
self.assertTrue('content-disposition' in headers)
self.assertEqual(headers['content-disposition'],
'attachment')
self.assertTrue('content-encoding' in headers)
self.assertEqual(headers['content-encoding'], 'gzip')
@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestEntityTooLarge)
self.assertEqual(code, 'EntityTooLarge')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnprocessableEntity)
self.assertEqual(code, 'BadDigest')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPLengthRequired)
self.assertEqual(code, 'MissingContentLength')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': ''})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?foo=bar'})
self.assertEqual(code, 'InvalidArgument')
        # adding other query parameters will cause an error
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo&bar=baz'})
self.assertEqual(code, 'InvalidArgument')
        # ...even if versionId appears last
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?bar=baz&versionId=foo'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo'})
self.assertEqual(code, 'NotImplemented')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/src_bucket/src_object',
'X-Amz-Copy-Source-Range': 'bytes=0-0'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestTimeout)
self.assertEqual(code, 'RequestTimeout')
@s3acl
def test_object_PUT(self):
etag = self.response_headers['etag']
content_md5 = etag.decode('hex').encode('base64').strip()
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'x-amz-storage-class': 'STANDARD',
'Content-MD5': content_md5,
'Date': self.get_date_header()},
body=self.object_body)
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
# Check that s3api returns an etag header.
self.assertEqual(headers['etag'], '"%s"' % etag)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that s3api converts a Content-MD5 header into an etag.
self.assertEqual(headers['etag'], etag)
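    # Illustrative note (not part of the original file): the Content-MD5
    # header sent above is the base64 encoding of the raw MD5 digest, while
    # the etag is its hex encoding.  A rough Python 3 equivalent of the
    # Python 2 expression etag.decode('hex').encode('base64').strip(),
    # shown only as an assumption for illustration:
    #
    #   import base64, binascii
    #   content_md5 = base64.b64encode(binascii.unhexlify(etag)).decode('ascii')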
def test_object_PUT_headers(self):
content_md5 = self.etag.decode('hex').encode('base64').strip()
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
swob.HTTPOk, {'last-modified': self.last_modified},
None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'X-Amz-Storage-Class': 'STANDARD',
'X-Amz-Meta-Something': 'oh hai',
'X-Amz-Meta-Unreadable-Prefix': '\x04w',
'X-Amz-Meta-Unreadable-Suffix': 'h\x04',
'X-Amz-Meta-Lots-Of-Unprintable': 5 * '\x04',
'X-Amz-Copy-Source': '/some/source',
'Content-MD5': content_md5,
'Date': self.get_date_header()})
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
        # Check that s3api does not return an etag header when a copy
        # source is specified.
self.assertTrue(headers.get('etag') is None)
# Check that s3api does not return custom metadata in response
self.assertTrue(headers.get('x-amz-meta-something') is None)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that s3api converts a Content-MD5 header into an etag.
self.assertEqual(headers['ETag'], self.etag)
self.assertEqual(headers['X-Object-Meta-Something'], 'oh hai')
self.assertEqual(headers['X-Object-Meta-Unreadable-Prefix'],
'=?UTF-8?Q?=04w?=')
self.assertEqual(headers['X-Object-Meta-Unreadable-Suffix'],
'=?UTF-8?Q?h=04?=')
self.assertEqual(headers['X-Object-Meta-Lots-Of-Unprintable'],
'=?UTF-8?B?BAQEBAQ=?=')
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
def _test_object_PUT_copy(self, head_resp, put_header=None,
src_path='/some/source', timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy(src_path, put_header, timestamp)
def _test_object_PUT_copy_self(self, head_resp,
put_header=None, timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy('/bucket/object', put_header, timestamp)
def _call_object_copy(self, src_path, put_header, timestamp=None):
put_headers = {'Authorization': 'AWS test:tester:hmac',
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()}
put_headers.update(put_header)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers)
req.date = datetime.now()
req.content_type = 'text/plain'
timestamp = timestamp or time.time()
with patch('swift.common.middleware.s3api.utils.time.time',
return_value=timestamp):
return self.call_s3api(req)
@s3acl
def test_object_PUT_copy(self):
def do_test(src_path=None):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
status, headers, body = self._test_object_PUT_copy(
swob.HTTPOk, put_header={'Date': date_header},
timestamp=timestamp, src_path=src_path)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
self.assertTrue(headers.get('x-amz-meta-something') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
do_test('/some/source')
do_test('/some/source?')
do_test('/some/source?versionId=null')
# Some clients (like Boto) don't include the leading slash;
# AWS seems to tolerate this so we should, too
do_test('some/source')
@s3acl
def test_object_PUT_copy_self(self):
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_copy(self):
header = {'x-amz-metadata-directive': 'COPY'}
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_replace(self):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
header = {'x-amz-metadata-directive': 'REPLACE',
'Date': date_header}
status, headers, body = self._test_object_PUT_copy_self(
swob.HTTPOk, header, timestamp=timestamp)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/bucket/object')
self.assertEqual(headers['Content-Length'], '0')
@s3acl
def test_object_PUT_copy_headers_error(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-None-Match': etag}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = \
{'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
def test_object_PUT_copy_headers_with_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 3)
        # After the copy-source check succeeds in the s3acl case, s3api
        # checks the bucket write permissions of the destination.
_, _, headers = self.swift.calls_with_headers[-2]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
def test_object_PUT_copy_headers_with_not_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_not_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
        # After the copy-source check succeeds in the s3acl case, s3api
        # checks the bucket write permissions of the destination.
self.assertEqual(len(self.swift.calls_with_headers), 3)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl
def test_object_POST_error(self):
code = self._test_method_error('POST', '/bucket/object', None)
self.assertEqual(code, 'NotImplemented')
@s3acl
def test_object_DELETE_error(self):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
with patch(
'swift.common.middleware.s3api.s3request.get_container_info',
return_value={'status': 204}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
with patch(
'swift.common.middleware.s3api.s3request.get_container_info',
return_value={'status': 404}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
@s3acl
def test_object_DELETE_no_multipart(self):
self.s3api.conf.allow_multipart_uploads = False
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertNotIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_object_DELETE_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_slo_object_DELETE(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk,
{'x-static-large-object': 'True'},
None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, {}, '<SLO delete results>')
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Content-Type': 'foo/bar'})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual(body, '')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'
'?multipart-manifest=delete'),
self.swift.calls)
_, path, headers = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEqual(query['multipart-manifest'], 'delete')
self.assertNotIn('Content-Type', headers)
def _test_object_for_s3acl(self, method, account):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS %s:hmac' % account,
'Date': self.get_date_header()})
return self.call_s3api(req)
def _test_set_container_permission(self, account, permission):
grants = [Grant(User(account), permission)]
headers = \
encode_acl('container',
ACL(Owner('test:tester', 'test:tester'), grants))
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, headers, None)
@s3acl(s3acl_only=True)
def test_object_GET_without_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_GET_with_read_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:read')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_GET_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_for_s3acl('GET', 'test:full_control')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_without_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_PUT_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = \
self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_DELETE_without_permission(self):
account = 'test:other'
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('DELETE',
'test:tester')
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = self._test_object_for_s3acl('DELETE', account)
self.assertEqual(status.split()[0], '204')
def _test_object_copy_for_s3acl(self, account, src_permission=None,
src_path='/src_bucket/src_obj'):
owner = 'test:tester'
grants = [Grant(User(account), src_permission)] \
if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
src_o_headers = \
encode_acl('object', ACL(Owner(owner, owner), grants))
src_o_headers.update({'last-modified': self.last_modified})
self.swift.register(
'HEAD', join('/v1/AUTH_test', src_path.lstrip('/')),
swob.HTTPOk, src_o_headers, None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS %s:hmac' % account,
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()})
return self.call_s3api(req)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_owner_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:full_control',
'FULL_CONTROL')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_grantee_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write', 'READ')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_src_obj_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_dst_container_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:other', 'READ')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_empty_src_path(self):
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self._test_object_copy_for_s3acl(
'test:write', 'READ', src_path='')
self.assertEqual(status.split()[0], '400')
class TestS3ApiObjNonUTC(TestS3ApiObj):
def setUp(self):
self.orig_tz = os.environ.get('TZ', '')
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
super(TestS3ApiObjNonUTC, self).setUp()
def tearDown(self):
super(TestS3ApiObjNonUTC, self).tearDown()
os.environ['TZ'] = self.orig_tz
time.tzset()
if __name__ == '__main__':
unittest.main()
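# Illustrative note (not part of the original file): besides the entry point
# above, a single case can be run with the standard unittest CLI, e.g.
#   python -m unittest test_obj.TestS3ApiObj.test_object_PUT_copy
# (the module path is a guess; adjust it to the actual package layout).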
| # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import hashlib
import os
from os.path import join
import time
from mock import patch
from swift.common import swob
from swift.common.swob import Request
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
from swift.common.middleware.s3api.subresource import ACL, User, encode_acl, \
Owner, Grant
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import mktime, S3Timestamp
from test.unit.common.middleware.s3api.helpers import FakeSwift
def _wrap_fake_auth_middleware(org_func):
def fake_fake_auth_middleware(self, env):
org_func(env)
if 'swift.authorize_override' in env:
return
if 'HTTP_AUTHORIZATION' not in env:
return
_, authorization = env['HTTP_AUTHORIZATION'].split(' ')
tenant_user, sign = authorization.rsplit(':', 1)
tenant, user = tenant_user.rsplit(':', 1)
env['HTTP_X_TENANT_NAME'] = tenant
env['HTTP_X_USER_NAME'] = user
return fake_fake_auth_middleware
class TestS3ApiObj(S3ApiTestCase):
def setUp(self):
super(TestS3ApiObj, self).setUp()
self.object_body = 'hello'
self.etag = hashlib.md5(self.object_body).hexdigest()
self.last_modified = 'Fri, 01 Apr 2014 12:00:00 GMT'
self.response_headers = {'Content-Type': 'text/html',
'Content-Length': len(self.object_body),
'Content-Disposition': 'inline',
'Content-Language': 'en',
'x-object-meta-test': 'swift',
'etag': self.etag,
'last-modified': self.last_modified,
'expires': 'Mon, 21 Sep 2015 12:00:00 GMT',
'x-robots-tag': 'nofollow',
'cache-control': 'private'}
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPCreated,
{'etag': self.etag,
'last-modified': self.last_modified,
'x-object-meta-something': 'oh hai'},
None)
def _test_object_GETorHEAD(self, method):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
unexpected_headers = []
for key, val in self.response_headers.iteritems():
if key in ('Content-Length', 'Content-Type', 'content-encoding',
'last-modified', 'cache-control', 'Content-Disposition',
'Content-Language', 'expires', 'x-robots-tag'):
self.assertIn(key, headers)
self.assertEqual(headers[key], str(val))
elif key == 'etag':
self.assertEqual(headers[key], '"%s"' % val)
elif key.startswith('x-object-meta-'):
self.assertIn('x-amz-meta-' + key[14:], headers)
self.assertEqual(headers['x-amz-meta-' + key[14:]], val)
else:
unexpected_headers.append((key, val))
if unexpected_headers:
self.fail('unexpected headers: %r' % unexpected_headers)
self.assertEqual(headers['etag'],
'"%s"' % self.response_headers['etag'])
if method == 'GET':
self.assertEqual(body, self.object_body)
@s3acl
def test_object_HEAD_error(self):
        # HEAD does not return a body, even for an error response, per the
        # REST API specification.
        # So, only the response code is checked in these HEAD error tests.
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPUnauthorized, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPForbidden, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '404')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '412')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServerError, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPServiceUnavailable, {}, None)
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual(body, '') # sanity
def test_object_HEAD(self):
self._test_object_GETorHEAD('HEAD')
def _test_object_HEAD_Range(self, range_value):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'HEAD'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': range_value,
'Date': self.get_date_header()})
return self.call_s3api(req)
@s3acl
def test_object_HEAD_Range_with_invalid_value(self):
range_value = ''
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'hoge'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes='
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-1'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '5')
self.assertTrue('content-range' not in headers)
range_value = 'bytes=5-10'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '416')
@s3acl
def test_object_HEAD_Range(self):
# update response headers
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers,
self.object_body)
range_value = 'bytes=0-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=3-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '1')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 3-3'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=1-'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '4')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 1-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
range_value = 'bytes=-3'
status, headers, body = self._test_object_HEAD_Range(range_value)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-length' in headers)
self.assertEqual(headers['content-length'], '3')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 2-4'))
self.assertTrue('x-amz-meta-test' in headers)
self.assertEqual('swift', headers['x-amz-meta-test'])
@s3acl
def test_object_GET_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'PreconditionFailed')
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
@s3acl
def test_object_GET(self):
self._test_object_GETorHEAD('GET')
@s3acl(s3acl_only=True)
def test_object_GET_with_s3acl_and_keystone(self):
# for passing keystone authentication root
fake_auth = self.swift._fake_auth_middleware
with patch.object(FakeSwift, '_fake_auth_middleware',
_wrap_fake_auth_middleware(fake_auth)):
self._test_object_GETorHEAD('GET')
_, _, headers = self.swift.calls_with_headers[-1]
self.assertNotIn('Authorization', headers)
_, _, headers = self.swift.calls_with_headers[0]
self.assertNotIn('Authorization', headers)
@s3acl
def test_object_GET_Range(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Range': 'bytes=0-3',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '206')
self.assertTrue('content-range' in headers)
self.assertTrue(headers['content-range'].startswith('bytes 0-3'))
@s3acl
def test_object_GET_Range_error(self):
code = self._test_method_error('GET', '/bucket/object',
swob.HTTPRequestedRangeNotSatisfiable)
self.assertEqual(code, 'InvalidRange')
@s3acl
def test_object_GET_Response(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING':
'response-content-type=%s&'
'response-content-language=%s&'
'response-expires=%s&'
'response-cache-control=%s&'
'response-content-disposition=%s&'
'response-content-encoding=%s&'
% ('text/plain', 'en',
'Fri, 01 Apr 2014 12:00:00 GMT',
'no-cache',
'attachment',
'gzip')},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertTrue('content-type' in headers)
self.assertEqual(headers['content-type'], 'text/plain')
self.assertTrue('content-language' in headers)
self.assertEqual(headers['content-language'], 'en')
self.assertTrue('expires' in headers)
self.assertEqual(headers['expires'], 'Fri, 01 Apr 2014 12:00:00 GMT')
self.assertTrue('cache-control' in headers)
self.assertEqual(headers['cache-control'], 'no-cache')
self.assertTrue('content-disposition' in headers)
self.assertEqual(headers['content-disposition'],
'attachment')
self.assertTrue('content-encoding' in headers)
self.assertEqual(headers['content-encoding'], 'gzip')
@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestEntityTooLarge)
self.assertEqual(code, 'EntityTooLarge')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPUnprocessableEntity)
self.assertEqual(code, 'BadDigest')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPLengthRequired)
self.assertEqual(code, 'MissingContentLength')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPPreconditionFailed)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': ''})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?foo=bar'})
self.assertEqual(code, 'InvalidArgument')
        # adding other query parameters will cause an error
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo&bar=baz'})
self.assertEqual(code, 'InvalidArgument')
        # ...even if versionId appears last
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?bar=baz&versionId=foo'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/bucket/src_obj?versionId=foo'})
self.assertEqual(code, 'NotImplemented')
code = self._test_method_error(
'PUT', '/bucket/object',
swob.HTTPCreated,
{'X-Amz-Copy-Source': '/src_bucket/src_object',
'X-Amz-Copy-Source-Range': 'bytes=0-0'})
self.assertEqual(code, 'InvalidArgument')
code = self._test_method_error('PUT', '/bucket/object',
swob.HTTPRequestTimeout)
self.assertEqual(code, 'RequestTimeout')
@s3acl
def test_object_PUT(self):
etag = self.response_headers['etag']
content_md5 = etag.decode('hex').encode('base64').strip()
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'x-amz-storage-class': 'STANDARD',
'Content-MD5': content_md5,
'Date': self.get_date_header()},
body=self.object_body)
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
# Check that s3api returns an etag header.
self.assertEqual(headers['etag'], '"%s"' % etag)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that s3api converts a Content-MD5 header into an etag.
self.assertEqual(headers['etag'], etag)
def test_object_PUT_headers(self):
content_md5 = self.etag.decode('hex').encode('base64').strip()
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
swob.HTTPOk, {'last-modified': self.last_modified},
None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'X-Amz-Storage-Class': 'STANDARD',
'X-Amz-Meta-Something': 'oh hai',
'X-Amz-Meta-Unreadable-Prefix': '\x04w',
'X-Amz-Meta-Unreadable-Suffix': 'h\x04',
'X-Amz-Meta-Lots-Of-Unprintable': 5 * '\x04',
'X-Amz-Copy-Source': '/some/source',
'Content-MD5': content_md5,
'Date': self.get_date_header()})
req.date = datetime.now()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
        # Check that s3api does not return an etag header
        # when a copy source is specified.
self.assertTrue(headers.get('etag') is None)
# Check that s3api does not return custom metadata in response
self.assertTrue(headers.get('x-amz-meta-something') is None)
_, _, headers = self.swift.calls_with_headers[-1]
# Check that s3api converts a Content-MD5 header into an etag.
self.assertEqual(headers['ETag'], self.etag)
self.assertEqual(headers['X-Object-Meta-Something'], 'oh hai')
self.assertEqual(headers['X-Object-Meta-Unreadable-Prefix'],
'=?UTF-8?Q?=04w?=')
self.assertEqual(headers['X-Object-Meta-Unreadable-Suffix'],
'=?UTF-8?Q?h=04?=')
self.assertEqual(headers['X-Object-Meta-Lots-Of-Unprintable'],
'=?UTF-8?B?BAQEBAQ=?=')
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
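    # The helpers below exercise PUT-with-copy: they register a HEAD response
    # for the copy source (including ACL headers), then issue a PUT carrying
    # X-Amz-Copy-Source through s3api and return (status, headers, body).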
def _test_object_PUT_copy(self, head_resp, put_header=None,
src_path='/some/source', timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/some/source',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy(src_path, put_header, timestamp)
def _test_object_PUT_copy_self(self, head_resp,
put_header=None, timestamp=None):
account = 'test:tester'
grants = [Grant(User(account), 'FULL_CONTROL')]
head_headers = \
encode_acl('object',
ACL(Owner(account, account), grants))
head_headers.update({'last-modified': self.last_modified})
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
head_resp, head_headers, None)
put_header = put_header or {}
return self._call_object_copy('/bucket/object', put_header, timestamp)
def _call_object_copy(self, src_path, put_header, timestamp=None):
put_headers = {'Authorization': 'AWS test:tester:hmac',
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()}
put_headers.update(put_header)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers=put_headers)
req.date = datetime.now()
req.content_type = 'text/plain'
timestamp = timestamp or time.time()
with patch('swift.common.middleware.s3api.utils.time.time',
return_value=timestamp):
return self.call_s3api(req)
@s3acl
def test_object_PUT_copy(self):
def do_test(src_path=None):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
status, headers, body = self._test_object_PUT_copy(
swob.HTTPOk, put_header={'Date': date_header},
timestamp=timestamp, src_path=src_path)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
self.assertTrue(headers.get('x-amz-meta-something') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/some/source')
self.assertEqual(headers['Content-Length'], '0')
do_test('/some/source')
do_test('/some/source?')
do_test('/some/source?versionId=null')
# Some clients (like Boto) don't include the leading slash;
# AWS seems to tolerate this so we should, too
do_test('some/source')
@s3acl
def test_object_PUT_copy_self(self):
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_copy(self):
header = {'x-amz-metadata-directive': 'COPY'}
status, headers, body = \
self._test_object_PUT_copy_self(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '400')
elem = fromstring(body, 'Error')
err_msg = ("This copy request is illegal because it is trying to copy "
"an object to itself without changing the object's "
"metadata, storage class, website redirect location or "
"encryption attributes.")
self.assertEqual(elem.find('Code').text, 'InvalidRequest')
self.assertEqual(elem.find('Message').text, err_msg)
@s3acl
def test_object_PUT_copy_self_metadata_replace(self):
date_header = self.get_date_header()
timestamp = mktime(date_header)
last_modified = S3Timestamp(timestamp).s3xmlformat
header = {'x-amz-metadata-directive': 'REPLACE',
'Date': date_header}
status, headers, body = self._test_object_PUT_copy_self(
swob.HTTPOk, header, timestamp=timestamp)
self.assertEqual(status.split()[0], '200')
self.assertEqual(headers['Content-Type'], 'application/xml')
self.assertTrue(headers.get('etag') is None)
elem = fromstring(body, 'CopyObjectResult')
self.assertEqual(elem.find('LastModified').text, last_modified)
self.assertEqual(elem.find('ETag').text, '"%s"' % self.etag)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(headers['X-Copy-From'], '/bucket/object')
self.assertEqual(headers['Content-Length'], '0')
@s3acl
def test_object_PUT_copy_headers_error(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-None-Match': etag}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = {'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPNotModified,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
header = \
{'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPPreconditionFailed,
header)
self.assertEqual(self._get_error_code(body), 'PreconditionFailed')
def test_object_PUT_copy_headers_with_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
header = {'X-Amz-Copy-Source-If-Match': etag,
'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 3)
        # Once the copy-source check passes in the s3acl case, s3api checks
        # the destination bucket's write permissions.
_, _, headers = self.swift.calls_with_headers[-2]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-Match') is None)
self.assertTrue(headers.get('If-Modified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-Match'], etag)
self.assertEqual(headers['If-Modified-Since'], last_modified_since)
def test_object_PUT_copy_headers_with_not_match(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
self.assertEqual(len(self.swift.calls_with_headers), 2)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_headers_with_not_match_and_s3acl(self):
etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'
header = {'X-Amz-Copy-Source-If-None-Match': etag,
'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since,
'Date': self.get_date_header()}
status, header, body = \
self._test_object_PUT_copy(swob.HTTPOk, header)
self.assertEqual(status.split()[0], '200')
        # Once the copy-source check passes in the s3acl case, s3api checks
        # the destination bucket's write permissions.
self.assertEqual(len(self.swift.calls_with_headers), 3)
_, _, headers = self.swift.calls_with_headers[-1]
self.assertTrue(headers.get('If-None-Match') is None)
self.assertTrue(headers.get('If-Unmodified-Since') is None)
_, _, headers = self.swift.calls_with_headers[0]
self.assertEqual(headers['If-None-Match'], etag)
self.assertEqual(headers['If-Unmodified-Since'], last_modified_since)
@s3acl
def test_object_POST_error(self):
code = self._test_method_error('POST', '/bucket/object', None)
self.assertEqual(code, 'NotImplemented')
@s3acl
def test_object_DELETE_error(self):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPUnauthorized)
self.assertEqual(code, 'SignatureDoesNotMatch')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPForbidden)
self.assertEqual(code, 'AccessDenied')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServerError)
self.assertEqual(code, 'InternalError')
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPServiceUnavailable)
self.assertEqual(code, 'InternalError')
with patch(
'swift.common.middleware.s3api.s3request.get_container_info',
return_value={'status': 204}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchKey')
with patch(
'swift.common.middleware.s3api.s3request.get_container_info',
return_value={'status': 404}):
code = self._test_method_error('DELETE', '/bucket/object',
swob.HTTPNotFound)
self.assertEqual(code, 'NoSuchBucket')
@s3acl
def test_object_DELETE_no_multipart(self):
self.s3api.conf.allow_multipart_uploads = False
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertNotIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_object_DELETE_multipart(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
_, path = self.swift.calls[-1]
self.assertEqual(path.count('?'), 0)
@s3acl
def test_slo_object_DELETE(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk,
{'x-static-large-object': 'True'},
None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, {}, '<SLO delete results>')
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header(),
'Content-Type': 'foo/bar'})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual(body, '')
self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'),
self.swift.calls)
self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'
'?multipart-manifest=delete'),
self.swift.calls)
_, path, headers = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEqual(query['multipart-manifest'], 'delete')
self.assertNotIn('Content-Type', headers)
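    # Helper: issue the given method on /bucket/object as the given account,
    # so per-account ACL behavior can be checked by the tests below.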
def _test_object_for_s3acl(self, method, account):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS %s:hmac' % account,
'Date': self.get_date_header()})
return self.call_s3api(req)
def _test_set_container_permission(self, account, permission):
grants = [Grant(User(account), permission)]
headers = \
encode_acl('container',
ACL(Owner('test:tester', 'test:tester'), grants))
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, headers, None)
@s3acl(s3acl_only=True)
def test_object_GET_without_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_GET_with_read_permission(self):
status, headers, body = self._test_object_for_s3acl('GET',
'test:read')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_GET_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_for_s3acl('GET', 'test:full_control')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_without_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:other')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_PUT_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('PUT',
'test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = \
self._test_object_for_s3acl('PUT', account)
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_DELETE_without_permission(self):
account = 'test:other'
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_owner_permission(self):
status, headers, body = self._test_object_for_s3acl('DELETE',
'test:tester')
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_write_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'WRITE')
status, headers, body = self._test_object_for_s3acl('DELETE',
account)
self.assertEqual(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_object_DELETE_with_fullcontrol_permission(self):
account = 'test:other'
self._test_set_container_permission(account, 'FULL_CONTROL')
status, headers, body = self._test_object_for_s3acl('DELETE', account)
self.assertEqual(status.split()[0], '204')
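    # Helper: copy src_path onto /bucket/object as `account`, registering the
    # source object with either the given src_permission grant for that account
    # or FULL_CONTROL for the owner.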
def _test_object_copy_for_s3acl(self, account, src_permission=None,
src_path='/src_bucket/src_obj'):
owner = 'test:tester'
grants = [Grant(User(account), src_permission)] \
if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
src_o_headers = \
encode_acl('object', ACL(Owner(owner, owner), grants))
src_o_headers.update({'last-modified': self.last_modified})
self.swift.register(
'HEAD', join('/v1/AUTH_test', src_path.lstrip('/')),
swob.HTTPOk, src_o_headers, None)
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS %s:hmac' % account,
'X-Amz-Copy-Source': src_path,
'Date': self.get_date_header()})
return self.call_s3api(req)
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_owner_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:tester')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_fullcontrol_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:full_control',
'FULL_CONTROL')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_with_grantee_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write', 'READ')
self.assertEqual(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_src_obj_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:write')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_without_dst_container_permission(self):
status, headers, body = \
self._test_object_copy_for_s3acl('test:other', 'READ')
self.assertEqual(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_object_PUT_copy_empty_src_path(self):
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPPreconditionFailed, {}, None)
status, headers, body = self._test_object_copy_for_s3acl(
'test:write', 'READ', src_path='')
self.assertEqual(status.split()[0], '400')
class TestS3ApiObjNonUTC(TestS3ApiObj):
def setUp(self):
self.orig_tz = os.environ.get('TZ', '')
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
super(TestS3ApiObjNonUTC, self).setUp()
def tearDown(self):
super(TestS3ApiObjNonUTC, self).tearDown()
os.environ['TZ'] = self.orig_tz
time.tzset()
if __name__ == '__main__':
unittest.main()
| en | 0.800796 | # Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # HEAD does not return the body even an error response in the # specifications of the REST API. # So, check the response code for error test of HEAD. # sanity # sanity # sanity # sanity # sanity # sanity # update response headers # for passing keystone authentication root # adding other query paramerters will cause an error # ...even versionId appears in the last # Check that s3api returns an etag header. # Check that s3api converts a Content-MD5 header into an etag. # Check that s3api does not return an etag header, # specified copy source. # Check that s3api does not return custom metadata in response # Check that s3api converts a Content-MD5 header into an etag. # Some clients (like Boto) don't include the leading slash; # AWS seems to tolerate this so we should, too # After the check of the copy source in the case of s3acl is valid, # s3api check the bucket write permissions of the destination. # After the check of the copy source in the case of s3acl is valid, # s3api check the bucket write permissions of the destination. | 1.58916 | 2 |
pynyzo/pynyzo/keyutil.py | EggPool/pynyzo | 6 | 8972 | """
Eddsa Ed25519 key handling
From
https://github.com/n-y-z-o/nyzoVerifier/blob/b73bc25ba3094abe3470ec070ce306885ad9a18f/src/main/java/co/nyzo/verifier/KeyUtil.java
plus
https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java
"""
# Uses https://github.com/warner/python-ed25519 , c binding, fast
import ed25519
import hashlib
from pynyzo.byteutil import ByteUtil
class KeyUtil:
@staticmethod
def main():
"""Temp test, not to be used"""
signing_key, verifying_key = ed25519.create_keypair()
print("Original private key", ByteUtil.bytes_as_string_with_dashes(signing_key.to_bytes()[:32]))
# signing key has signing + verifying, we keep the first 32 to only get the private part.
print("Original public key", ByteUtil.bytes_as_string_with_dashes(verifying_key.to_bytes()))
@staticmethod
def generateSeed(hashable_keyword: str='') -> bytes:
"""Generate a private key, with optional keyword to get reproducible tests results or later HD Wallet."""
if len(hashable_keyword):
            # sha256 needs bytes; encode first so a str keyword (per the annotation) works too.
            if isinstance(hashable_keyword, str):
                hashable_keyword = hashable_keyword.encode('utf-8')
            seed = hashlib.sha256(hashable_keyword).digest()
signing_key = ed25519.SigningKey(seed)
else:
signing_key, _ = ed25519.create_keypair()
return signing_key.to_bytes()[:32]
@staticmethod
def private_to_public(private: str) -> str:
"""Temp Test"""
keydata = bytes.fromhex(private)
signing_key = ed25519.SigningKey(keydata)
verifying_key = signing_key.get_verifying_key()
vkey_hex = verifying_key.to_ascii(encoding="hex")
return vkey_hex.decode('utf-8')
@staticmethod
def get_from_private_seed_file(filename: str):
"""returns priv and pub key - as object - from the stored nyzo text id format"""
with open(filename) as f:
nyzo = f.read(80).replace('-', '').encode('utf-8').strip()
signing_key = ed25519.SigningKey(nyzo, encoding="hex")
verifying_key = signing_key.get_verifying_key()
return signing_key, verifying_key
@staticmethod
def get_from_private_seed(seed: str):
"""returns priv and pub key - as object - from an hex seed"""
seed = seed.replace('-', '').encode('utf-8').strip()
signing_key = ed25519.SigningKey(seed, encoding="hex")
verifying_key = signing_key.get_verifying_key()
return signing_key, verifying_key
@staticmethod
def save_to_private_seed_file(filename: str, key: bytes) -> None:
"""Saves the privkey to the nyzo formatted file"""
nyzo_format = ByteUtil.bytes_as_string_with_dashes(key)
with open(filename, 'w') as f:
f.write(nyzo_format)
@staticmethod
def sign_bytes(bytes_to_sign: bytes, private_key: ed25519.SigningKey) -> bytes:
sig = private_key.sign(bytes_to_sign)
return sig
@staticmethod
def signature_is_valid(signature: bytes, signed_bytes: bytes, public_id: bytes) -> bool:
verifying_key = ed25519.VerifyingKey(public_id)
# todo: cache key from id, see https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java
try:
verifying_key.verify(signature, signed_bytes)
# print("signature is good")
return True
except ed25519.BadSignatureError:
# print("signature is bad!")
return False
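# Illustrative round trip using the methods above (a sketch, not executed here;
# the message value is an arbitrary example):
#   seed = KeyUtil.generateSeed()
#   signing_key, verifying_key = KeyUtil.get_from_private_seed(seed.hex())
#   signature = KeyUtil.sign_bytes(b"message", signing_key)
#   assert KeyUtil.signature_is_valid(signature, b"message", verifying_key.to_bytes())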
if __name__ == "__main__":
KeyUtil.main()
# KeyUtil.private_to_public('nyzo-formatted-private-key'.replace('-', ''))
| """
Eddsa Ed25519 key handling
From
https://github.com/n-y-z-o/nyzoVerifier/blob/b73bc25ba3094abe3470ec070ce306885ad9a18f/src/main/java/co/nyzo/verifier/KeyUtil.java
plus
https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java
"""
# Uses https://github.com/warner/python-ed25519 , c binding, fast
import ed25519
import hashlib
from pynyzo.byteutil import ByteUtil
class KeyUtil:
@staticmethod
def main():
"""Temp test, not to be used"""
signing_key, verifying_key = ed25519.create_keypair()
print("Original private key", ByteUtil.bytes_as_string_with_dashes(signing_key.to_bytes()[:32]))
# signing key has signing + verifying, we keep the first 32 to only get the private part.
print("Original public key", ByteUtil.bytes_as_string_with_dashes(verifying_key.to_bytes()))
@staticmethod
def generateSeed(hashable_keyword: str='') -> bytes:
"""Generate a private key, with optional keyword to get reproducible tests results or later HD Wallet."""
if len(hashable_keyword):
            # sha256 needs bytes; encode first so a str keyword (per the annotation) works too.
            if isinstance(hashable_keyword, str):
                hashable_keyword = hashable_keyword.encode('utf-8')
            seed = hashlib.sha256(hashable_keyword).digest()
signing_key = ed25519.SigningKey(seed)
else:
signing_key, _ = ed25519.create_keypair()
return signing_key.to_bytes()[:32]
@staticmethod
def private_to_public(private: str) -> str:
"""Temp Test"""
keydata = bytes.fromhex(private)
signing_key = ed25519.SigningKey(keydata)
verifying_key = signing_key.get_verifying_key()
vkey_hex = verifying_key.to_ascii(encoding="hex")
return vkey_hex.decode('utf-8')
@staticmethod
def get_from_private_seed_file(filename: str):
"""returns priv and pub key - as object - from the stored nyzo text id format"""
with open(filename) as f:
nyzo = f.read(80).replace('-', '').encode('utf-8').strip()
signing_key = ed25519.SigningKey(nyzo, encoding="hex")
verifying_key = signing_key.get_verifying_key()
return signing_key, verifying_key
@staticmethod
def get_from_private_seed(seed: str):
"""returns priv and pub key - as object - from an hex seed"""
seed = seed.replace('-', '').encode('utf-8').strip()
signing_key = ed25519.SigningKey(seed, encoding="hex")
verifying_key = signing_key.get_verifying_key()
return signing_key, verifying_key
@staticmethod
def save_to_private_seed_file(filename: str, key: bytes) -> None:
"""Saves the privkey to the nyzo formatted file"""
nyzo_format = ByteUtil.bytes_as_string_with_dashes(key)
with open(filename, 'w') as f:
f.write(nyzo_format)
@staticmethod
def sign_bytes(bytes_to_sign: bytes, private_key: ed25519.SigningKey) -> bytes:
sig = private_key.sign(bytes_to_sign)
return sig
@staticmethod
def signature_is_valid(signature: bytes, signed_bytes: bytes, public_id: bytes) -> bool:
verifying_key = ed25519.VerifyingKey(public_id)
# todo: cache key from id, see https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java
try:
verifying_key.verify(signature, signed_bytes)
# print("signature is good")
return True
except ed25519.BadSignatureError:
# print("signature is bad!")
return False
if __name__ == "__main__":
KeyUtil.main()
# KeyUtil.private_to_public('nyzo-formatted-private-key'.replace('-', ''))
| en | 0.529476 | Eddsa Ed25519 key handling From https://github.com/n-y-z-o/nyzoVerifier/blob/b73bc25ba3094abe3470ec070ce306885ad9a18f/src/main/java/co/nyzo/verifier/KeyUtil.java plus https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java # Uses https://github.com/warner/python-ed25519 , c binding, fast Temp test, not to be used # signing key has signing + verifying, we keep the first 32 to only get the private part. Generate a private key, with optional keyword to get reproducible tests results or later HD Wallet. Temp Test returns priv and pub key - as object - from the stored nyzo text id format returns priv and pub key - as object - from an hex seed Saves the privkey to the nyzo formatted file # todo: cache key from id, see https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java # print("signature is good") # print("signature is bad!") # KeyUtil.private_to_public('nyzo-formatted-private-key'.replace('-', '')) | 2.763858 | 3 |
mldftdat/scripts/train_gp.py | mir-group/CiderPress | 10 | 8973 | from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
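# Load the i-th dataset pair: read (X, y, rho_data) descriptors, drop points below
# the density cutoff, optionally shuffle, and keep every n-th remaining sample.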
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
                        help='comma-separated list of initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
                        help='Whether to optimize the exponent of density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
rho_data = np.append(rho_data, rho_datan, axis=1)
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
gpcls = DFTGPR
gpr = gpcls.from_settings(X, feature_list, args)
gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
#if args.heg:
# gpr.add_heg_limit()
print('FINAL KERNEL', gpr.gp.kernel_)
if nv != 0:
pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
print('MAE VAL SET', np.mean(abserr))
# Always attach the arguments to the object to keep track of settings.
gpr.args = args
if args.delete_k:
gpr.L_ = None
dump(gpr, args.save_file)
if __name__ == '__main__':
main()
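# Example invocation (illustrative only; the file, dataset, and basis names are
# assumptions, not values shipped with the project):
#   python train_gp.py gpr.joblib features.yaml aug_data 50 def2-qzvppd \
#       --functional PBE -r -a -on --suffix test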
| from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
                        help='comma-separated list of initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
                        help='Whether to optimize the exponent of density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
rho_data = np.append(rho_data, rho_datan, axis=1)
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
gpcls = DFTGPR
gpr = gpcls.from_settings(X, feature_list, args)
gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
#if args.heg:
# gpr.add_heg_limit()
print('FINAL KERNEL', gpr.gp.kernel_)
if nv != 0:
pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
print('MAE VAL SET', np.mean(abserr))
# Always attach the arguments to the object to keep track of settings.
gpr.args = args
if args.delete_k:
gpr.L_ = None
dump(gpr, args.save_file)
if __name__ == '__main__':
main()
| en | 0.179827 | # offset in case repeat datasets are used #parser.add_argument('-m', '--model-class', type=str, default=None) #parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None) #if args.heg: # gpr.add_heg_limit() # Always attach the arguments to the object to keep track of settings. | 2.205937 | 2 |
picoCTF-web/api/routes/admin.py | zaratec/picoCTF | 0 | 8974 | <filename>picoCTF-web/api/routes/admin.py
import api
import bson
from api.annotations import (
api_wrapper,
log_action,
require_admin,
require_login,
require_teacher
)
from api.common import WebError, WebSuccess
from flask import (
Blueprint,
Flask,
render_template,
request,
send_from_directory,
session
)
blueprint = Blueprint("admin_api", __name__)
@blueprint.route('/problems', methods=['GET'])
@api_wrapper
@require_admin
def get_problem_data_hook():
has_instances = lambda p : len(p["instances"]) > 0
problems = list(filter(has_instances, api.problem.get_all_problems(show_disabled=True)))
for problem in problems:
problem["reviews"] = api.problem_feedback.get_problem_feedback(pid=problem["pid"])
data = {
"problems": problems,
"bundles": api.problem.get_all_bundles()
}
return WebSuccess(data=data)
@blueprint.route('/users', methods=['GET'])
@api_wrapper
@require_admin
def get_all_users_hook():
users = api.user.get_all_users()
if users is None:
return WebError("There was an error query users from the database.")
return WebSuccess(data=users)
@blueprint.route('/exceptions', methods=['GET'])
@api_wrapper
@require_admin
def get_exceptions_hook():
try:
limit = abs(int(request.args.get("limit")))
exceptions = api.admin.get_api_exceptions(result_limit=limit)
return WebSuccess(data=exceptions)
except (ValueError, TypeError):
return WebError("limit is not a valid integer.")
@blueprint.route('/exceptions/dismiss', methods=['POST'])
@api_wrapper
@require_admin
def dismiss_exceptions_hook():
trace = request.form.get("trace", None)
if trace:
api.admin.dismiss_api_exceptions(trace)
return WebSuccess(data="Successfuly changed exception visibility.")
else:
return WebError(message="You must supply a trace to hide.")
@blueprint.route("/problems/submissions", methods=["GET"])
@api_wrapper
@require_admin
def get_problem():
submission_data = {p["name"]:api.stats.get_problem_submission_stats(pid=p["pid"]) \
for p in api.problem.get_all_problems(show_disabled=True)}
return WebSuccess(data=submission_data)
@blueprint.route("/problems/availability", methods=["POST"])
@api_wrapper
@require_admin
def change_problem_availability_hook():
pid = request.form.get("pid", None)
desired_state = request.form.get("state", None)
    if desired_state is None:
return WebError("Problems are either enabled or disabled.")
else:
state = bson.json_util.loads(desired_state)
api.admin.set_problem_availability(pid, state)
return WebSuccess(data="Problem state changed successfully.")
@blueprint.route("/shell_servers", methods=["GET"])
@api_wrapper
@require_admin
def get_shell_servers():
return WebSuccess(data=api.shell_servers.get_servers())
@blueprint.route("/shell_servers/add", methods=["POST"])
@api_wrapper
@require_admin
def add_shell_server():
params = api.common.flat_multi(request.form)
api.shell_servers.add_server(params)
return WebSuccess("Shell server added.")
@blueprint.route("/shell_servers/update", methods=["POST"])
@api_wrapper
@require_admin
def update_shell_server():
params = api.common.flat_multi(request.form)
sid = params.get("sid", None)
if sid is None:
return WebError("Must specify sid to be updated")
api.shell_servers.update_server(sid, params)
return WebSuccess("Shell server updated.")
@blueprint.route("/shell_servers/remove", methods=["POST"])
@api_wrapper
@require_admin
def remove_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must specify sid to be removed")
api.shell_servers.remove_server(sid)
return WebSuccess("Shell server removed.")
@blueprint.route("/shell_servers/load_problems", methods=["POST"])
@api_wrapper
@require_admin
def load_problems_from_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
number = api.shell_servers.load_problems_from_server(sid)
return WebSuccess("Loaded {} problems from the server".format(number))
@blueprint.route("/shell_servers/check_status", methods=["GET"])
@api_wrapper
@require_admin
def check_status_of_shell_server():
sid = request.args.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
all_online, data = api.shell_servers.get_problem_status_from_server(sid)
if all_online:
return WebSuccess("All problems are online", data=data)
else:
return WebError("One or more problems are offline. Please connect and fix the errors.", data=data)
@blueprint.route("/bundle/dependencies_active", methods=["POST"])
@api_wrapper
@require_admin
def bundle_dependencies():
bid = request.form.get("bid", None)
state = request.form.get("state", None)
if bid is None:
return WebError("Must provide bid to load from.")
if state is None:
return WebError("Must provide a state to set.")
state = bson.json_util.loads(state)
api.problem.set_bundle_dependencies_enabled(bid, state)
return WebSuccess("Dependencies are now {}.".format("enabled" if state else "disabled"))
@blueprint.route("/settings", methods=["GET"])
@api_wrapper
@require_admin
def get_settings():
return WebSuccess(data=api.config.get_settings())
@blueprint.route("/settings/change", methods=["POST"])
@api_wrapper
@require_admin
def change_settings():
data = bson.json_util.loads(request.form["json"])
api.config.change_settings(data)
return WebSuccess("Settings updated")
| <filename>picoCTF-web/api/routes/admin.py
import api
import bson
from api.annotations import (
api_wrapper,
log_action,
require_admin,
require_login,
require_teacher
)
from api.common import WebError, WebSuccess
from flask import (
Blueprint,
Flask,
render_template,
request,
send_from_directory,
session
)
blueprint = Blueprint("admin_api", __name__)
@blueprint.route('/problems', methods=['GET'])
@api_wrapper
@require_admin
def get_problem_data_hook():
has_instances = lambda p : len(p["instances"]) > 0
problems = list(filter(has_instances, api.problem.get_all_problems(show_disabled=True)))
for problem in problems:
problem["reviews"] = api.problem_feedback.get_problem_feedback(pid=problem["pid"])
data = {
"problems": problems,
"bundles": api.problem.get_all_bundles()
}
return WebSuccess(data=data)
@blueprint.route('/users', methods=['GET'])
@api_wrapper
@require_admin
def get_all_users_hook():
users = api.user.get_all_users()
if users is None:
return WebError("There was an error query users from the database.")
return WebSuccess(data=users)
@blueprint.route('/exceptions', methods=['GET'])
@api_wrapper
@require_admin
def get_exceptions_hook():
try:
limit = abs(int(request.args.get("limit")))
exceptions = api.admin.get_api_exceptions(result_limit=limit)
return WebSuccess(data=exceptions)
except (ValueError, TypeError):
return WebError("limit is not a valid integer.")
@blueprint.route('/exceptions/dismiss', methods=['POST'])
@api_wrapper
@require_admin
def dismiss_exceptions_hook():
trace = request.form.get("trace", None)
if trace:
api.admin.dismiss_api_exceptions(trace)
return WebSuccess(data="Successfuly changed exception visibility.")
else:
return WebError(message="You must supply a trace to hide.")
@blueprint.route("/problems/submissions", methods=["GET"])
@api_wrapper
@require_admin
def get_problem():
submission_data = {p["name"]:api.stats.get_problem_submission_stats(pid=p["pid"]) \
for p in api.problem.get_all_problems(show_disabled=True)}
return WebSuccess(data=submission_data)
@blueprint.route("/problems/availability", methods=["POST"])
@api_wrapper
@require_admin
def change_problem_availability_hook():
pid = request.form.get("pid", None)
desired_state = request.form.get("state", None)
    if desired_state is None:
return WebError("Problems are either enabled or disabled.")
else:
state = bson.json_util.loads(desired_state)
api.admin.set_problem_availability(pid, state)
return WebSuccess(data="Problem state changed successfully.")
@blueprint.route("/shell_servers", methods=["GET"])
@api_wrapper
@require_admin
def get_shell_servers():
return WebSuccess(data=api.shell_servers.get_servers())
@blueprint.route("/shell_servers/add", methods=["POST"])
@api_wrapper
@require_admin
def add_shell_server():
params = api.common.flat_multi(request.form)
api.shell_servers.add_server(params)
return WebSuccess("Shell server added.")
@blueprint.route("/shell_servers/update", methods=["POST"])
@api_wrapper
@require_admin
def update_shell_server():
params = api.common.flat_multi(request.form)
sid = params.get("sid", None)
if sid is None:
return WebError("Must specify sid to be updated")
api.shell_servers.update_server(sid, params)
return WebSuccess("Shell server updated.")
@blueprint.route("/shell_servers/remove", methods=["POST"])
@api_wrapper
@require_admin
def remove_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must specify sid to be removed")
api.shell_servers.remove_server(sid)
return WebSuccess("Shell server removed.")
@blueprint.route("/shell_servers/load_problems", methods=["POST"])
@api_wrapper
@require_admin
def load_problems_from_shell_server():
sid = request.form.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
number = api.shell_servers.load_problems_from_server(sid)
return WebSuccess("Loaded {} problems from the server".format(number))
@blueprint.route("/shell_servers/check_status", methods=["GET"])
@api_wrapper
@require_admin
def check_status_of_shell_server():
sid = request.args.get("sid", None)
if sid is None:
return WebError("Must provide sid to load from.")
all_online, data = api.shell_servers.get_problem_status_from_server(sid)
if all_online:
return WebSuccess("All problems are online", data=data)
else:
return WebError("One or more problems are offline. Please connect and fix the errors.", data=data)
@blueprint.route("/bundle/dependencies_active", methods=["POST"])
@api_wrapper
@require_admin
def bundle_dependencies():
bid = request.form.get("bid", None)
state = request.form.get("state", None)
if bid is None:
return WebError("Must provide bid to load from.")
if state is None:
return WebError("Must provide a state to set.")
state = bson.json_util.loads(state)
api.problem.set_bundle_dependencies_enabled(bid, state)
return WebSuccess("Dependencies are now {}.".format("enabled" if state else "disabled"))
@blueprint.route("/settings", methods=["GET"])
@api_wrapper
@require_admin
def get_settings():
return WebSuccess(data=api.config.get_settings())
@blueprint.route("/settings/change", methods=["POST"])
@api_wrapper
@require_admin
def change_settings():
data = bson.json_util.loads(request.form["json"])
api.config.change_settings(data)
return WebSuccess("Settings updated")
| none | 1 | 2.296451 | 2 |
|
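The admin hooks above all follow one pattern: a Flask blueprint route stacked with a serializer (@api_wrapper) and an authorization guard (@require_admin), reading parameters from request.form and answering with WebSuccess/WebError. A minimal, self-contained sketch of that guard pattern is below — require_admin_sketch, the session flag, and the jsonify payloads are simplified stand-ins, not the project's real helpers.

from functools import wraps
from flask import Blueprint, request, session, jsonify

sketch = Blueprint("admin_sketch", __name__)

def require_admin_sketch(f):
    # Assumption: an "is_admin" flag lives in the session; the real admin check differs.
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not session.get("is_admin"):
            return jsonify(status=0, message="Admin privileges required."), 403
        return f(*args, **kwargs)
    return wrapper

@sketch.route("/problems/availability", methods=["POST"])
@require_admin_sketch
def change_problem_availability_sketch():
    state = request.form.get("state")
    if state is None:
        return jsonify(status=0, message="Problems are either enabled or disabled.")
    return jsonify(status=1, message="Problem state changed successfully.")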
python code/influxdb_worker.py | thongnbui/MIDS_251_project | 0 | 8975 | <reponame>thongnbui/MIDS_251_project
#!/usr/bin/python
import json
import argparse
from influxdb import InfluxDBClient
parser = argparse.ArgumentParser(description = 'pull data for softlayer queue' )
parser.add_argument( 'measurement' , help = 'measurement001' )
args = parser.parse_args()
client_influxdb = InfluxDBClient('172.16.31.10', '8086', 'cricket', 'cricket', 'cricket_data')
query = 'SELECT "data_center", "device", "value" FROM "cricket_data"."cricket_retention".'+args.measurement+' WHERE time > now() - 10m order by time'
result = client_influxdb.query(query)
for r in result:
i = 0
for data_center, device, value, time in r:
print args.measurement,'\t',r[i][data_center],'\t',r[i][device],'\t',r[i][time],'\t',r[i][value]
i += 1
| #!/usr/bin/python
import json
import argparse
from influxdb import InfluxDBClient
parser = argparse.ArgumentParser(description = 'pull data for softlayer queue' )
parser.add_argument( 'measurement' , help = 'measurement001' )
args = parser.parse_args()
client_influxdb = InfluxDBClient('172.16.31.10', '8086', 'cricket', 'cricket', 'cricket_data')
query = 'SELECT "data_center", "device", "value" FROM "cricket_data"."cricket_retention".'+args.measurement+' WHERE time > now() - 10m order by time'
result = client_influxdb.query(query)
for r in result:
i = 0
for data_center, device, value, time in r:
print args.measurement,'\t',r[i][data_center],'\t',r[i][device],'\t',r[i][time],'\t',r[i][value]
i += 1 | ru | 0.258958 | #!/usr/bin/python | 2.587181 | 3 |
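The worker above walks a ResultSet by hand with an index counter; influxdb-python's get_points() yields each row as a dict and removes that bookkeeping. A Python 3 sketch of the same query follows — the host, credentials, and measurement name are placeholders.

from influxdb import InfluxDBClient

client = InfluxDBClient("127.0.0.1", 8086, "cricket", "cricket", "cricket_data")
query = ('SELECT "data_center", "device", "value" '
         'FROM "cricket_data"."cricket_retention"."measurement001" '
         'WHERE time > now() - 10m ORDER BY time')
for point in client.query(query).get_points():
    # Each point is a dict keyed by the selected fields plus "time".
    print("measurement001", point["data_center"], point["device"],
          point["time"], point["value"], sep="\t")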
src/python/pants/backend/docker/lint/hadolint/subsystem.py | xyzst/pants | 0 | 8976 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import cast
from pants.core.util_rules.config_files import ConfigFilesRequest
from pants.core.util_rules.external_tool import TemplatedExternalTool
from pants.option.custom_types import file_option, shell_str
class Hadolint(TemplatedExternalTool):
options_scope = "hadolint"
name = "hadolint"
help = "A linter for Dockerfiles."
default_version = "v2.8.0"
# TODO: https://github.com/hadolint/hadolint/issues/411 tracks building and releasing
# hadolint for Linux ARM64.
default_known_versions = [
"v2.8.0|macos_x86_64|27985f257a216ecab06a16e643e8cb0123e7145b5d526cfcb4ce7a31fe99f357|2428944",
"v2.8.0|macos_arm64 |27985f257a216ecab06a16e643e8cb0123e7145b5d526cfcb4ce7a31fe99f357|2428944", # same as mac x86
"v2.8.0|linux_x86_64|9dfc155139a1e1e9b3b28f3de9907736b9dfe7cead1c3a0ae7ff0158f3191674|5895708",
]
default_url_template = (
"https://github.com/hadolint/hadolint/releases/download/{version}/hadolint-{platform}"
)
default_url_platform_mapping = {
"macos_arm64": "Darwin-x86_64",
"macos_x86_64": "Darwin-x86_64",
"linux_x86_64": "Linux-x86_64",
}
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--skip",
type=bool,
default=False,
help="Don't use Hadolint when running `./pants lint`.",
)
register(
"--args",
type=list,
member_type=shell_str,
help=(
"Arguments to pass directly to Hadolint, e.g. `--hadolint-args='--format json'`.'"
),
)
register(
"--config",
type=file_option,
default=None,
advanced=True,
help=(
"Path to an YAML config file understood by Hadolint "
"(https://github.com/hadolint/hadolint#configure).\n\n"
f"Setting this option will disable `[{cls.options_scope}].config_discovery`. Use "
"this option if the config is located in a non-standard location."
),
)
register(
"--config-discovery",
type=bool,
default=True,
advanced=True,
help=(
"If true, Pants will include all relevant config files during runs "
"(`.hadolint.yaml` and `.hadolint.yml`).\n\n"
f"Use `[{cls.options_scope}].config` instead if your config is in a "
"non-standard location."
),
)
@property
def skip(self) -> bool:
return cast(bool, self.options.skip)
@property
def args(self) -> tuple[str, ...]:
return tuple(self.options.args)
@property
def config(self) -> str | None:
return cast("str | None", self.options.config)
def config_request(self) -> ConfigFilesRequest:
# Refer to https://github.com/hadolint/hadolint#configure for how config files are
# discovered.
return ConfigFilesRequest(
specified=self.config,
specified_option_name=f"[{self.options_scope}].config",
discovery=cast(bool, self.options.config_discovery),
check_existence=[".hadolint.yaml", ".hadolint.yml"],
)
| # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import cast
from pants.core.util_rules.config_files import ConfigFilesRequest
from pants.core.util_rules.external_tool import TemplatedExternalTool
from pants.option.custom_types import file_option, shell_str
class Hadolint(TemplatedExternalTool):
options_scope = "hadolint"
name = "hadolint"
help = "A linter for Dockerfiles."
default_version = "v2.8.0"
# TODO: https://github.com/hadolint/hadolint/issues/411 tracks building and releasing
# hadolint for Linux ARM64.
default_known_versions = [
"v2.8.0|macos_x86_64|27985f257a216ecab06a16e643e8cb0123e7145b5d526cfcb4ce7a31fe99f357|2428944",
"v2.8.0|macos_arm64 |27985f257a216ecab06a16e643e8cb0123e7145b5d526cfcb4ce7a31fe99f357|2428944", # same as mac x86
"v2.8.0|linux_x86_64|9dfc155139a1e1e9b3b28f3de9907736b9dfe7cead1c3a0ae7ff0158f3191674|5895708",
]
default_url_template = (
"https://github.com/hadolint/hadolint/releases/download/{version}/hadolint-{platform}"
)
default_url_platform_mapping = {
"macos_arm64": "Darwin-x86_64",
"macos_x86_64": "Darwin-x86_64",
"linux_x86_64": "Linux-x86_64",
}
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--skip",
type=bool,
default=False,
help="Don't use Hadolint when running `./pants lint`.",
)
register(
"--args",
type=list,
member_type=shell_str,
help=(
"Arguments to pass directly to Hadolint, e.g. `--hadolint-args='--format json'`.'"
),
)
register(
"--config",
type=file_option,
default=None,
advanced=True,
help=(
"Path to an YAML config file understood by Hadolint "
"(https://github.com/hadolint/hadolint#configure).\n\n"
f"Setting this option will disable `[{cls.options_scope}].config_discovery`. Use "
"this option if the config is located in a non-standard location."
),
)
register(
"--config-discovery",
type=bool,
default=True,
advanced=True,
help=(
"If true, Pants will include all relevant config files during runs "
"(`.hadolint.yaml` and `.hadolint.yml`).\n\n"
f"Use `[{cls.options_scope}].config` instead if your config is in a "
"non-standard location."
),
)
@property
def skip(self) -> bool:
return cast(bool, self.options.skip)
@property
def args(self) -> tuple[str, ...]:
return tuple(self.options.args)
@property
def config(self) -> str | None:
return cast("str | None", self.options.config)
def config_request(self) -> ConfigFilesRequest:
# Refer to https://github.com/hadolint/hadolint#configure for how config files are
# discovered.
return ConfigFilesRequest(
specified=self.config,
specified_option_name=f"[{self.options_scope}].config",
discovery=cast(bool, self.options.config_discovery),
check_existence=[".hadolint.yaml", ".hadolint.yml"],
)
| en | 0.726419 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). # TODO: https://github.com/hadolint/hadolint/issues/411 tracks building and releasing # hadolint for Linux ARM64. # same as mac x86 #configure).\n\n" # Refer to https://github.com/hadolint/hadolint#configure for how config files are # discovered. | 1.930714 | 2 |
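Pants combines default_url_template with default_url_platform_mapping to build the per-platform download URL; the expansion itself is ordinary string formatting. A hand-rolled illustration of what that substitution yields (done outside of Pants, purely for clarity):

URL_TEMPLATE = "https://github.com/hadolint/hadolint/releases/download/{version}/hadolint-{platform}"
PLATFORMS = {"macos_arm64": "Darwin-x86_64", "macos_x86_64": "Darwin-x86_64", "linux_x86_64": "Linux-x86_64"}

def download_url(version: str, platform: str) -> str:
    # Mirrors the template expansion Pants performs internally for external tools.
    return URL_TEMPLATE.format(version=version, platform=PLATFORMS[platform])

print(download_url("v2.8.0", "linux_x86_64"))
# https://github.com/hadolint/hadolint/releases/download/v2.8.0/hadolint-Linux-x86_64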
venv/lib/python3.9/site-packages/biorun/fetch.py | LucaCilibrasi/docker_viruclust | 0 | 8977 | <filename>venv/lib/python3.9/site-packages/biorun/fetch.py
"""
Handles functionality related to data storage.
"""
import sys, os, glob, re, gzip, json
from biorun import const, utils, objects, ncbi
from biorun.models import jsonrec
import biorun.libs.placlib as plac
# Module level logger.
logger = utils.logger
# A nicer error message on incorrect installation.
try:
from Bio import SeqIO
except ImportError as exc:
print(f"*** Error: {exc}", file=sys.stderr)
print(f"*** This program requires biopython", file=sys.stderr)
print(f"*** Install: conda install -y biopython>=1.78", file=sys.stderr)
sys.exit(-1)
def resolve_fname(name, format='json'):
"""
Resolve a file name given an accession number.
"""
ext = format.lower()
fname = f"{name}.{ext}.gz"
fname = os.path.join(utils.DATADIR, fname)
return fname
def delete_data(text):
"""
Deletes data under a filename.
"""
for name in text.split(","):
fname = resolve_fname(name)
if os.path.isfile(fname):
os.remove(fname)
logger.info(f"removed: {fname}")
else:
logger.info(f"file does not exist: {fname}")
def read_json_file(fname):
"""
Returns the content of a JSON file.
"""
fp = utils.gz_read(fname)
data = json.load(fp)
fp.close()
return data
def save_json_file(fname, data):
"""
Returns the content of a JSON file.
"""
fp = utils.gz_write(fname)
json.dump(data, fp)
fp.close()
logger.info(f"saved {fname}")
return data
def change_seqid(json_name, seqid):
"""
Changes the sequence id stored in a json file.
"""
if os.path.isfile(json_name):
data = read_json_file(json_name)
for item in data:
item[const.SEQID] = seqid
fp = utils.gz_write(json_name)
json.dump(data, fp)
fp.close()
def fetch_data(data, param):
"""
Obtains data from NCBI. Fills each parameter with a json field.
"""
db = "protein" if param.protein else "nuccore"
# Ensure json DB is built
ncbi.build_db()
genbank, taxon_acc, refseq = ncbi.get_data()
for name in data:
# Pretend no data if it is an update.
json = None if param.update else get_json(name)
# The data exists, nothing needs to be done.
if json:
continue
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Genome assembly data.
if name.startswith("GCA") or name.startswith("GCF"):
ncbi.genome(name=name, fname=gbk_name, update=param.update, genbank=genbank,
refseq=refseq)
else:
# Genbank data.
ncbi.genbank_save(name, db=db, fname=gbk_name)
# Convert Genbank to JSON.
data = jsonrec.parse_file(fname=gbk_name, seqid=param.seqid)
# Save JSON file.
save_json_file(fname=json_name, data=data)
def genbank_view(params):
for param in params:
altname = resolve_fname(param.acc, format="gb")
if os.path.isfile(param.acc):
stream = utils.gz_read(param.acc)
elif os.path.isfile(altname):
stream = utils.gz_read(altname)
else:
stream = []
utils.error(f"data not found: {param.acc}")
for line in stream:
print(line, end='')
def get_json(name, seqid=None, inter=False, strict=False):
"""
Attempts to return a JSON formatted data based on a name.
"""
# Data is an existing path to a JSON file.
if os.path.isfile(name):
try:
data = jsonrec.parse_file(name, seqid=seqid)
except Exception as exc:
logger.error(f"JSON parsing error for file {name}: {exc}")
sys.exit(-1)
return data
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Found the JSON representation of the file.
if os.path.isfile(json_name):
logger.info(f"found {json_name}")
data = read_json_file(json_name)
return data
# There is no JSON file but there is a GenBank file.
if os.path.isfile(gbk_name):
logger.info(f"found {gbk_name}")
data = jsonrec.parse_file(fname=gbk_name, seqid=seqid)
data = save_json_file(fname=json_name, data=data)
return data
# Interactive input, make JSON from name
if inter:
data = jsonrec.make_jsonrec(name, seqid=seqid)
return data
# Raise error if in strict mode
if strict:
utils.error(f"data not found: {name}")
return None
def rename_data(data, param, newname=None):
"""
Rename data.
"""
# Will only rename a single data
newnames = newname.split(",")
for name1, name2 in zip(data, newnames):
src_json = resolve_fname(name=name1, format="json")
dest_json = resolve_fname(name=name2, format="json")
src_gb = resolve_fname(name=name1, format="gb")
dest_gb = resolve_fname(name=name2, format="gb")
if os.path.isfile(src_json):
logger.info(f"renamed {name1} as {name2}")
os.rename(src_json, dest_json)
if param.seqid:
change_seqid(dest_json, seqid=param.seqid)
else:
logger.info(f"file not found: {src_json}")
if os.path.isfile(src_gb):
if not os.path.isfile(dest_gb):
os.symlink(src_gb, dest_gb)
else:
logger.info(f"file not found: {src_gb}")
def print_data_list():
"""
Returns a list of the files in the data directory
"""
pattern = os.path.join(os.path.join(utils.DATADIR, '*.json.gz'))
matched = glob.glob(pattern)
# Extract the definition from the JSON without parsing it.
patt = re.compile(r'(definition\":\s*)(?P<value>\".+?\")')
collect = []
for path in matched:
fsize = utils.human_size(os.path.getsize(path))
base, fname = os.path.split(path)
fname = fname.rsplit(".", maxsplit=2)[0]
# Parse the first N lines
stream = gzip.open(path, 'rt') if path.endswith('gz') else open(path, 'rt')
text = stream.read(1000)
match = patt.search(text)
title = match.group("value") if match else ''
title = title.strip('", ')
# Trim the title
stitle = title[:100]
stitle = stitle + "..." if len(title) != len(stitle) else stitle
collect.append((str(fsize), f"{fname:10s}", stitle))
collect = sorted(collect, key=lambda x: x[2])
for row in collect:
line = "\t".join(row)
print(line)
@plac.pos("data", "data names")
@plac.flg('fetch', "download data as accessions")
@plac.flg('update', "updates data in storage")
@plac.opt('rename', "rename the data")
@plac.opt('seqid', "set the sequence id of the data")
@plac.flg('protein', "use the protein database")
@plac.flg('build', "build the database")
@plac.flg('verbose', "verbose mode")
def run(update=False, rename='', seqid='', protein=False, verbose=False, *data):
"""
Fetches and manages data in storage.
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# A simple wrapper class to represent input parameters.
param = objects.Param(seqid=seqid, rename=rename, start=1, protein=protein, update=update)
# Fetch the data.
fetch_data(data, param=param)
# Renaming after fetching.
if rename:
rename_data(data, param=param, newname=rename)
@plac.opt('delete', "deletes foo from storage", metavar='foo')
@plac.flg('verbose', "verbose mode")
def data(delete, verbose=False):
"""
Shows the data in the storage.
Usage:
bio data : lists the data
bio data --delete foo : deletes data called foo
bio data --delete foo,bar : deletes multiple datasets
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# Delete should be the first to execute.
if delete:
delete_data(delete)
else:
# Prints the data listing.
print_data_list() | <filename>venv/lib/python3.9/site-packages/biorun/fetch.py
"""
Handles functionality related to data storage.
"""
import sys, os, glob, re, gzip, json
from biorun import const, utils, objects, ncbi
from biorun.models import jsonrec
import biorun.libs.placlib as plac
# Module level logger.
logger = utils.logger
# A nicer error message on incorrect installation.
try:
from Bio import SeqIO
except ImportError as exc:
print(f"*** Error: {exc}", file=sys.stderr)
print(f"*** This program requires biopython", file=sys.stderr)
print(f"*** Install: conda install -y biopython>=1.78", file=sys.stderr)
sys.exit(-1)
def resolve_fname(name, format='json'):
"""
Resolve a file name given an accession number.
"""
ext = format.lower()
fname = f"{name}.{ext}.gz"
fname = os.path.join(utils.DATADIR, fname)
return fname
def delete_data(text):
"""
Deletes data under a filename.
"""
for name in text.split(","):
fname = resolve_fname(name)
if os.path.isfile(fname):
os.remove(fname)
logger.info(f"removed: {fname}")
else:
logger.info(f"file does not exist: {fname}")
def read_json_file(fname):
"""
Returns the content of a JSON file.
"""
fp = utils.gz_read(fname)
data = json.load(fp)
fp.close()
return data
def save_json_file(fname, data):
"""
Returns the content of a JSON file.
"""
fp = utils.gz_write(fname)
json.dump(data, fp)
fp.close()
logger.info(f"saved {fname}")
return data
def change_seqid(json_name, seqid):
"""
Changes the sequence id stored in a json file.
"""
if os.path.isfile(json_name):
data = read_json_file(json_name)
for item in data:
item[const.SEQID] = seqid
fp = utils.gz_write(json_name)
json.dump(data, fp)
fp.close()
def fetch_data(data, param):
"""
Obtains data from NCBI. Fills each parameter with a json field.
"""
db = "protein" if param.protein else "nuccore"
# Ensure json DB is built
ncbi.build_db()
genbank, taxon_acc, refseq = ncbi.get_data()
for name in data:
# Pretend no data if it is an update.
json = None if param.update else get_json(name)
# The data exists, nothing needs to be done.
if json:
continue
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Genome assembly data.
if name.startswith("GCA") or name.startswith("GCF"):
ncbi.genome(name=name, fname=gbk_name, update=param.update, genbank=genbank,
refseq=refseq)
else:
# Genbank data.
ncbi.genbank_save(name, db=db, fname=gbk_name)
# Convert Genbank to JSON.
data = jsonrec.parse_file(fname=gbk_name, seqid=param.seqid)
# Save JSON file.
save_json_file(fname=json_name, data=data)
def genbank_view(params):
for param in params:
altname = resolve_fname(param.acc, format="gb")
if os.path.isfile(param.acc):
stream = utils.gz_read(param.acc)
elif os.path.isfile(altname):
stream = utils.gz_read(altname)
else:
stream = []
utils.error(f"data not found: {param.acc}")
for line in stream:
print(line, end='')
def get_json(name, seqid=None, inter=False, strict=False):
"""
Attempts to return a JSON formatted data based on a name.
"""
# Data is an existing path to a JSON file.
if os.path.isfile(name):
try:
data = jsonrec.parse_file(name, seqid=seqid)
except Exception as exc:
logger.error(f"JSON parsing error for file {name}: {exc}")
sys.exit(-1)
return data
# The JSON representation of the data.
json_name = resolve_fname(name=name, format="json")
# GenBank representation of the data.
gbk_name = resolve_fname(name=name, format="gb")
# Found the JSON representation of the file.
if os.path.isfile(json_name):
logger.info(f"found {json_name}")
data = read_json_file(json_name)
return data
# There is no JSON file but there is a GenBank file.
if os.path.isfile(gbk_name):
logger.info(f"found {gbk_name}")
data = jsonrec.parse_file(fname=gbk_name, seqid=seqid)
data = save_json_file(fname=json_name, data=data)
return data
# Interactive input, make JSON from name
if inter:
data = jsonrec.make_jsonrec(name, seqid=seqid)
return data
# Raise error if in strict mode
if strict:
utils.error(f"data not found: {name}")
return None
def rename_data(data, param, newname=None):
"""
Rename data.
"""
# Will only rename a single data
newnames = newname.split(",")
for name1, name2 in zip(data, newnames):
src_json = resolve_fname(name=name1, format="json")
dest_json = resolve_fname(name=name2, format="json")
src_gb = resolve_fname(name=name1, format="gb")
dest_gb = resolve_fname(name=name2, format="gb")
if os.path.isfile(src_json):
logger.info(f"renamed {name1} as {name2}")
os.rename(src_json, dest_json)
if param.seqid:
change_seqid(dest_json, seqid=param.seqid)
else:
logger.info(f"file not found: {src_json}")
if os.path.isfile(src_gb):
if not os.path.isfile(dest_gb):
os.symlink(src_gb, dest_gb)
else:
logger.info(f"file not found: {src_gb}")
def print_data_list():
"""
Returns a list of the files in the data directory
"""
pattern = os.path.join(os.path.join(utils.DATADIR, '*.json.gz'))
matched = glob.glob(pattern)
# Extract the definition from the JSON without parsing it.
patt = re.compile(r'(definition\":\s*)(?P<value>\".+?\")')
collect = []
for path in matched:
fsize = utils.human_size(os.path.getsize(path))
base, fname = os.path.split(path)
fname = fname.rsplit(".", maxsplit=2)[0]
# Parse the first N lines
stream = gzip.open(path, 'rt') if path.endswith('gz') else open(path, 'rt')
text = stream.read(1000)
match = patt.search(text)
title = match.group("value") if match else ''
title = title.strip('", ')
# Trim the title
stitle = title[:100]
stitle = stitle + "..." if len(title) != len(stitle) else stitle
collect.append((str(fsize), f"{fname:10s}", stitle))
collect = sorted(collect, key=lambda x: x[2])
for row in collect:
line = "\t".join(row)
print(line)
@plac.pos("data", "data names")
@plac.flg('fetch', "download data as accessions")
@plac.flg('update', "updates data in storage")
@plac.opt('rename', "rename the data")
@plac.opt('seqid', "set the sequence id of the data")
@plac.flg('protein', "use the protein database")
@plac.flg('build', "build the database")
@plac.flg('verbose', "verbose mode")
def run(update=False, rename='', seqid='', protein=False, verbose=False, *data):
"""
Fetches and manages data in storage.
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# A simple wrapper class to represent input parameters.
param = objects.Param(seqid=seqid, rename=rename, start=1, protein=protein, update=update)
# Fetch the data.
fetch_data(data, param=param)
# Renaming after fetching.
if rename:
rename_data(data, param=param, newname=rename)
@plac.opt('delete', "deletes foo from storage", metavar='foo')
@plac.flg('verbose', "verbose mode")
def data(delete, verbose=False):
"""
Shows the data in the storage.
Usage:
bio data : lists the data
bio data --delete foo : deletes data called foo
bio data --delete foo,bar : deletes multiple datasets
"""
# Set the verbosity
utils.set_verbosity(logger, level=int(verbose))
# Reset counter (needed for consistency during testing).
jsonrec.reset_counter()
# Delete should be the first to execute.
if delete:
delete_data(delete)
else:
# Prints the data listing.
print_data_list() | en | 0.748943 | Handles functionality related to data storege. # Module level logger. # A nicer error message on incorrect installation. Resolve a file name given an accession number. Deletes data under a filename. Returns the content of a JSON file. Returns the content of a JSON file. Changes the sequence id stored in a json file. Obtains data from NCBI. Fills each parameter with a json field. # Ensure json DB is built # Pretend no data if it is an update. # The data exists, nothing needs to be done. # The JSON representation of the data. # GenBank representation of the data. # Genome assembly data. # Genbank data. # Convert Genbank to JSON. # Save JSON file. Attempts to return a JSON formatted data based on a name. # Data is an existing path to a JSON file. # The JSON representation of the data. # GenBank representation of the data. # Found the JSON representation of the file. # There is no JSON file but there is a GenBank file. # Interactive input, make JSON from name # Raise error if in strict mode Rename data. # Will only rename a single data Returns a list of the files in the data directory # Extract the definition from the JSON without parsing it. # Parse the first N lines # Trim the title Fetches and manages data in storage. # Set the verbosity # Reset counter (needed for consistency during testing). # A simple wrapper class to represent input parameters. # Fetch the data. # Renaming after fetching. Shows the data in the storage. Usage: bio data : lists the data bio data --delete foo : deletes data called foo bio data --delete foo,bar : deletes multiple datasets # Set the verbosity # Reset counter (needed for consistency during testing). # Delete should be the first to execute. # Prints the data listing. | 2.265965 | 2 |
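save_json_file and read_json_file above boil down to a gzip-compressed JSON round-trip, which can be reproduced with the standard library alone when inspecting the .json.gz files the module writes. A small standalone sketch — the file name is an example:

import gzip, json

records = [{"seqid": "demo", "definition": "example record"}]

with gzip.open("demo.json.gz", "wt") as fp:   # text-mode write, mirrors utils.gz_write + json.dump
    json.dump(records, fp)

with gzip.open("demo.json.gz", "rt") as fp:   # text-mode read, mirrors utils.gz_read + json.load
    loaded = json.load(fp)

print(loaded[0]["definition"])                # example record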
game/items/game_item.py | LaverdeS/Genetic_Algorithm_EGame | 2 | 8978 | <filename>game/items/game_item.py<gh_stars>1-10
import numpy as np
from random import randint
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QPointF
class GameItem():
def __init__(self, parent, boundary, position=None):
self.parent = parent
self.config = parent.config
self.items_config = self.config.items
if position is None:
_left_border = boundary
_right_border = int(self.parent.frame_dimension[0]) - boundary
_top_border = boundary
_bottom_border = int(self.parent.frame_dimension[1]) - boundary
_x = float(randint(_left_border, _right_border))
_y = float(randint(_top_border, _bottom_border))
self._position = np.array([_x, _y])
else:
self._position = position
def draw_image(self, painter):
item_image = QImage(self.image)
painter.drawImage(QPointF(self._position[0]-(item_image.height()/2),
self._position[1]-(item_image.width()/2)),
item_image) | <filename>game/items/game_item.py<gh_stars>1-10
import numpy as np
from random import randint
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QPointF
class GameItem():
def __init__(self, parent, boundary, position=None):
self.parent = parent
self.config = parent.config
self.items_config = self.config.items
if position is None:
_left_border = boundary
_right_border = int(self.parent.frame_dimension[0]) - boundary
_top_border = boundary
_bottom_border = int(self.parent.frame_dimension[1]) - boundary
_x = float(randint(_left_border, _right_border))
_y = float(randint(_top_border, _bottom_border))
self._position = np.array([_x, _y])
else:
self._position = position
def draw_image(self, painter):
item_image = QImage(self.image)
painter.drawImage(QPointF(self._position[0]-(item_image.height()/2),
self._position[1]-(item_image.width()/2)),
item_image) | none | 1 | 2.797971 | 3 |
|
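GameItem only needs a parent that exposes config and frame_dimension plus a boundary margin; when no explicit position is given it samples a uniform random point inside the margin-shrunken frame. A PyQt-free sketch of that placement logic — DummyParent is a hypothetical stand-in, not a class from the game:

from random import randint
import numpy as np

class DummyParent:
    config = None                     # GameItem also expects parent.config with an .items field
    frame_dimension = (800, 600)      # (width, height) in pixels

def random_position(parent, boundary):
    # Same bounds arithmetic as GameItem.__init__ when position is None.
    left, right = boundary, int(parent.frame_dimension[0]) - boundary
    top, bottom = boundary, int(parent.frame_dimension[1]) - boundary
    return np.array([float(randint(left, right)), float(randint(top, bottom))])

print(random_position(DummyParent(), boundary=32))   # e.g. [412. 187.]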
source/deepsecurity/models/application_type_rights.py | felipecosta09/cloudone-workload-controltower-lifecycle | 1 | 8979 | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplicationTypeRights(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_create_new_application_types': 'bool',
'can_delete_application_types': 'bool',
'can_edit_application_type_properties': 'bool'
}
attribute_map = {
'can_create_new_application_types': 'canCreateNewApplicationTypes',
'can_delete_application_types': 'canDeleteApplicationTypes',
'can_edit_application_type_properties': 'canEditApplicationTypeProperties'
}
def __init__(self, can_create_new_application_types=None, can_delete_application_types=None, can_edit_application_type_properties=None): # noqa: E501
"""ApplicationTypeRights - a model defined in Swagger""" # noqa: E501
self._can_create_new_application_types = None
self._can_delete_application_types = None
self._can_edit_application_type_properties = None
self.discriminator = None
if can_create_new_application_types is not None:
self.can_create_new_application_types = can_create_new_application_types
if can_delete_application_types is not None:
self.can_delete_application_types = can_delete_application_types
if can_edit_application_type_properties is not None:
self.can_edit_application_type_properties = can_edit_application_type_properties
@property
def can_create_new_application_types(self):
"""Gets the can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
Right to create new application types. # noqa: E501
:return: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_create_new_application_types
@can_create_new_application_types.setter
def can_create_new_application_types(self, can_create_new_application_types):
"""Sets the can_create_new_application_types of this ApplicationTypeRights.
Right to create new application types. # noqa: E501
:param can_create_new_application_types: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_create_new_application_types = can_create_new_application_types
@property
def can_delete_application_types(self):
"""Gets the can_delete_application_types of this ApplicationTypeRights. # noqa: E501
Right to delete application types. # noqa: E501
:return: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_delete_application_types
@can_delete_application_types.setter
def can_delete_application_types(self, can_delete_application_types):
"""Sets the can_delete_application_types of this ApplicationTypeRights.
Right to delete application types. # noqa: E501
:param can_delete_application_types: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_delete_application_types = can_delete_application_types
@property
def can_edit_application_type_properties(self):
"""Gets the can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
Right to edit application type properties. # noqa: E501
:return: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_edit_application_type_properties
@can_edit_application_type_properties.setter
def can_edit_application_type_properties(self, can_edit_application_type_properties):
"""Sets the can_edit_application_type_properties of this ApplicationTypeRights.
Right to edit application type properties. # noqa: E501
:param can_edit_application_type_properties: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_edit_application_type_properties = can_edit_application_type_properties
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplicationTypeRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplicationTypeRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplicationTypeRights(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'can_create_new_application_types': 'bool',
'can_delete_application_types': 'bool',
'can_edit_application_type_properties': 'bool'
}
attribute_map = {
'can_create_new_application_types': 'canCreateNewApplicationTypes',
'can_delete_application_types': 'canDeleteApplicationTypes',
'can_edit_application_type_properties': 'canEditApplicationTypeProperties'
}
def __init__(self, can_create_new_application_types=None, can_delete_application_types=None, can_edit_application_type_properties=None): # noqa: E501
"""ApplicationTypeRights - a model defined in Swagger""" # noqa: E501
self._can_create_new_application_types = None
self._can_delete_application_types = None
self._can_edit_application_type_properties = None
self.discriminator = None
if can_create_new_application_types is not None:
self.can_create_new_application_types = can_create_new_application_types
if can_delete_application_types is not None:
self.can_delete_application_types = can_delete_application_types
if can_edit_application_type_properties is not None:
self.can_edit_application_type_properties = can_edit_application_type_properties
@property
def can_create_new_application_types(self):
"""Gets the can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
Right to create new application types. # noqa: E501
:return: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_create_new_application_types
@can_create_new_application_types.setter
def can_create_new_application_types(self, can_create_new_application_types):
"""Sets the can_create_new_application_types of this ApplicationTypeRights.
Right to create new application types. # noqa: E501
:param can_create_new_application_types: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_create_new_application_types = can_create_new_application_types
@property
def can_delete_application_types(self):
"""Gets the can_delete_application_types of this ApplicationTypeRights. # noqa: E501
Right to delete application types. # noqa: E501
:return: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_delete_application_types
@can_delete_application_types.setter
def can_delete_application_types(self, can_delete_application_types):
"""Sets the can_delete_application_types of this ApplicationTypeRights.
Right to delete application types. # noqa: E501
:param can_delete_application_types: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_delete_application_types = can_delete_application_types
@property
def can_edit_application_type_properties(self):
"""Gets the can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
Right to edit application type properties. # noqa: E501
:return: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:rtype: bool
"""
return self._can_edit_application_type_properties
@can_edit_application_type_properties.setter
def can_edit_application_type_properties(self, can_edit_application_type_properties):
"""Sets the can_edit_application_type_properties of this ApplicationTypeRights.
Right to edit application type properties. # noqa: E501
:param can_edit_application_type_properties: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:type: bool
"""
self._can_edit_application_type_properties = can_edit_application_type_properties
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplicationTypeRights, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplicationTypeRights):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.681668 | # coding: utf-8 Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually. Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition. # noqa: E501 ApplicationTypeRights - a model defined in Swagger # noqa: E501 Gets the can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
Right to create new application types. # noqa: E501
:return: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool Sets the can_create_new_application_types of this ApplicationTypeRights.
Right to create new application types. # noqa: E501
:param can_create_new_application_types: The can_create_new_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool Gets the can_delete_application_types of this ApplicationTypeRights. # noqa: E501
Right to delete application types. # noqa: E501
:return: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:rtype: bool Sets the can_delete_application_types of this ApplicationTypeRights.
Right to delete application types. # noqa: E501
:param can_delete_application_types: The can_delete_application_types of this ApplicationTypeRights. # noqa: E501
:type: bool Gets the can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
Right to edit application type properties. # noqa: E501
:return: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:rtype: bool Sets the can_edit_application_type_properties of this ApplicationTypeRights.
Right to edit application type properties. # noqa: E501
:param can_edit_application_type_properties: The can_edit_application_type_properties of this ApplicationTypeRights. # noqa: E501
:type: bool Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.521312 | 2 |
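Because the generated model is a plain attribute container, instantiating it and dumping it with to_dict() is enough to exercise the accessors and the equality helpers; the values below are arbitrary examples.

rights = ApplicationTypeRights(
    can_create_new_application_types=True,
    can_delete_application_types=False,
    can_edit_application_type_properties=True,
)
print(rights.to_dict())
# {'can_create_new_application_types': True,
#  'can_delete_application_types': False,
#  'can_edit_application_type_properties': True}
same = ApplicationTypeRights(True, False, True)
print(rights == same)   # True -- __eq__ compares the underlying __dict__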
code-samples/aws_neptune.py | hardikvasa/database-journal | 45 | 8980 | from __future__ import print_function # Python 2/3 compatibility
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#initializing the graph object
graph = Graph()
#creating connection with the remote
remoteConn = DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g')
g = graph.traversal().withRemote(DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g'))
print('Connection created.')
#clearing out all the vertices to start fresh
g.V().drop().iterate()
print('Deleting everything and starting clean.')
#Adding some vertices (nodes)
gerald = g.addV('person').property('age','81').property('first_name','Gerald').property('stays_in','Portland').next()
edith = g.addV('person').property('age','78').property('first_name','Edith').property('stays_in','Portland').next()
peter = g.addV('person').property('age','52').property('first_name','Shane').property('stays_in','Seattle').next()
mary = g.addV('person').property('age','50').property('first_name','Mary').property('stays_in','Seattle').next()
betty = g.addV('person').property('age','19').property('first_name','Betty').property('stays_in','Chicago').next()
print('Added some vertices (nodes).')
#Adding relationships (edges)
edge = g.V().has('first_name', 'Gerald').addE('husband_of').to(g.V().has('first_name', 'Edith')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Edith').addE('wife_of').to(g.V().has('first_name', 'Gerald')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Gerald')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Gerald').addE('father_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Edith')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Edith').addE('mother_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('husband_of').to(g.V().has('first_name', 'Mary')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Mary').addE('wife_of').to(g.V().has('first_name', 'Shane')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Shane').addE('father_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Shane')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Mary').addE('mother_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Mary')).property('known_since','1991').next()
#print out all the node's first names
print('\n Printing first name from all nodes:')
print(g.V().first_name.toList())
#print out all the properties of the person whose first name is Shane
print('\n Printing all properties of person whose first name is Shane:')
print(g.V().has('person','first_name','Shane').valueMap().next())
#traversing the graph starting with Betty to then Shane to then Edith
print('\n Finding Betty and then looking up her parents:')
print(g.V().has('first_name', 'Betty').out('daughter_of').out('son_of').valueMap().toList())
#Print out all the nodes
print('\n Printing out all the nodes:')
people = g.V().valueMap().toList()
print(people)
#Print out all the connections (edges)
print('\n Print out all the connections (edges):')
connections = g.E().valueMap().toList()
print(connections)
#Closing the connection
remoteConn.close()
print('Connection closed!') | from __future__ import print_function # Python 2/3 compatibility
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#initializing the graph object
graph = Graph()
#creating connection with the remote
remoteConn = DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g')
g = graph.traversal().withRemote(DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g'))
print('Connection created.')
#clearing out all the vertices to start fresh
g.V().drop().iterate()
print('Deleting everything and starting clean.')
#Adding some vertices (nodes)
gerald = g.addV('person').property('age','81').property('first_name','Gerald').property('stays_in','Portland').next()
edith = g.addV('person').property('age','78').property('first_name','Edith').property('stays_in','Portland').next()
peter = g.addV('person').property('age','52').property('first_name','Shane').property('stays_in','Seattle').next()
mary = g.addV('person').property('age','50').property('first_name','Mary').property('stays_in','Seattle').next()
betty = g.addV('person').property('age','19').property('first_name','Betty').property('stays_in','Chicago').next()
print('Added some vertices (nodes).')
#Adding relationships (edges)
edge = g.V().has('first_name', 'Gerald').addE('husband_of').to(g.V().has('first_name', 'Edith')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Edith').addE('wife_of').to(g.V().has('first_name', 'Gerald')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Gerald')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Gerald').addE('father_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Edith')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Edith').addE('mother_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('husband_of').to(g.V().has('first_name', 'Mary')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Mary').addE('wife_of').to(g.V().has('first_name', 'Shane')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Shane').addE('father_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Shane')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Mary').addE('mother_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Mary')).property('known_since','1991').next()
#print out all the node's first names
print('\n Printing first name from all nodes:')
print(g.V().first_name.toList())
#print out all the properties of the person whose first name is Shane
print('\n Printing all properties of person whose first name is Shane:')
print(g.V().has('person','first_name','Shane').valueMap().next())
#traversing the graph starting with Betty to then Shane to then Edith
print('\n Finding Betty and then looking up her parents:')
print(g.V().has('first_name', 'Betty').out('daughter_of').out('son_of').valueMap().toList())
#Print out all the nodes
print('\n Printing out all the nodes:')
people = g.V().valueMap().toList()
print(people)
#Print out all the connections (edges)
print('\n Print out all the connections (edges):')
connections = g.E().valueMap().toList()
print(connections)
#Closing the connection
remoteConn.close()
print('Connection closed!') | en | 0.812514 | # Python 2/3 compatibility #initializing the graph object #creating connection with the remote #clearing out all the vertices to start fresh #Adding some vertices (nodes) #Adding relationships (edges) #print out all the node's first names #print out all the properties of person whose's first name is Shane #traversing the graph starting with Betty to then Shane to then Edith #Print out all the nodes #Print out all the connections (edges) #Closing the connection | 2.381719 | 2 |
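With the vertices and edges above in place (and before remoteConn.close() is called), the same gremlinpython traversal source supports the usual filter and aggregation steps. A few follow-up queries one might run against this graph; the commented outputs assume the data exactly as inserted and element order may vary.

print(g.V().hasLabel('person').count().next())                                    # 5 people
print(g.V().has('person', 'stays_in', 'Seattle').values('first_name').toList())   # e.g. ['Shane', 'Mary']
print(g.V().has('first_name', 'Gerald').out('father_of').values('first_name').toList())  # ['Shane']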
kits19cnn/io/preprocess_train.py | Ramsha04/kits19-2d-reproduce | 0 | 8981 | <reponame>Ramsha04/kits19-2d-reproduce
import os
from os.path import join, isdir
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import nibabel as nib
import numpy as np
import json
from .resample import resample_patient
from .custom_augmentations import resize_data_and_seg, crop_to_bbox
class Preprocessor(object):
"""
Preprocesses the original dataset (interpolated).
Procedures:
* Resampled all volumes to have a thickness of 3mm.
* Clipped to [-30, 300] HU
* z-score standardization (zero mean and unit variance)
* Standardization per 3D image instead of ACROSS THE WHOLE
TRAINING SET
* save as .npy array
* imaging.npy
* segmentation.npy (if with_masks)
"""
def __init__(self, in_dir, out_dir, cases=None, kits_json_path=None,
bbox_json_path=None, clip_values=[-30, 300], with_mask=True,
fg_classes=[0, 1, 2], resize_xy_shape=(256, 256)):
"""
Attributes:
in_dir (str): directory with the input data. Should be the
kits19/data directory.
out_dir (str): output directory where you want to save each case
cases: list of case folders to preprocess
kits_json_path (str): path to the kits.json file in the kits19/data
                directory. This should only be specified if you're resampling.
Defaults to None.
bbox_json_path (str): path to the bbox_stage1.json file made from
stage1 post-processing. Triggers cropping to the bboxes.
Defaults to None.
target_spacing (list/tuple): spacing to resample to
clip_values (list, tuple): values you want to clip CT scans to.
Defaults to None for no clipping.
with_mask (bool): whether or not to preprocess with masks or no
masks. Applicable to preprocessing test set (no labels
available).
fg_classes (list): of foreground class indices
if None, doesn't gather fg class stats.
"""
self.in_dir = in_dir
self.out_dir = out_dir
self._load_kits_json(kits_json_path)
self._load_bbox_json(bbox_json_path)
self.clip_values = clip_values
self.with_mask = with_mask
self.fg_classes = fg_classes
if not self.with_mask:
assert self.fg_classes is None, \
"When with_mask is False, fg_classes must be None."
self.cases = cases
# automatically collecting all of the case folder names
if self.cases is None:
self.cases = [os.path.join(self.in_dir, case) \
for case in os.listdir(self.in_dir) \
if case.startswith("case")]
self.cases = sorted(self.cases)
assert len(self.cases) > 0, \
"Please make sure that in_dir refers to the proper directory."
# making directory if out_dir doesn't exist
if not isdir(out_dir):
os.mkdir(out_dir)
print("Created directory: {0}".format(out_dir))
self.resize_xy_shape = tuple(resize_xy_shape)
def gen_data(self, save_fnames=["imaging", "segmentation"]):
"""
Generates and saves preprocessed data as numpy arrays (n, x, y).
Args:
task_path: file path to the task directory
(must have the corresponding "dataset.json" in it)
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
Returns:
None
"""
# Generating data and saving them recursively
for case in tqdm(self.cases):
x_path, y_path = join(case, "imaging.nii.gz"), join(case, "segmentation.nii.gz")
image = nib.load(x_path).get_fdata()[None]
label = nib.load(y_path).get_fdata()[None] if self.with_mask \
else None
preprocessed_img, preprocessed_label = self.preprocess(image,
label,
case)
if self.bbox_dict is not None:
preprocessed_img, preprocessed_label = self.crop_case_to_bbox(preprocessed_img,
preprocessed_label,
case)
self.save_imgs(preprocessed_img, preprocessed_label, case,
save_fnames=save_fnames)
def preprocess(self, image, mask, case=None):
"""
Clipping, cropping, and resampling.
Args:
image: numpy array
shape (c, n, x, y)
mask: numpy array or None
shape (c, n, x, y)
case (str): path to a case folder
Returns:
tuple of:
- preprocessed image
shape: (n, x, y)
- preprocessed mask or None
shape: (n, x, y)
"""
raw_case = Path(case).name # raw case name, i.e. case_00000
# resampling
if self.kits_json is not None:
for info_dict in self.kits_json:
# guaranteeing that the info is corresponding to the right
# case
if info_dict["case_id"] == raw_case:
case_info_dict = info_dict
break
# resampling the slices axis to 3mm
orig_spacing = (case_info_dict["captured_slice_thickness"],
case_info_dict["captured_pixel_width"],
case_info_dict["captured_pixel_width"])
target_spacing = (3,) + orig_spacing[1:]
image, mask = resample_patient(image, mask, np.array(orig_spacing),
target_spacing=np.array(target_spacing))
if self.clip_values is not None:
image = np.clip(image, self.clip_values[0], self.clip_values[1])
if self.resize_xy_shape is not None:
# image coming in : shape (c, n, h, w); mask is same shape
zdim_size = image.shape[1]
resize_xy_shape = (zdim_size,) + self.resize_xy_shape
image, mask = resize_data_and_seg(image, size=resize_xy_shape,
seg=mask)
image = standardize_per_image(image)
mask = mask.squeeze() if mask is not None else mask
return (image.squeeze(), mask)
def save_imgs(self, image, mask, case,
save_fnames=["imaging", "segmentation"]):
"""
Saves an image and mask pair as .npy arrays in the KiTS19 file structure
Args:
image: numpy array
mask: numpy array
case: path to a case folder (each element of self.cases)
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
"""
for fname in save_fnames:
assert not ".npy" in fname, \
"Filenames in save_fnames should not include .npy in the name."
# saving the generated dataset
# output dir in KiTS19 format
# extracting the raw case folder name
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
np.save(os.path.join(out_case_dir, f"{save_fnames[0]}.npy"), image)
if mask is not None:
np.save(os.path.join(out_case_dir, f"{save_fnames[1]}.npy"), mask)
def save_dir_as_2d(self, base_fnames=["imaging", "segmentation"],
delete3dcase=False):
"""
Takes preprocessed 3D numpy arrays and saves them as slices
in the same directory.
Arrays must have shape (n, h, w).
Args:
base_fnames (List[str]): names to read for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
delete3dcase (bool): whether or not to delete the 3D volume after
saving the 2D sliced versions
"""
for fname in base_fnames:
assert not ".npy" in fname, \
"Filenames in base_fnames should not include .npy in the name."
self.pos_per_class_dict = {} # saves slices per class
self.pos_per_slice_dict = defaultdict(list) # saves classes per slice
# Generating data and saving them recursively
for case in tqdm(self.cases):
# output dir in KiTS19 format
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
# assumes the .npy files have shape: (d, h, w)
paths = [join(out_case_dir, f"{base_fnames[0]}.npy"),
join(out_case_dir, f"{base_fnames[1]}.npy")]
image, label = np.load(paths[0]), np.load(paths[1])
self.save_3d_as_2d(image, label, case_raw, out_case_dir)
# to deal with colaboratory storage limitations
if delete3dcase:
os.remove(paths[0]), os.remove(paths[1])
if self.fg_classes is not None:
self._save_pos_slice_dict()
def save_3d_as_2d(self, image, mask, case_raw, out_case_dir):
"""
Saves a 3D volume as separate 2D arrays for each slice across the
axial axis. The naming convention is as follows:
imaging_{parsed_slice_idx}.npy
segmentation_{parsed_slice_idx}.npy
where parsed_slice_idx is just the slice index but filled with
zeros until it hits 5 digits (so sorting is easier.)
Args:
image: numpy array
mask: numpy array
case: raw case folder name
"""
# saving the generated dataset
# iterates through all slices and saves them individually as 2D arrays
assert len(image.shape) == 3, \
"Image shape should be (n, h, w)"
slice_idx_per_class = defaultdict(list)
for slice_idx in range(image.shape[0]):
# naming
slice_idx_str = parse_slice_idx_to_str(slice_idx)
case_str = f"{case_raw}_{slice_idx_str}"
if mask is not None:
label_slice = mask[slice_idx]
# appending fg slice indices
if self.fg_classes is not None:
for label_idx in self.fg_classes:
if label_idx != 0 and (label_slice == label_idx).any():
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
elif label_idx == 0 and np.sum(label_slice) == 0:
# for completely blank labels
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
self._save_slices(image, mask, out_case_dir=out_case_dir,
slice_idx=slice_idx, slice_idx_str=slice_idx_str)
if self.fg_classes is not None:
self.pos_per_class_dict[case_raw] = slice_idx_per_class
def _save_pos_slice_dict(self):
"""
Saves the foreground (positive) class dictionaries:
- slice_indices.json
saves the slice indices per class
{
case: {fg_class1: [slice indices...],
fg_class2: [slice indices...],
...}
}
- classes_per_slice.json
the keys are not cases, but the actual filenames that are
being read.
{
case_slice_idx_str: [classes_in_slice],
case_slice_idx_str2: [classes_in_slice],
}
"""
save_path_per_slice = join(self.out_dir, "classes_per_slice.json")
# saving the dictionaries
print(f"Logged the classes in {self.fg_classes} for each slice at",
f"{save_path_per_slice}.")
with open(save_path_per_slice, "w") as fp:
json.dump(self.pos_per_slice_dict, fp)
save_path = join(self.out_dir, "slice_indices.json")
# saving the dictionaries
print(f"Logged the slice indices for each class in {self.fg_classes} at",
f"{save_path}.")
with open(save_path, "w") as fp:
json.dump(self.pos_per_class_dict, fp)
def _save_slices(self, image, mask, out_case_dir, slice_idx,
slice_idx_str):
"""
For saving the slices in self.save_3d_as_2d()
"""
np.save(join(out_case_dir, f"imaging_{slice_idx_str}.npy"),
image[slice_idx])
if mask is not None:
label_slice = mask[slice_idx]
np.save(join(out_case_dir, f"segmentation_{slice_idx_str}.npy"),
label_slice)
def _load_kits_json(self, json_path):
"""
Loads the kits.json file into `self.kits_json`
"""
if json_path is None:
self.kits_json = None
            print("`kits_json_path` is empty, so not resampling.")
elif json_path is not None:
with open(json_path, "r") as fp:
self.kits_json = json.load(fp)
def _load_bbox_json(self, json_path):
"""
        Loads the bbox json file into `self.bbox_dict`
"""
if json_path is None:
self.bbox_dict = None
            print("bbox_json_path is empty, so not cropping volumes to their bbox.")
else:
with open(json_path, "r") as fp:
self.bbox_dict = json.load(fp)
def crop_case_to_bbox(self, image, label, case):
"""
Crops a 3D image and 3D label to the corresponding bounding box.
"""
bbox_coord = self.bbox_dict[case]
        return (crop_to_bbox(image, bbox_coord), crop_to_bbox(label, bbox_coord))
def standardize_per_image(image):
"""
Z-score standardization per image.
"""
mean, stddev = image.mean(), image.std()
return (image - mean) / stddev
def parse_slice_idx_to_str(slice_idx):
"""
Parse the slice index to a three digit string for saving and reading the
2D .npy files generated by io.preprocess.Preprocessor.
Naming convention: {type of slice}_{case}_{slice_idx}
* adding 0s to slice_idx until it reaches 3 digits,
* so sorting files is easier when stacking
"""
return f"{slice_idx:03}"
| import os
from os.path import join, isdir
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import nibabel as nib
import numpy as np
import json
from .resample import resample_patient
from .custom_augmentations import resize_data_and_seg, crop_to_bbox
class Preprocessor(object):
"""
Preprocesses the original dataset (interpolated).
Procedures:
* Resampled all volumes to have a thickness of 3mm.
* Clipped to [-30, 300] HU
* z-score standardization (zero mean and unit variance)
* Standardization per 3D image instead of ACROSS THE WHOLE
TRAINING SET
* save as .npy array
* imaging.npy
* segmentation.npy (if with_masks)
"""
def __init__(self, in_dir, out_dir, cases=None, kits_json_path=None,
bbox_json_path=None, clip_values=[-30, 300], with_mask=True,
fg_classes=[0, 1, 2], resize_xy_shape=(256, 256)):
"""
Attributes:
in_dir (str): directory with the input data. Should be the
kits19/data directory.
out_dir (str): output directory where you want to save each case
cases: list of case folders to preprocess
kits_json_path (str): path to the kits.json file in the kits19/data
directory. This only should be specfied if you're resampling.
Defaults to None.
bbox_json_path (str): path to the bbox_stage1.json file made from
stage1 post-processing. Triggers cropping to the bboxes.
Defaults to None.
target_spacing (list/tuple): spacing to resample to
clip_values (list, tuple): values you want to clip CT scans to.
Defaults to None for no clipping.
with_mask (bool): whether or not to preprocess with masks or no
masks. Applicable to preprocessing test set (no labels
available).
fg_classes (list): of foreground class indices
if None, doesn't gather fg class stats.
"""
self.in_dir = in_dir
self.out_dir = out_dir
self._load_kits_json(kits_json_path)
self._load_bbox_json(bbox_json_path)
self.clip_values = clip_values
self.with_mask = with_mask
self.fg_classes = fg_classes
if not self.with_mask:
assert self.fg_classes is None, \
"When with_mask is False, fg_classes must be None."
self.cases = cases
# automatically collecting all of the case folder names
if self.cases is None:
self.cases = [os.path.join(self.in_dir, case) \
for case in os.listdir(self.in_dir) \
if case.startswith("case")]
self.cases = sorted(self.cases)
assert len(self.cases) > 0, \
"Please make sure that in_dir refers to the proper directory."
# making directory if out_dir doesn't exist
if not isdir(out_dir):
os.mkdir(out_dir)
print("Created directory: {0}".format(out_dir))
self.resize_xy_shape = tuple(resize_xy_shape)
def gen_data(self, save_fnames=["imaging", "segmentation"]):
"""
Generates and saves preprocessed data as numpy arrays (n, x, y).
Args:
task_path: file path to the task directory
(must have the corresponding "dataset.json" in it)
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
Returns:
None
"""
# Generating data and saving them recursively
for case in tqdm(self.cases):
x_path, y_path = join(case, "imaging.nii.gz"), join(case, "segmentation.nii.gz")
image = nib.load(x_path).get_fdata()[None]
label = nib.load(y_path).get_fdata()[None] if self.with_mask \
else None
preprocessed_img, preprocessed_label = self.preprocess(image,
label,
case)
if self.bbox_dict is not None:
preprocessed_img, preprocessed_label = self.crop_case_to_bbox(preprocessed_img,
preprocessed_label,
case)
self.save_imgs(preprocessed_img, preprocessed_label, case,
save_fnames=save_fnames)
def preprocess(self, image, mask, case=None):
"""
Clipping, cropping, and resampling.
Args:
image: numpy array
shape (c, n, x, y)
mask: numpy array or None
shape (c, n, x, y)
case (str): path to a case folder
Returns:
tuple of:
- preprocessed image
shape: (n, x, y)
- preprocessed mask or None
shape: (n, x, y)
"""
raw_case = Path(case).name # raw case name, i.e. case_00000
# resampling
if self.kits_json is not None:
for info_dict in self.kits_json:
# guaranteeing that the info is corresponding to the right
# case
if info_dict["case_id"] == raw_case:
case_info_dict = info_dict
break
# resampling the slices axis to 3mm
orig_spacing = (case_info_dict["captured_slice_thickness"],
case_info_dict["captured_pixel_width"],
case_info_dict["captured_pixel_width"])
target_spacing = (3,) + orig_spacing[1:]
image, mask = resample_patient(image, mask, np.array(orig_spacing),
target_spacing=np.array(target_spacing))
if self.clip_values is not None:
image = np.clip(image, self.clip_values[0], self.clip_values[1])
if self.resize_xy_shape is not None:
# image coming in : shape (c, n, h, w); mask is same shape
zdim_size = image.shape[1]
resize_xy_shape = (zdim_size,) + self.resize_xy_shape
image, mask = resize_data_and_seg(image, size=resize_xy_shape,
seg=mask)
image = standardize_per_image(image)
mask = mask.squeeze() if mask is not None else mask
return (image.squeeze(), mask)
def save_imgs(self, image, mask, case,
save_fnames=["imaging", "segmentation"]):
"""
Saves an image and mask pair as .npy arrays in the KiTS19 file structure
Args:
image: numpy array
mask: numpy array
case: path to a case folder (each element of self.cases)
save_fnames (List[str]): save names for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
"""
for fname in save_fnames:
assert not ".npy" in fname, \
"Filenames in save_fnames should not include .npy in the name."
# saving the generated dataset
# output dir in KiTS19 format
# extracting the raw case folder name
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
np.save(os.path.join(out_case_dir, f"{save_fnames[0]}.npy"), image)
if mask is not None:
np.save(os.path.join(out_case_dir, f"{save_fnames[1]}.npy"), mask)
def save_dir_as_2d(self, base_fnames=["imaging", "segmentation"],
delete3dcase=False):
"""
Takes preprocessed 3D numpy arrays and saves them as slices
in the same directory.
Arrays must have shape (n, h, w).
Args:
base_fnames (List[str]): names to read for [image, seg] respectively.
DOESN'T INCLUDE THE .npy
delete3dcase (bool): whether or not to delete the 3D volume after
saving the 2D sliced versions
"""
for fname in base_fnames:
assert not ".npy" in fname, \
"Filenames in base_fnames should not include .npy in the name."
self.pos_per_class_dict = {} # saves slices per class
self.pos_per_slice_dict = defaultdict(list) # saves classes per slice
# Generating data and saving them recursively
for case in tqdm(self.cases):
# output dir in KiTS19 format
case_raw = Path(case).name # extracting the raw case folder name
out_case_dir = join(self.out_dir, case_raw)
# checking to make sure that the output directories exist
if not isdir(out_case_dir):
os.mkdir(out_case_dir)
# assumes the .npy files have shape: (d, h, w)
paths = [join(out_case_dir, f"{base_fnames[0]}.npy"),
join(out_case_dir, f"{base_fnames[1]}.npy")]
image, label = np.load(paths[0]), np.load(paths[1])
self.save_3d_as_2d(image, label, case_raw, out_case_dir)
# to deal with colaboratory storage limitations
if delete3dcase:
os.remove(paths[0]), os.remove(paths[1])
if self.fg_classes is not None:
self._save_pos_slice_dict()
def save_3d_as_2d(self, image, mask, case_raw, out_case_dir):
"""
Saves a 3D volume as separate 2D arrays for each slice across the
axial axis. The naming convention is as follows:
imaging_{parsed_slice_idx}.npy
segmentation_{parsed_slice_idx}.npy
where parsed_slice_idx is just the slice index but filled with
        zeros until it hits 3 digits (so sorting is easier).
Args:
image: numpy array
mask: numpy array
case: raw case folder name
"""
# saving the generated dataset
# iterates through all slices and saves them individually as 2D arrays
assert len(image.shape) == 3, \
"Image shape should be (n, h, w)"
slice_idx_per_class = defaultdict(list)
for slice_idx in range(image.shape[0]):
# naming
slice_idx_str = parse_slice_idx_to_str(slice_idx)
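            # e.g. slice 7 of case_00005 ends up as imaging_007.npy / segmentation_007.npy inside out_case_dir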
case_str = f"{case_raw}_{slice_idx_str}"
if mask is not None:
label_slice = mask[slice_idx]
# appending fg slice indices
if self.fg_classes is not None:
for label_idx in self.fg_classes:
if label_idx != 0 and (label_slice == label_idx).any():
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
elif label_idx == 0 and np.sum(label_slice) == 0:
# for completely blank labels
slice_idx_per_class[label_idx].append(slice_idx)
self.pos_per_slice_dict[case_str].append(label_idx)
self._save_slices(image, mask, out_case_dir=out_case_dir,
slice_idx=slice_idx, slice_idx_str=slice_idx_str)
if self.fg_classes is not None:
self.pos_per_class_dict[case_raw] = slice_idx_per_class
def _save_pos_slice_dict(self):
"""
Saves the foreground (positive) class dictionaries:
- slice_indices.json
saves the slice indices per class
{
case: {fg_class1: [slice indices...],
fg_class2: [slice indices...],
...}
}
- classes_per_slice.json
the keys are not cases, but the actual filenames that are
being read.
{
case_slice_idx_str: [classes_in_slice],
case_slice_idx_str2: [classes_in_slice],
}
"""
save_path_per_slice = join(self.out_dir, "classes_per_slice.json")
# saving the dictionaries
print(f"Logged the classes in {self.fg_classes} for each slice at",
f"{save_path_per_slice}.")
with open(save_path_per_slice, "w") as fp:
json.dump(self.pos_per_slice_dict, fp)
save_path = join(self.out_dir, "slice_indices.json")
# saving the dictionaries
print(f"Logged the slice indices for each class in {self.fg_classes} at",
f"{save_path}.")
with open(save_path, "w") as fp:
json.dump(self.pos_per_class_dict, fp)
def _save_slices(self, image, mask, out_case_dir, slice_idx,
slice_idx_str):
"""
For saving the slices in self.save_3d_as_2d()
"""
np.save(join(out_case_dir, f"imaging_{slice_idx_str}.npy"),
image[slice_idx])
if mask is not None:
label_slice = mask[slice_idx]
np.save(join(out_case_dir, f"segmentation_{slice_idx_str}.npy"),
label_slice)
def _load_kits_json(self, json_path):
"""
Loads the kits.json file into `self.kits_json`
"""
if json_path is None:
self.kits_json = None
            print("`kits_json_path` is empty, so not resampling.")
elif json_path is not None:
with open(json_path, "r") as fp:
self.kits_json = json.load(fp)
def _load_bbox_json(self, json_path):
"""
        Loads the bbox json file into `self.bbox_dict`
"""
if json_path is None:
self.bbox_dict = None
            print("bbox_json_path is empty, so not cropping volumes to their bbox.")
else:
with open(json_path, "r") as fp:
self.bbox_dict = json.load(fp)
def crop_case_to_bbox(self, image, label, case):
"""
Crops a 3D image and 3D label to the corresponding bounding box.
"""
bbox_coord = self.bbox_dict[case]
        return (crop_to_bbox(image, bbox_coord), crop_to_bbox(label, bbox_coord))
def standardize_per_image(image):
"""
Z-score standardization per image.
"""
mean, stddev = image.mean(), image.std()
return (image - mean) / stddev
def parse_slice_idx_to_str(slice_idx):
"""
Parse the slice index to a three digit string for saving and reading the
2D .npy files generated by io.preprocess.Preprocessor.
Naming convention: {type of slice}_{case}_{slice_idx}
* adding 0s to slice_idx until it reaches 3 digits,
* so sorting files is easier when stacking
"""
return f"{slice_idx:03}" | en | 0.734419 | Preprocesses the original dataset (interpolated). Procedures: * Resampled all volumes to have a thickness of 3mm. * Clipped to [-30, 300] HU * z-score standardization (zero mean and unit variance) * Standardization per 3D image instead of ACROSS THE WHOLE TRAINING SET * save as .npy array * imaging.npy * segmentation.npy (if with_masks) Attributes: in_dir (str): directory with the input data. Should be the kits19/data directory. out_dir (str): output directory where you want to save each case cases: list of case folders to preprocess kits_json_path (str): path to the kits.json file in the kits19/data directory. This only should be specfied if you're resampling. Defaults to None. bbox_json_path (str): path to the bbox_stage1.json file made from stage1 post-processing. Triggers cropping to the bboxes. Defaults to None. target_spacing (list/tuple): spacing to resample to clip_values (list, tuple): values you want to clip CT scans to. Defaults to None for no clipping. with_mask (bool): whether or not to preprocess with masks or no masks. Applicable to preprocessing test set (no labels available). fg_classes (list): of foreground class indices if None, doesn't gather fg class stats. # automatically collecting all of the case folder names # making directory if out_dir doesn't exist Generates and saves preprocessed data as numpy arrays (n, x, y). Args: task_path: file path to the task directory (must have the corresponding "dataset.json" in it) save_fnames (List[str]): save names for [image, seg] respectively. DOESN'T INCLUDE THE .npy Returns: None # Generating data and saving them recursively Clipping, cropping, and resampling. Args: image: numpy array shape (c, n, x, y) mask: numpy array or None shape (c, n, x, y) case (str): path to a case folder Returns: tuple of: - preprocessed image shape: (n, x, y) - preprocessed mask or None shape: (n, x, y) # raw case name, i.e. case_00000 # resampling # guaranteeing that the info is corresponding to the right # case # resampling the slices axis to 3mm # image coming in : shape (c, n, h, w); mask is same shape Saves an image and mask pair as .npy arrays in the KiTS19 file structure Args: image: numpy array mask: numpy array case: path to a case folder (each element of self.cases) save_fnames (List[str]): save names for [image, seg] respectively. DOESN'T INCLUDE THE .npy # saving the generated dataset # output dir in KiTS19 format # extracting the raw case folder name # extracting the raw case folder name # checking to make sure that the output directories exist Takes preprocessed 3D numpy arrays and saves them as slices in the same directory. Arrays must have shape (n, h, w). Args: base_fnames (List[str]): names to read for [image, seg] respectively. DOESN'T INCLUDE THE .npy delete3dcase (bool): whether or not to delete the 3D volume after saving the 2D sliced versions # saves slices per class # saves classes per slice # Generating data and saving them recursively # output dir in KiTS19 format # extracting the raw case folder name # checking to make sure that the output directories exist # assumes the .npy files have shape: (d, h, w) # to deal with colaboratory storage limitations Saves a 3D volume as separate 2D arrays for each slice across the axial axis. The naming convention is as follows: imaging_{parsed_slice_idx}.npy segmentation_{parsed_slice_idx}.npy where parsed_slice_idx is just the slice index but filled with zeros until it hits 5 digits (so sorting is easier.) 
Args: image: numpy array mask: numpy array case: raw case folder name # saving the generated dataset # iterates through all slices and saves them individually as 2D arrays # naming # appending fg slice indices # for completely blank labels Saves the foreground (positive) class dictionaries: - slice_indices.json saves the slice indices per class { case: {fg_class1: [slice indices...], fg_class2: [slice indices...], ...} } - classes_per_slice.json the keys are not cases, but the actual filenames that are being read. { case_slice_idx_str: [classes_in_slice], case_slice_idx_str2: [classes_in_slice], } # saving the dictionaries # saving the dictionaries For saving the slices in self.save_3d_as_2d() Loads the kits.json file into `self.kits_json` Loads the kits.json file into `self.kits_json` Crops a 3D image and 3D label to the corresponding bounding box. Z-score standardization per image. Parse the slice index to a three digit string for saving and reading the 2D .npy files generated by io.preprocess.Preprocessor. Naming convention: {type of slice}_{case}_{slice_idx} * adding 0s to slice_idx until it reaches 3 digits, * so sorting files is easier when stacking | 2.217607 | 2 |
setup.py | opywan/calm-dsl | 0 | 8982 | <reponame>opywan/calm-dsl
import sys
import setuptools
from setuptools.command.test import test as TestCommand
def read_file(filename):
with open(filename, "r", encoding='utf8') as f:
return f.read()
class PyTest(TestCommand):
"""PyTest"""
def finalize_options(self):
"""finalize_options"""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""run_tests"""
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setuptools.setup(
name="calm.dsl",
version="0.9.0-alpha",
author="Nutanix",
author_email="<EMAIL>",
description="Calm DSL for blueprints",
long_description=read_file("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/nutanix/calm-dsl",
packages=setuptools.find_namespace_packages(include=["calm.*"]),
namespace_packages=["calm"],
install_requires=read_file("requirements.txt"),
tests_require=read_file("dev-requirements.txt"),
cmdclass={"test": PyTest},
zip_safe=False,
include_package_data=True,
entry_points={"console_scripts": ["calm=calm.dsl.cli:main"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
],
)
| import sys
import setuptools
from setuptools.command.test import test as TestCommand
def read_file(filename):
with open(filename, "r", encoding='utf8') as f:
return f.read()
class PyTest(TestCommand):
"""PyTest"""
def finalize_options(self):
"""finalize_options"""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""run_tests"""
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setuptools.setup(
name="calm.dsl",
version="0.9.0-alpha",
author="Nutanix",
author_email="<EMAIL>",
description="Calm DSL for blueprints",
long_description=read_file("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/nutanix/calm-dsl",
packages=setuptools.find_namespace_packages(include=["calm.*"]),
namespace_packages=["calm"],
install_requires=read_file("requirements.txt"),
tests_require=read_file("dev-requirements.txt"),
cmdclass={"test": PyTest},
zip_safe=False,
include_package_data=True,
entry_points={"console_scripts": ["calm=calm.dsl.cli:main"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
],
) | en | 0.665701 | PyTest finalize_options run_tests | 1.913758 | 2 |
hanlp/pretrained/tok.py | chen88358323/HanLP | 2 | 8983 | <reponame>chen88358323/HanLP
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 21:12
from hanlp_common.constant import HANLP_URL
SIGHAN2005_PKU_CONVSEG = HANLP_URL + 'tok/sighan2005-pku-convseg_20200110_153722.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 pku dataset.'
SIGHAN2005_MSR_CONVSEG = HANLP_URL + 'tok/convseg-msr-nocrf-noembed_20200110_153524.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 msr dataset.'
CTB6_CONVSEG = HANLP_URL + 'tok/ctb6_convseg_nowe_nocrf_20200110_004046.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on CTB6 dataset.'
PKU_NAME_MERGED_SIX_MONTHS_CONVSEG = HANLP_URL + 'tok/pku98_6m_conv_ngram_20200110_134736.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on pku98 six months dataset with familiy name and given name merged into one unit.'
LARGE_ALBERT_BASE = HANLP_URL + 'tok/large_corpus_cws_albert_base_20211228_160926.zip'
'ALBERT model (:cite:`Lan2020ALBERT:`) trained on the largest CWS dataset in the world.'
SIGHAN2005_PKU_BERT_BASE_ZH = HANLP_URL + 'tok/sighan2005_pku_bert_base_zh_20201231_141130.zip'
'BERT model (:cite:`devlin-etal-2019-bert`) trained on sighan2005 pku dataset.'
COARSE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/coarse_electra_small_20220220_013548.zip'
'Electra (:cite:`clark2020electra`) small model trained on coarse-grained CWS corpora. Its performance is P=96.97% R=96.87% F1=96.92% which is ' \
'much higher than that of MTL model '
FINE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/fine_electra_small_20220217_190117.zip'
'Electra (:cite:`clark2020electra`) small model trained on fine-grained CWS corpora. Its performance is P=97.44% R=97.40% F1=97.42% which is ' \
'much higher than that of MTL model '
CTB9_TOK_ELECTRA_SMALL = HANLP_URL + 'tok/ctb9_electra_small_20220215_205427.zip'
'Electra (:cite:`clark2020electra`) small model trained on CTB9. Its performance is P=97.15% R=97.36% F1=97.26% which is ' \
'much higher than that of MTL model '
# Will be filled up during runtime
ALL = {}
| # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 21:12
from hanlp_common.constant import HANLP_URL
SIGHAN2005_PKU_CONVSEG = HANLP_URL + 'tok/sighan2005-pku-convseg_20200110_153722.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 pku dataset.'
SIGHAN2005_MSR_CONVSEG = HANLP_URL + 'tok/convseg-msr-nocrf-noembed_20200110_153524.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 msr dataset.'
CTB6_CONVSEG = HANLP_URL + 'tok/ctb6_convseg_nowe_nocrf_20200110_004046.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on CTB6 dataset.'
PKU_NAME_MERGED_SIX_MONTHS_CONVSEG = HANLP_URL + 'tok/pku98_6m_conv_ngram_20200110_134736.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on pku98 six months dataset with familiy name and given name merged into one unit.'
LARGE_ALBERT_BASE = HANLP_URL + 'tok/large_corpus_cws_albert_base_20211228_160926.zip'
'ALBERT model (:cite:`Lan2020ALBERT:`) trained on the largest CWS dataset in the world.'
SIGHAN2005_PKU_BERT_BASE_ZH = HANLP_URL + 'tok/sighan2005_pku_bert_base_zh_20201231_141130.zip'
'BERT model (:cite:`devlin-etal-2019-bert`) trained on sighan2005 pku dataset.'
COARSE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/coarse_electra_small_20220220_013548.zip'
'Electra (:cite:`clark2020electra`) small model trained on coarse-grained CWS corpora. Its performance is P=96.97% R=96.87% F1=96.92% which is ' \
'much higher than that of MTL model '
FINE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/fine_electra_small_20220217_190117.zip'
'Electra (:cite:`clark2020electra`) small model trained on fine-grained CWS corpora. Its performance is P=97.44% R=97.40% F1=97.42% which is ' \
'much higher than that of MTL model '
CTB9_TOK_ELECTRA_SMALL = HANLP_URL + 'tok/ctb9_electra_small_20220215_205427.zip'
'Electra (:cite:`clark2020electra`) small model trained on CTB9. Its performance is P=97.15% R=97.36% F1=97.26% which is ' \
'much higher than that of MTL model '
# Will be filled up during runtime
ALL = {} | en | 0.876793 | # -*- coding:utf-8 -*- # Author: hankcs # Date: 2019-12-28 21:12 # Will be filled up during runtime | 1.330566 | 1 |
third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py | bopopescu/webrtc-streaming-node | 8 | 8984 | #!/usr/bin/env python
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
import shutil
import unittest
import re
THIS_FILE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(THIS_FILE)))
from utils import logging_utils
# PID YYYY-MM-DD HH:MM:SS.MMM
_LOG_HEADER = r'^%d \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d' % os.getpid()
_LOG_HEADER_PID = r'^\d+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d'
_PHASE = 'LOGGING_UTILS_TESTS_PHASE'
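# The rotating-log test re-runs this file in a child process (see call() below); the _PHASE
# environment variable tells main() which test_rotating_phase_* function to dispatch to.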
def call(phase, cwd):
"""Calls itself back."""
env = os.environ.copy()
env[_PHASE] = phase
return subprocess.call([sys.executable, '-u', THIS_FILE], env=env, cwd=cwd)
class Test(unittest.TestCase):
def setUp(self):
super(Test, self).setUp()
self.tmp = tempfile.mkdtemp(prefix='logging_utils')
def tearDown(self):
try:
shutil.rmtree(self.tmp)
finally:
super(Test, self).tearDown()
def test_capture(self):
root = logging.RootLogger(logging.DEBUG)
with logging_utils.CaptureLogs('foo', root) as log:
root.debug('foo')
result = log.read()
expected = _LOG_HEADER + ': DEBUG foo\n$'
if sys.platform == 'win32':
expected = expected.replace('\n', '\r\n')
self.assertTrue(re.match(expected, result), (expected, result))
def test_prepare_logging(self):
root = logging.RootLogger(logging.DEBUG)
filepath = os.path.join(self.tmp, 'test.log')
logging_utils.prepare_logging(filepath, root)
root.debug('foo')
with open(filepath, 'rb') as f:
result = f.read()
# It'd be nice to figure out a way to ensure it's properly in UTC but it's
# tricky to do reliably.
expected = _LOG_HEADER + ' D: foo\n$'
self.assertTrue(re.match(expected, result), (expected, result))
def test_rotating(self):
# Create a rotating log. Create a subprocess then delete the file. Make sure
# nothing blows up.
# Everything is done in a child process because the called functions mutate
# the global state.
self.assertEqual(0, call('test_rotating_phase_1', cwd=self.tmp))
self.assertEqual({'shared.1.log'}, set(os.listdir(self.tmp)))
with open(os.path.join(self.tmp, 'shared.1.log'), 'rb') as f:
lines = f.read().splitlines()
expected = [
r' I: Parent1',
r' I: Child1',
r' I: Child2',
r' I: Parent2',
]
for e, l in zip(expected, lines):
ex = _LOG_HEADER_PID + e + '$'
self.assertTrue(re.match(ex, l), (ex, l))
self.assertEqual(len(expected), len(lines))
def test_rotating_phase_1():
logging_utils.prepare_logging('shared.log')
logging.info('Parent1')
r = call('test_rotating_phase_2', None)
logging.info('Parent2')
return r
def test_rotating_phase_2():
# Simulate rotating the log.
logging_utils.prepare_logging('shared.log')
logging.info('Child1')
os.rename('shared.log', 'shared.1.log')
logging.info('Child2')
return 0
def main():
phase = os.environ.get(_PHASE)
if phase:
return getattr(sys.modules[__name__], phase)()
verbose = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if verbose else logging.ERROR)
unittest.main()
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
import shutil
import unittest
import re
THIS_FILE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(THIS_FILE)))
from utils import logging_utils
# PID YYYY-MM-DD HH:MM:SS.MMM
_LOG_HEADER = r'^%d \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d' % os.getpid()
_LOG_HEADER_PID = r'^\d+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d'
_PHASE = 'LOGGING_UTILS_TESTS_PHASE'
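# The rotating-log test re-runs this file in a child process (see call() below); the _PHASE
# environment variable tells main() which test_rotating_phase_* function to dispatch to.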
def call(phase, cwd):
"""Calls itself back."""
env = os.environ.copy()
env[_PHASE] = phase
return subprocess.call([sys.executable, '-u', THIS_FILE], env=env, cwd=cwd)
class Test(unittest.TestCase):
def setUp(self):
super(Test, self).setUp()
self.tmp = tempfile.mkdtemp(prefix='logging_utils')
def tearDown(self):
try:
shutil.rmtree(self.tmp)
finally:
super(Test, self).tearDown()
def test_capture(self):
root = logging.RootLogger(logging.DEBUG)
with logging_utils.CaptureLogs('foo', root) as log:
root.debug('foo')
result = log.read()
expected = _LOG_HEADER + ': DEBUG foo\n$'
if sys.platform == 'win32':
expected = expected.replace('\n', '\r\n')
self.assertTrue(re.match(expected, result), (expected, result))
def test_prepare_logging(self):
root = logging.RootLogger(logging.DEBUG)
filepath = os.path.join(self.tmp, 'test.log')
logging_utils.prepare_logging(filepath, root)
root.debug('foo')
with open(filepath, 'rb') as f:
result = f.read()
# It'd be nice to figure out a way to ensure it's properly in UTC but it's
# tricky to do reliably.
expected = _LOG_HEADER + ' D: foo\n$'
self.assertTrue(re.match(expected, result), (expected, result))
def test_rotating(self):
# Create a rotating log. Create a subprocess then delete the file. Make sure
# nothing blows up.
# Everything is done in a child process because the called functions mutate
# the global state.
self.assertEqual(0, call('test_rotating_phase_1', cwd=self.tmp))
self.assertEqual({'shared.1.log'}, set(os.listdir(self.tmp)))
with open(os.path.join(self.tmp, 'shared.1.log'), 'rb') as f:
lines = f.read().splitlines()
expected = [
r' I: Parent1',
r' I: Child1',
r' I: Child2',
r' I: Parent2',
]
for e, l in zip(expected, lines):
ex = _LOG_HEADER_PID + e + '$'
self.assertTrue(re.match(ex, l), (ex, l))
self.assertEqual(len(expected), len(lines))
def test_rotating_phase_1():
logging_utils.prepare_logging('shared.log')
logging.info('Parent1')
r = call('test_rotating_phase_2', None)
logging.info('Parent2')
return r
def test_rotating_phase_2():
# Simulate rotating the log.
logging_utils.prepare_logging('shared.log')
logging.info('Child1')
os.rename('shared.log', 'shared.1.log')
logging.info('Child2')
return 0
def main():
phase = os.environ.get(_PHASE)
if phase:
return getattr(sys.modules[__name__], phase)()
verbose = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if verbose else logging.ERROR)
unittest.main()
if __name__ == '__main__':
sys.exit(main())
| en | 0.891518 | #!/usr/bin/env python # Copyright 2015 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. # PID YYYY-MM-DD HH:MM:SS.MMM Calls itself back. # It'd be nice to figure out a way to ensure it's properly in UTC but it's # tricky to do reliably. # Create a rotating log. Create a subprocess then delete the file. Make sure # nothing blows up. # Everything is done in a child process because the called functions mutate # the global state. # Simulate rotating the log. | 2.257668 | 2 |
model_selection.py | HrishikV/ineuron_inceome_prediction_internship | 0 | 8985 | from featur_selection import df,race,occupation,workclass,country
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score,KFold
from sklearn.linear_model import LogisticRegression
from imblearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from imblearn.combine import SMOTETomek
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from sklearn.svm import SVC
from matplotlib import pyplot as plt
import seaborn as sns
df1=df.copy()
salary=df1['salary'].reset_index(drop=True)
df1=df1.drop(['salary'],axis=1)
def concat_dataframes(data):
dataframe = pd.concat([data, workclass.iloc[data.index, :], race.iloc[data.index , :], occupation.iloc[data.index, :], country.iloc[data.index, :]], axis = 1)
dataframe = dataframe.dropna()
dataframe = dataframe.reset_index(drop=True)
return dataframe
df1= concat_dataframes(df1)
features=['age_logarthmic','hours_per_week']
scaler = ColumnTransformer(transformers = [('scale_num_features', StandardScaler(), features)], remainder='passthrough')
models = [LogisticRegression(), SVC(), AdaBoostClassifier(), RandomForestClassifier(), XGBClassifier(),DecisionTreeClassifier(), KNeighborsClassifier(), CatBoostClassifier()]
model_labels = ['LogisticReg.','SVC','AdaBoost','RandomForest','Xgboost','DecisionTree','KNN', 'CatBoost']
mean_validation_f1_scores = []
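# Scaling and SMOTETomek resampling live inside the imblearn Pipeline, so they are re-fit on the
# training folds only during cross-validation and never leak into the fold being scored.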
for model in models:
data_pipeline = Pipeline(steps = [
('scaler', scaler),
('resample', SMOTETomek()),
('model', model)
])
mean_validation_f1 = float(cross_val_score(data_pipeline, df1, salary, cv=KFold(n_splits=10), scoring='f1',n_jobs=-1).mean())
mean_validation_f1_scores.append(mean_validation_f1)
print(mean_validation_f1_scores)
fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (15,8))
sns.set_style('dark')
sns.barplot(y = model_labels ,x = mean_validation_f1_scores, ax=axes[0])
axes[0].grid(True, color='k')
sns.set_style('whitegrid')
sns.lineplot(x = model_labels, y = mean_validation_f1_scores)
axes[1].grid(True, color='k')
fig.show() | from featur_selection import df,race,occupation,workclass,country
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score,KFold
from sklearn.linear_model import LogisticRegression
from imblearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from imblearn.combine import SMOTETomek
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from sklearn.svm import SVC
from matplotlib import pyplot as plt
import seaborn as sns
df1=df.copy()
salary=df1['salary'].reset_index(drop=True)
df1=df1.drop(['salary'],axis=1)
def concat_dataframes(data):
dataframe = pd.concat([data, workclass.iloc[data.index, :], race.iloc[data.index , :], occupation.iloc[data.index, :], country.iloc[data.index, :]], axis = 1)
dataframe = dataframe.dropna()
dataframe = dataframe.reset_index(drop=True)
return dataframe
df1= concat_dataframes(df1)
features=['age_logarthmic','hours_per_week']
scaler = ColumnTransformer(transformers = [('scale_num_features', StandardScaler(), features)], remainder='passthrough')
models = [LogisticRegression(), SVC(), AdaBoostClassifier(), RandomForestClassifier(), XGBClassifier(),DecisionTreeClassifier(), KNeighborsClassifier(), CatBoostClassifier()]
model_labels = ['LogisticReg.','SVC','AdaBoost','RandomForest','Xgboost','DecisionTree','KNN', 'CatBoost']
mean_validation_f1_scores = []
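# Scaling and SMOTETomek resampling live inside the imblearn Pipeline, so they are re-fit on the
# training folds only during cross-validation and never leak into the fold being scored.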
for model in models:
data_pipeline = Pipeline(steps = [
('scaler', scaler),
('resample', SMOTETomek()),
('model', model)
])
mean_validation_f1 = float(cross_val_score(data_pipeline, df1, salary, cv=KFold(n_splits=10), scoring='f1',n_jobs=-1).mean())
mean_validation_f1_scores.append(mean_validation_f1)
print(mean_validation_f1_scores)
fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (15,8))
sns.set_style('dark')
sns.barplot(y = model_labels ,x = mean_validation_f1_scores, ax=axes[0])
axes[0].grid(True, color='k')
sns.set_style('whitegrid')
sns.lineplot(x = model_labels, y = mean_validation_f1_scores)
axes[1].grid(True, color='k')
fig.show() | none | 1 | 2.798963 | 3 |
|
tests/apps/newlayout/tasks/init_data.py | blazelibs/blazeweb | 0 | 8986 | <gh_stars>0
from __future__ import print_function
def action_010():
print('doit')
| from __future__ import print_function
def action_010():
print('doit') | none | 1 | 1.339277 | 1 |
|
2021/d8b_bits.py | apie/advent-of-code | 4 | 8987 | <reponame>apie/advent-of-code<gh_stars>1-10
#!/usr/bin/env python3
import pytest
import fileinput
from os.path import splitext, abspath
F_NAME = 'd8'
#implement day8 using bits
def find_ones(d):
'''count number of ones in binary number'''
ones = 0
while d > 0:
ones += d & 1
d >>= 1
return ones
# Assign each segment a 'wire'.
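# Each letter maps to a single bit, so a set of lit segments becomes a 7-bit mask and
# "all of digit X's segments appear in pattern p" reduces to a bitwise AND subset test.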
lut = {
'a':0b0000001,
'b':0b0000010,
'c':0b0000100,
'd':0b0001000,
'e':0b0010000,
'f':0b0100000,
'g':0b1000000,
}
def solve_line(line):
def solve_output_val(output_values):
'''Look up each output val in binary repr in the mapping and add them together shifting each digit to the left.'''
output = 0
for o in output_values:
b_val = sum(lut[c] for c in o)
for k,v in mapping.items():
if v == b_val:
output = output*10 + k
break
else:
raise Exception(b_val, 'not found')
return output
def found(digit, bit_pattern):
mapping[digit] = bit_pattern
bpatterns.remove(bit_pattern)
signal_pattern, output_value = line.split(' | ')
# Convert letter string to binary pattern
bpatterns = {
sum(lut[c] for c in p)
for p in signal_pattern.split()
}
## Search for each digit and if found, remove it from bpatterns and add the digit to the mapping.
######################################
mapping = {}
# 1,4,7,8 all have a unique count of segments. Find them.
for bp in list(bpatterns):
if find_ones(bp) == 2:
found(1, bp)
elif find_ones(bp) == 4:
found(4, bp)
elif find_ones(bp) == 3:
found(7, bp)
elif find_ones(bp) == 7:
found(8, bp)
# Find 0, 6, 9. All have 6 segments
for bp in list(bpatterns):
if find_ones(bp) != 6:
continue
#is 4 contained within p, then it is 9
if mapping[4] & bp >= mapping[4]:
found(9, bp)
#is 1 contained within p, then it is 0
elif mapping[1] & bp >= mapping[1]:
found(0, bp)
else: # 6 is left
found(6, bp)
#is p contained within 6, then it is 5
for bp in bpatterns:
if mapping[6] & bp >= bp:
found(5, bp)
break
#is p contained within 9, and it is not 8 or 5, then it is 3
for bp in bpatterns:
if mapping[9] & bp >= bp:
found(3, bp)
break
assert len(bpatterns) == 1, bpatterns
#what is left is 2
for bp in bpatterns:
found(2, bp)
break
assert len(bpatterns) == 0, bpatterns
return solve_output_val(output_value.split())
def answer(lines):
return sum(solve_line(line) for line in map(str.strip, lines))
@pytest.fixture
def example_input1():
return fileinput.input(F_NAME + '.test.1')
def test_answer1(example_input1):
assert answer(example_input1) == 5353
@pytest.fixture
def example_input():
return fileinput.input(F_NAME + '.test')
def test_answer(example_input):
assert answer(example_input) == 61229
if __name__ == '__main__':
import timeit
start = timeit.default_timer()
filename = fileinput.input(F_NAME + '.input')
ans = answer(filename)
print('Answer:', ans)
duration = timeit.default_timer()-start
print(f'Execution time: {duration:.3f} s')
| #!/usr/bin/env python3
import pytest
import fileinput
from os.path import splitext, abspath
F_NAME = 'd8'
#implement day8 using bits
def find_ones(d):
'''count number of ones in binary number'''
ones = 0
while d > 0:
ones += d & 1
d >>= 1
return ones
# Assign each segment a 'wire'.
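# Each letter maps to a single bit, so a set of lit segments becomes a 7-bit mask and
# "all of digit X's segments appear in pattern p" reduces to a bitwise AND subset test.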
lut = {
'a':0b0000001,
'b':0b0000010,
'c':0b0000100,
'd':0b0001000,
'e':0b0010000,
'f':0b0100000,
'g':0b1000000,
}
def solve_line(line):
def solve_output_val(output_values):
'''Look up each output val in binary repr in the mapping and add them together shifting each digit to the left.'''
output = 0
for o in output_values:
b_val = sum(lut[c] for c in o)
for k,v in mapping.items():
if v == b_val:
output = output*10 + k
break
else:
raise Exception(b_val, 'not found')
return output
def found(digit, bit_pattern):
mapping[digit] = bit_pattern
bpatterns.remove(bit_pattern)
signal_pattern, output_value = line.split(' | ')
# Convert letter string to binary pattern
bpatterns = {
sum(lut[c] for c in p)
for p in signal_pattern.split()
}
## Search for each digit and if found, remove it from bpatterns and add the digit to the mapping.
######################################
mapping = {}
# 1,4,7,8 all have a unique count of segments. Find them.
for bp in list(bpatterns):
if find_ones(bp) == 2:
found(1, bp)
elif find_ones(bp) == 4:
found(4, bp)
elif find_ones(bp) == 3:
found(7, bp)
elif find_ones(bp) == 7:
found(8, bp)
# Find 0, 6, 9. All have 6 segments
for bp in list(bpatterns):
if find_ones(bp) != 6:
continue
#is 4 contained within p, then it is 9
if mapping[4] & bp >= mapping[4]:
found(9, bp)
#is 1 contained within p, then it is 0
elif mapping[1] & bp >= mapping[1]:
found(0, bp)
else: # 6 is left
found(6, bp)
#is p contained within 6, then it is 5
for bp in bpatterns:
if mapping[6] & bp >= bp:
found(5, bp)
break
#is p contained within 9, and it is not 8 or 5, then it is 3
for bp in bpatterns:
if mapping[9] & bp >= bp:
found(3, bp)
break
assert len(bpatterns) == 1, bpatterns
#what is left is 2
for bp in bpatterns:
found(2, bp)
break
assert len(bpatterns) == 0, bpatterns
return solve_output_val(output_value.split())
def answer(lines):
return sum(solve_line(line) for line in map(str.strip, lines))
@pytest.fixture
def example_input1():
return fileinput.input(F_NAME + '.test.1')
def test_answer1(example_input1):
assert answer(example_input1) == 5353
@pytest.fixture
def example_input():
return fileinput.input(F_NAME + '.test')
def test_answer(example_input):
assert answer(example_input) == 61229
if __name__ == '__main__':
import timeit
start = timeit.default_timer()
filename = fileinput.input(F_NAME + '.input')
ans = answer(filename)
print('Answer:', ans)
duration = timeit.default_timer()-start
print(f'Execution time: {duration:.3f} s') | en | 0.789035 | #!/usr/bin/env python3 #implement day8 using bits count number of ones in binary number # Assign each segment a 'wire'. Look up each output val in binary repr in the mapping and add them together shifting each digit to the left. # Convert letter string to binary pattern ## Search for each digit and if found, remove it from bpatterns and add the digit to the mapping. ###################################### # 1,4,7,8 all have a unique count of segments. Find them. # Find 0, 6, 9. All have 6 segments #is 4 contained within p, then it is 9 #is 1 contained within p, then it is 0 # 6 is left #is p contained within 6, then it is 5 #is p contained within 9, and it is not 8 or 5, then it is 3 #what is left is 2 | 3.340067 | 3 |
frame_dataloader/spatial_dataloader.py | rizkiailham/two-stream-action-recognition-1 | 67 | 8988 | """
********************************
* Created by mohammed-alaa *
********************************
Spatial Dataloader implementing sequence api from keras (defines how to load a single item)
this loads batches of images for each iteration it returns [batch_size, height, width ,3] ndarrays
"""
import copy
import random
import cv2
import numpy as np
import tensorflow.keras as keras
from .UCF_splitting_kernel import *
from .helpers import get_training_augmenter, get_validation_augmenter
class SpatialSequence(keras.utils.Sequence):
def __init__(self, data_to_load, data_root_path, batch_size, is_training, augmenter):
"""get data structure to load data"""
# list of (video names,frame/max_frame,label)
self.data_to_load = copy.deepcopy(data_to_load)
self.batch_size = batch_size
self.is_training = is_training
self.augmenter = copy.deepcopy(augmenter)
self.data_root_path = data_root_path
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # three lists
def __len__(self):
"""Denotes the number of batches per epoch"""
return (len(self.video_names) + self.batch_size - 1) // self.batch_size # ceiling div
def get_actual_length(self):
"""Denotes the total number of samples"""
return len(self.video_names)
def __getitem__(self, batch_start):
"""Gets one batch"""
batch_video_names = self.video_names[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_frames = self.frames[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_y = np.array(self.labels[batch_start * self.batch_size:(batch_start + 1) * self.batch_size])
batch_x = [] # could be less or equal batch size
#
for vid_id, _ in enumerate(batch_y):
if self.is_training: # max frame is given
frame_id = random.randint(1, batch_frames[vid_id]) # random frame (one based)
else:
frame_id = batch_frames[vid_id] # just as selected
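            # frames on disk live under v_<video_name>/ as frame000001.jpg onwards (1-based, zero-padded to 6 digits)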
batch_x.append(
cv2.cvtColor(cv2.imread(os.path.join(self.data_root_path, "v_" + batch_video_names[vid_id], 'frame{}'.format(str(frame_id).zfill(6)) + '.jpg')), cv2.COLOR_BGR2RGB)
)
if self.is_training:
return np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0, batch_y
else:
# no label needed since (test_video_to_label mapping) (dictionary of name to label) is returned
return batch_video_names, np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0
def shuffle_and_reset(self):
"""
new data for the next epoch
"""
random.shuffle(self.data_to_load)
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # shuffle all
class SpatialDataLoader:
def __init__(self, batch_size, testing_samples_per_video, width, height, log_stream=open("/tmp/null.log", "w"), augmenter_level=1, data_root_path='./jpegs_256/', ucf_list_path='./UCF_list/', ucf_split='01'):
"""
get the mapping and initialize the augmenter
"""
self.batch_size = batch_size
self.width, self.height = width, height
self.data_root_path = data_root_path
self.testing_samples_per_video = testing_samples_per_video
self.log_stream = log_stream
# split the training and testing videos
data_util_ = DataUtil(path=ucf_list_path, split=ucf_split)
self.train_video_to_label, self.test_video_to_label = data_util_.get_train_test_video_to_label_mapping() # name without v_ or .avi and small s .. name to numeric label starts at 0
# get video frames
self.video_frame_count = data_util_.get_video_frame_count() # name without v_ or .avi and small s
self.augmenter_level = augmenter_level
def run(self):
"""
get the data structure for training and validation
"""
train_loader = self.get_training_loader()
val_loader = self.get_testing_loader()
return train_loader, val_loader, self.test_video_to_label
def get_training_data_structure(self):
"""
get the data structure for training
"""
training_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.train_video_to_label: # sample from the whole video frames
training_data_structure.append((video_name, self.video_frame_count[video_name], self.train_video_to_label[video_name]))
return training_data_structure
def get_testing_data_structure(self):
"""
get the data structure for validation
"""
test_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.test_video_to_label:
nb_frame = self.video_frame_count[video_name]
interval = nb_frame // self.testing_samples_per_video
if interval == 0: # for videos shorter than self.testing_samples_per_video
interval = 1
# range is exclusive add one to be inclusive
# 1 > self.testing_samples_per_video * interval
for frame_idx in range(1, min(self.testing_samples_per_video * interval, nb_frame) + 1, interval):
test_data_structure.append((video_name, frame_idx, self.test_video_to_label[video_name]))
return test_data_structure
def get_training_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_training_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=True,
augmenter=get_training_augmenter(height=self.height, width=self.width, augmenter_level=self.augmenter_level),
)
print('==> Training data :', len(loader.data_to_load), 'videos', file=self.log_stream)
print('==> Training data :', len(loader.data_to_load), 'videos')
return loader
def get_testing_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_testing_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=False,
augmenter=get_validation_augmenter(height=self.height, width=self.width),
)
print('==> Validation data :', len(loader.data_to_load), 'frames', file=self.log_stream)
print('==> Validation data :', len(loader.data_to_load), 'frames')
return loader
if __name__ == '__main__':
    data_loader = SpatialDataLoader(batch_size=64,  # data_root_path="data",
                                    ucf_split='01',
                                    testing_samples_per_video=19, width=224, height=224)
train_loader, test_loader, test_video_level_label = data_loader.run()
print(len(train_loader))
print(len(test_loader))
print(train_loader.get_actual_length())
print(test_loader.get_actual_length())
    print(train_loader[0][0].shape, train_loader[0][1].shape)
# import tqdm
# progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader))
# for (sampled_frame, label) in progress:
# pass
import matplotlib.pyplot as plt
# preview raw data
def preview(data, labels):
# 3 channels
fig, axeslist = plt.subplots(ncols=8, nrows=8, figsize=(10, 10))
for i, sample in enumerate(data):
axeslist.ravel()[i].imshow(data[i])
axeslist.ravel()[i].set_title(labels[i])
axeslist.ravel()[i].set_axis_off()
plt.subplots_adjust(wspace=.4, hspace=.4)
print("train sample")
    train_x, train_y = train_loader[0]
    print(train_x.shape, train_y.shape)
    print(train_y)
    preview(train_x, train_y)
    print("test sample")  # same names repeat testing_samples_per_video times since there is no shuffling
    test_names, test_x = test_loader[0]
    print(test_x.shape)
    print(test_names)
    preview(test_x, test_names)
| """
********************************
* Created by mohammed-alaa *
********************************
Spatial Dataloader implementing sequence api from keras (defines how to load a single item)
this loads batches of images for each iteration it returns [batch_size, height, width ,3] ndarrays
"""
import copy
import random
import cv2
import numpy as np
import tensorflow.keras as keras
from .UCF_splitting_kernel import *
from .helpers import get_training_augmenter, get_validation_augmenter
class SpatialSequence(keras.utils.Sequence):
def __init__(self, data_to_load, data_root_path, batch_size, is_training, augmenter):
"""get data structure to load data"""
# list of (video names,frame/max_frame,label)
self.data_to_load = copy.deepcopy(data_to_load)
self.batch_size = batch_size
self.is_training = is_training
self.augmenter = copy.deepcopy(augmenter)
self.data_root_path = data_root_path
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # three lists
def __len__(self):
"""Denotes the number of batches per epoch"""
return (len(self.video_names) + self.batch_size - 1) // self.batch_size # ceiling div
def get_actual_length(self):
"""Denotes the total number of samples"""
return len(self.video_names)
def __getitem__(self, batch_start):
"""Gets one batch"""
batch_video_names = self.video_names[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_frames = self.frames[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_y = np.array(self.labels[batch_start * self.batch_size:(batch_start + 1) * self.batch_size])
batch_x = [] # could be less or equal batch size
#
for vid_id, _ in enumerate(batch_y):
if self.is_training: # max frame is given
frame_id = random.randint(1, batch_frames[vid_id]) # random frame (one based)
else:
frame_id = batch_frames[vid_id] # just as selected
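            # frames on disk live under v_<video_name>/ as frame000001.jpg onwards (1-based, zero-padded to 6 digits)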
batch_x.append(
cv2.cvtColor(cv2.imread(os.path.join(self.data_root_path, "v_" + batch_video_names[vid_id], 'frame{}'.format(str(frame_id).zfill(6)) + '.jpg')), cv2.COLOR_BGR2RGB)
)
if self.is_training:
return np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0, batch_y
else:
# no label needed since (test_video_to_label mapping) (dictionary of name to label) is returned
return batch_video_names, np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0
def shuffle_and_reset(self):
"""
new data for the next epoch
"""
random.shuffle(self.data_to_load)
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # shuffle all
class SpatialDataLoader:
def __init__(self, batch_size, testing_samples_per_video, width, height, log_stream=open("/tmp/null.log", "w"), augmenter_level=1, data_root_path='./jpegs_256/', ucf_list_path='./UCF_list/', ucf_split='01'):
"""
get the mapping and initialize the augmenter
"""
self.batch_size = batch_size
self.width, self.height = width, height
self.data_root_path = data_root_path
self.testing_samples_per_video = testing_samples_per_video
self.log_stream = log_stream
# split the training and testing videos
data_util_ = DataUtil(path=ucf_list_path, split=ucf_split)
self.train_video_to_label, self.test_video_to_label = data_util_.get_train_test_video_to_label_mapping() # name without v_ or .avi and small s .. name to numeric label starts at 0
# get video frames
self.video_frame_count = data_util_.get_video_frame_count() # name without v_ or .avi and small s
self.augmenter_level = augmenter_level
def run(self):
"""
get the data structure for training and validation
"""
train_loader = self.get_training_loader()
val_loader = self.get_testing_loader()
return train_loader, val_loader, self.test_video_to_label
def get_training_data_structure(self):
"""
get the data structure for training
"""
training_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.train_video_to_label: # sample from the whole video frames
training_data_structure.append((video_name, self.video_frame_count[video_name], self.train_video_to_label[video_name]))
return training_data_structure
def get_testing_data_structure(self):
"""
get the data structure for validation
"""
test_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.test_video_to_label:
nb_frame = self.video_frame_count[video_name]
interval = nb_frame // self.testing_samples_per_video
if interval == 0: # for videos shorter than self.testing_samples_per_video
interval = 1
# range is exclusive add one to be inclusive
# 1 > self.testing_samples_per_video * interval
for frame_idx in range(1, min(self.testing_samples_per_video * interval, nb_frame) + 1, interval):
test_data_structure.append((video_name, frame_idx, self.test_video_to_label[video_name]))
return test_data_structure
def get_training_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_training_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=True,
augmenter=get_training_augmenter(height=self.height, width=self.width, augmenter_level=self.augmenter_level),
)
print('==> Training data :', len(loader.data_to_load), 'videos', file=self.log_stream)
print('==> Training data :', len(loader.data_to_load), 'videos')
return loader
def get_testing_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_testing_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=False,
augmenter=get_validation_augmenter(height=self.height, width=self.width),
)
print('==> Validation data :', len(loader.data_to_load), 'frames', file=self.log_stream)
print('==> Validation data :', len(loader.data_to_load), 'frames')
return loader
if __name__ == '__main__':
    data_loader = SpatialDataLoader(batch_size=64,  # data_root_path="data",
                                    ucf_split='01',
                                    testing_samples_per_video=19, width=224, height=224)
train_loader, test_loader, test_video_level_label = data_loader.run()
print(len(train_loader))
print(len(test_loader))
print(train_loader.get_actual_length())
print(test_loader.get_actual_length())
print(train_loader.sequence[0][0].shape, train_loader.sequence[0][1].shape)
print(train_loader[0][0].shape, train_loader[0][1].shape)
# import tqdm
# progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader))
# for (sampled_frame, label) in progress:
# pass
import matplotlib.pyplot as plt
# preview raw data
def preview(data, labels):
# 3 channels
fig, axeslist = plt.subplots(ncols=8, nrows=8, figsize=(10, 10))
for i, sample in enumerate(data):
axeslist.ravel()[i].imshow(data[i])
axeslist.ravel()[i].set_title(labels[i])
axeslist.ravel()[i].set_axis_off()
plt.subplots_adjust(wspace=.4, hspace=.4)
print("train sample")
for batch in train_loader.get_epoch_generator():
print(batch[0].shape, batch[1].shape)
print(batch[1])
preview(batch[0], batch[1])
break
print("test sample") # same name will be displayed testing_samples_per_video with no shuffling
for batch in test_loader.get_epoch_generator():
print(batch[1].shape, batch[2].shape)
print(batch[0], batch[2])
preview(batch[1], batch[2])
break
| en | 0.676978 | ******************************** * Created by mohammed-alaa * ******************************** Spatial Dataloader implementing sequence api from keras (defines how to load a single item) this loads batches of images for each iteration it returns [batch_size, height, width ,3] ndarrays get data structure to load data # list of (video names,frame/max_frame,label) # three lists Denotes the number of batches per epoch # ceiling div Denotes the total number of samples Gets one batch # could be less or equal batch size # # max frame is given # random frame (one based) # just as selected # no label needed since (test_video_to_label mapping) (dictionary of name to label) is returned new data for the next epoch # shuffle all get the mapping and initialize the augmenter # split the training and testing videos # name without v_ or .avi and small s .. name to numeric label starts at 0 # get video frames # name without v_ or .avi and small s get the data structure for training and validation get the data structure for training # list of (video names,frame/max_frame,label) # sample from the whole video frames get the data structure for validation # list of (video names,frame/max_frame,label) # for videos shorter than self.testing_samples_per_video # range is exclusive add one to be inclusive # 1 > self.testing_samples_per_video * interval an instance of sequence loader for spatial model for parallel dataloading using keras sequence an instance of sequence loader for spatial model for parallel dataloading using keras sequence # data_root_path="data", # import tqdm # progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader)) # for (sampled_frame, label) in progress: # pass # preview raw data # 3 channels # same name will be displayed testing_samples_per_video with no shuffling | 2.945303 | 3 |
dianhua/worker/crawler/china_mobile/hunan/base_request_param.py | Svolcano/python_exercise | 6 | 8989 | <gh_stars>1-10
# -*- coding:utf-8 -*-
"""
@version: v1.0
@author: xuelong.liu
@license: Apache Licence
@contact: <EMAIL>
@software: PyCharm
@file: base_request_param.py
@time: 12/21/16 6:48 PM
"""
class RequestParam(object):
"""
请求相关
"""
# URL
START_URL = "https://www.hn.10086.cn/service/static/componant/login.html"
# GET_CAPTCHA_URL = "http://www.hn.10086.cn/service/ics/servlet/ImageServlet"
GET_CAPTCHA_URL = "https://www.hn.10086.cn/service/ics/login/sendSms"
# GET_CAPTCHA_URL = "http://www.hn.10086.cn/newservice/ics/servlet/ImageServlet?random=0.14531555527237483"
LOGIN_URL = "https://www.hn.10086.cn/service/ics/login/SSOLogin"
# GET_SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSendHattedCode?requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.5158618472543544"
GET_SMS_URL_READY = "https://www.hn.10086.cn/service/ics/componant/initTelQCellCore?tel=%s&ajaxSubmitType=post&ajax_randomcode=0.9461358208494027"
GET_SMS_URL = "https://www.hn.10086.cn/service/ics/componant/initSendHattedCode?requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.9461358208494027"
# SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSmsCodeAndServicePwd"
SMS_URL = "https://www.hn.10086.cn/service/ics/componant/initSmsCodeAndServicePwd?smsCode=%s&servicePwd=<PASSWORD>&requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.012645535304207867"
GET_CAL_LOG = "https://www.hn.10086.cn/service/ics/detailBillQuery/queryDetailBill"
GET_USER_INFO = "https://www.hn.10086.cn/service/ics/basicInfo/queryUserBasicInfo"
| # -*- coding:utf-8 -*-
"""
@version: v1.0
@author: xuelong.liu
@license: Apache Licence
@contact: <EMAIL>
@software: PyCharm
@file: base_request_param.py
@time: 12/21/16 6:48 PM
"""
class RequestParam(object):
"""
请求相关
"""
# URL
START_URL = "https://www.hn.10086.cn/service/static/componant/login.html"
# GET_CAPTCHA_URL = "http://www.hn.10086.cn/service/ics/servlet/ImageServlet"
GET_CAPTCHA_URL = "https://www.hn.10086.cn/service/ics/login/sendSms"
# GET_CAPTCHA_URL = "http://www.hn.10086.cn/newservice/ics/servlet/ImageServlet?random=0.14531555527237483"
LOGIN_URL = "https://www.hn.10086.cn/service/ics/login/SSOLogin"
# GET_SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSendHattedCode?requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.5158618472543544"
GET_SMS_URL_READY = "https://www.hn.10086.cn/service/ics/componant/initTelQCellCore?tel=%s&ajaxSubmitType=post&ajax_randomcode=0.9461358208494027"
GET_SMS_URL = "https://www.hn.10086.cn/service/ics/componant/initSendHattedCode?requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.9461358208494027"
# SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSmsCodeAndServicePwd"
SMS_URL = "https://www.hn.10086.cn/service/ics/componant/initSmsCodeAndServicePwd?smsCode=%s&servicePwd=<PASSWORD>&requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.012645535304207867"
GET_CAL_LOG = "https://www.hn.10086.cn/service/ics/detailBillQuery/queryDetailBill"
GET_USER_INFO = "https://www.hn.10086.cn/service/ics/basicInfo/queryUserBasicInfo" | en | 0.264811 | # -*- coding:utf-8 -*- @version: v1.0 @author: xuelong.liu @license: Apache Licence @contact: <EMAIL> @software: PyCharm @file: base_request_param.py @time: 12/21/16 6:48 PM 请求相关 # URL # GET_CAPTCHA_URL = "http://www.hn.10086.cn/service/ics/servlet/ImageServlet" # GET_CAPTCHA_URL = "http://www.hn.10086.cn/newservice/ics/servlet/ImageServlet?random=0.14531555527237483" # GET_SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSendHattedCode?requestTel=%s&ajaxSubmitType=post&ajax_randomcode=0.5158618472543544" # SMS_URL = "http://www.hn.10086.cn/newservice/ics/componant/initSmsCodeAndServicePwd" | 2.217799 | 2 |
day02/puzzle2.py | jack-beach/AdventOfCode2019 | 0 | 8990 | <reponame>jack-beach/AdventOfCode2019
# stdlib imports
import copy
# vendor imports
import click
@click.command()
@click.argument("input_file", type=click.File("r"))
def main(input_file):
"""Put your puzzle execution code here"""
# Convert the comma-delimited string of numbers into a list of ints
masterRegister = list(
map(lambda op: int(op), input_file.read().strip().split(","))
)
def execute(noun, verb):
# Create a local copy of the register for this execution
register = copy.deepcopy(masterRegister)
# Inject the noun and verb
register[1] = noun
register[2] = verb
# We will start reading the opcodes at position 0
pointer = 0
# Loop infinitely until we reach the termination instruction
while True:
# Get the code at the current read position
code = register[pointer]
# Code 99 means immediate termination
if code == 99:
break
# Code 1 is addition
elif code == 1:
# Get register addresses
addendAPointer = register[pointer + 1]
addendBPointer = register[pointer + 2]
sumPointer = register[pointer + 3]
# Perform the addition
register[sumPointer] = (
register[addendAPointer] + register[addendBPointer]
)
# Advance the code position by 4
pointer += 4
# Code 2 is multiplication
elif code == 2:
# Get register addresses
factorAPointer = register[pointer + 1]
factorBPointer = register[pointer + 2]
productPointer = register[pointer + 3]
# Perform the addition
register[productPointer] = (
register[factorAPointer] * register[factorBPointer]
)
# Advance the code position by 4
pointer += 4
# Unknown opcode means there was an error
else:
raise RuntimeError(
f"Unknown opcode {code} at position {pointer}"
)
# Return the result
return register[0]
# Iterate through all the possible combinations until the target is found
target = 19690720
found = None
for noun in range(100):
for verb in range(100):
result = execute(noun, verb)
if result == target:
found = (noun, verb)
break
if found:
break
# Calculate the final result
print("RESULT:", 100 * found[0] + found[1])
# Execute cli function on main
if __name__ == "__main__":
main()
| # stdlib imports
import copy
# vendor imports
import click
@click.command()
@click.argument("input_file", type=click.File("r"))
def main(input_file):
"""Put your puzzle execution code here"""
# Convert the comma-delimited string of numbers into a list of ints
masterRegister = list(
map(lambda op: int(op), input_file.read().strip().split(","))
)
def execute(noun, verb):
# Create a local copy of the register for this execution
register = copy.deepcopy(masterRegister)
# Inject the noun and verb
register[1] = noun
register[2] = verb
# We will start reading the opcodes at position 0
pointer = 0
# Loop infinitely until we reach the termination instruction
while True:
# Get the code at the current read position
code = register[pointer]
# Code 99 means immediate termination
if code == 99:
break
# Code 1 is addition
elif code == 1:
# Get register addresses
addendAPointer = register[pointer + 1]
addendBPointer = register[pointer + 2]
sumPointer = register[pointer + 3]
# Perform the addition
register[sumPointer] = (
register[addendAPointer] + register[addendBPointer]
)
# Advance the code position by 4
pointer += 4
# Code 2 is multiplication
elif code == 2:
# Get register addresses
factorAPointer = register[pointer + 1]
factorBPointer = register[pointer + 2]
productPointer = register[pointer + 3]
# Perform the addition
register[productPointer] = (
register[factorAPointer] * register[factorBPointer]
)
# Advance the code position by 4
pointer += 4
# Unknown opcode means there was an error
else:
raise RuntimeError(
f"Unknown opcode {code} at position {pointer}"
)
# Return the result
return register[0]
# Iterate through all the possible combinations until the target is found
target = 19690720
found = None
for noun in range(100):
for verb in range(100):
result = execute(noun, verb)
if result == target:
found = (noun, verb)
break
if found:
break
# Calculate the final result
print("RESULT:", 100 * found[0] + found[1])
# Execute cli function on main
if __name__ == "__main__":
main() | en | 0.850623 | # stdlib imports # vendor imports Put your puzzle execution code here # Convert the comma-delimited string of numbers into a list of ints # Create a local copy of the register for this execution # Inject the noun and verb # We will start reading the opcodes at position 0 # Loop infinitely until we reach the termination instruction # Get the code at the current read position # Code 99 means immediate termination # Code 1 is addition # Get register addresses # Perform the addition # Advance the code position by 4 # Code 2 is multiplication # Get register addresses # Perform the addition # Advance the code position by 4 # Unknown opcode means there was an error # Return the result # Iterate through all the possible combinations until the target is found # Calculate the final result # Execute cli function on main | 3.682303 | 4 |
ionosenterprise/items/backupunit.py | ionos-cloud/ionos-enterprise-sdk-python | 6 | 8991 | <filename>ionosenterprise/items/backupunit.py
class BackupUnit(object):
def __init__(self, name, password=<PASSWORD>, email=None):
"""
BackupUnit class initializer.
:param name: A name of that resource (only alphanumeric characters are acceptable)"
:type name: ``str``
:param password: The password associated to that resource.
:type password: ``str``
:param email: The email associated with the backup unit.
Bear in mind that this email does not be the same email as of the user.
:type email: ``str``
"""
self.name = name
self.password = password
self.email = email
def __repr__(self):
return ('<BackupUnit: name=%s, password=%s, email=%s>'
% (self.name, str(self.password), self.email))
| <filename>ionosenterprise/items/backupunit.py
class BackupUnit(object):
def __init__(self, name, password=<PASSWORD>, email=None):
"""
BackupUnit class initializer.
:param name: A name of that resource (only alphanumeric characters are acceptable)"
:type name: ``str``
:param password: The password associated to that resource.
:type password: ``str``
:param email: The email associated with the backup unit.
Bear in mind that this email does not be the same email as of the user.
:type email: ``str``
"""
self.name = name
self.password = password
self.email = email
def __repr__(self):
return ('<BackupUnit: name=%s, password=%s, email=%s>'
% (self.name, str(self.password), self.email))
| en | 0.861247 | BackupUnit class initializer. :param name: A name of that resource (only alphanumeric characters are acceptable)" :type name: ``str`` :param password: The password associated to that resource. :type password: ``str`` :param email: The email associated with the backup unit. Bear in mind that this email does not be the same email as of the user. :type email: ``str`` | 2.959529 | 3 |
install.py | X-lab-3D/PANDORA | 0 | 8992 | import os
dirs = [
'./PANDORA_files', './PANDORA_files/data', './PANDORA_files/data/csv_pkl_files',
'./PANDORA_files/data/csv_pkl_files/mhcseqs', './PANDORA_files/data/PDBs',
'./PANDORA_files/data/PDBs/pMHCI', './PANDORA_files/data/PDBs/pMHCII',
'./PANDORA_files/data/PDBs/Bad', './PANDORA_files/data/PDBs/Bad/pMHCI',
'./PANDORA_files/data/PDBs/Bad/pMHCII', './PANDORA_files/data/PDBs/IMGT_retrieved',
'./PANDORA_files/data/outputs',
'./test/test_data/PDBs/Bad','./test/test_data/PDBs/Bad/pMHCI',
'./test/test_data/PDBs/Bad/pMHCII', './test/test_data/csv_pkl_files'
]
for D in dirs:
try:
os.mkdir(D)
except OSError:
print('Could not make directory: ' + D)
# Install dependenciess
# os.popen("alias KEY_MODELLER='XXXX'").read()
# os.popen("conda install -y -c salilab modeller").read()
# os.popen("conda install -y -c bioconda muscle").read()
# os.popen("pip install -e ./").read()
| import os
dirs = [
'./PANDORA_files', './PANDORA_files/data', './PANDORA_files/data/csv_pkl_files',
'./PANDORA_files/data/csv_pkl_files/mhcseqs', './PANDORA_files/data/PDBs',
'./PANDORA_files/data/PDBs/pMHCI', './PANDORA_files/data/PDBs/pMHCII',
'./PANDORA_files/data/PDBs/Bad', './PANDORA_files/data/PDBs/Bad/pMHCI',
'./PANDORA_files/data/PDBs/Bad/pMHCII', './PANDORA_files/data/PDBs/IMGT_retrieved',
'./PANDORA_files/data/outputs',
'./test/test_data/PDBs/Bad','./test/test_data/PDBs/Bad/pMHCI',
'./test/test_data/PDBs/Bad/pMHCII', './test/test_data/csv_pkl_files'
]
for D in dirs:
try:
os.mkdir(D)
except OSError:
print('Could not make directory: ' + D)
# Install dependenciess
# os.popen("alias KEY_MODELLER='XXXX'").read()
# os.popen("conda install -y -c salilab modeller").read()
# os.popen("conda install -y -c bioconda muscle").read()
# os.popen("pip install -e ./").read()
| en | 0.205351 | # Install dependenciess # os.popen("alias KEY_MODELLER='XXXX'").read() # os.popen("conda install -y -c salilab modeller").read() # os.popen("conda install -y -c bioconda muscle").read() # os.popen("pip install -e ./").read() | 2.241816 | 2 |
app/auth/views.py | MainaKamau92/apexselftaught | 4 | 8993 | <gh_stars>1-10
# app/auth/views.py
import os
from flask import flash, redirect, render_template, url_for, request
from flask_login import login_required, login_user, logout_user, current_user
from . import auth
from .forms import (LoginForm, RegistrationForm,
RequestResetForm, ResetPasswordForm)
from .. import db, mail
from ..models import User
from flask_mail import Message
from werkzeug.security import generate_password_hash
@auth.route('/register/', methods=['GET', 'POST'])
def register():
"""
Handle requests to the /register route
Add an user to the database through the registration form
"""
logout_user()
form = RegistrationForm()
if form.validate_on_submit():
user = User(first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data,
username=form.username.data,
password=<PASSWORD>,
is_freelancer=form.freelancer.data,
is_employer=form.employer.data)
# add user to the database
db.session.add(user)
db.session.commit()
flash(f'You have successfully registered! You may now login', 'success')
# redirect to the login page
return redirect(url_for('auth.login'))
# load registration form
return render_template('auth/register.html', form=form, title='Register')
@auth.route('/login/', methods=['GET', 'POST'])
def login():
"""
Handle requests to the /login route
Log an employee in through the login form
"""
if current_user.is_authenticated:
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(url_for('admin.admin_dashboard'))
form = LoginForm()
if form.validate_on_submit():
# check whether user exists in the database
# the password entered matches the password in the database
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, remember=form.remember.data)
#flash(f'Logged In', 'success')
if user.is_freelancer == True and user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(url_for('freelancer.dashboard'))
elif user.is_employer == True and user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
elif user.is_employer and user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(url_for('admin.admin_dashboard'))
flash(f'Invalid Credentials', 'danger')
# load login template
return render_template('auth/login.html', form=form, title='Login')
@auth.route('/logout/', methods=['GET', 'POST'])
@login_required
def logout():
"""
Handle requests to the /logout route
Log an employee out through the logout link
"""
logout_user()
flash(f'You have been logged out', 'success')
# redirect to the login page
return redirect(url_for('auth.login'))
def send_reset_email(user):
try:
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='<EMAIL>',
recipients=[user.email])
msg.body = f''' To reset your password visit the following link
{url_for('auth.reset_password', token=token, _external=True)}
If you did not make this request ignore this email
'''
mail.send(msg)
except Exception as e:
print(e)
@auth.route('/reset-password', methods=['GET', 'POST'])
def request_reset():
if current_user.is_authenticated:
next_page = request.args.get('next')
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(next_page) if next_page else redirect(url_for('admin.admin_dashboard'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash(f'Email has been sent with password reset instructions', 'info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_request.html', form=form, title='Request Reset Password')
@auth.route('/reset-password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
next_page = request.args.get('next')
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(next_page) if next_page else redirect(url_for('admin.admin_dashboard'))
user = User.verify_reset_token(token)
if user is None:
flash(f'Invalid token or expired token', 'warning')
return redirect(url_for('auth.request_reset'))
form = ResetPasswordForm()
if form.validate_on_submit():
# add user to the database
hashed_password = <PASSWORD>_password_hash(form.password.data)
user.password_hash = <PASSWORD>
db.session.commit()
flash(
f'Your password has been reset successfully! You may now login', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form, title='Reset Password')
| # app/auth/views.py
import os
from flask import flash, redirect, render_template, url_for, request
from flask_login import login_required, login_user, logout_user, current_user
from . import auth
from .forms import (LoginForm, RegistrationForm,
RequestResetForm, ResetPasswordForm)
from .. import db, mail
from ..models import User
from flask_mail import Message
from werkzeug.security import generate_password_hash
@auth.route('/register/', methods=['GET', 'POST'])
def register():
"""
Handle requests to the /register route
Add an user to the database through the registration form
"""
logout_user()
form = RegistrationForm()
if form.validate_on_submit():
user = User(first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data,
username=form.username.data,
password=<PASSWORD>,
is_freelancer=form.freelancer.data,
is_employer=form.employer.data)
# add user to the database
db.session.add(user)
db.session.commit()
flash(f'You have successfully registered! You may now login', 'success')
# redirect to the login page
return redirect(url_for('auth.login'))
# load registration form
return render_template('auth/register.html', form=form, title='Register')
@auth.route('/login/', methods=['GET', 'POST'])
def login():
"""
Handle requests to the /login route
Log an employee in through the login form
"""
if current_user.is_authenticated:
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(url_for('admin.admin_dashboard'))
form = LoginForm()
if form.validate_on_submit():
# check whether user exists in the database
# the password entered matches the password in the database
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, remember=form.remember.data)
#flash(f'Logged In', 'success')
if user.is_freelancer == True and user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(url_for('freelancer.dashboard'))
elif user.is_employer == True and user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
elif user.is_employer and user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(url_for('admin.admin_dashboard'))
flash(f'Invalid Credentials', 'danger')
# load login template
return render_template('auth/login.html', form=form, title='Login')
@auth.route('/logout/', methods=['GET', 'POST'])
@login_required
def logout():
"""
Handle requests to the /logout route
Log an employee out through the logout link
"""
logout_user()
flash(f'You have been logged out', 'success')
# redirect to the login page
return redirect(url_for('auth.login'))
def send_reset_email(user):
try:
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='<EMAIL>',
recipients=[user.email])
msg.body = f''' To reset your password visit the following link
{url_for('auth.reset_password', token=token, _external=True)}
If you did not make this request ignore this email
'''
mail.send(msg)
except Exception as e:
print(e)
@auth.route('/reset-password', methods=['GET', 'POST'])
def request_reset():
if current_user.is_authenticated:
next_page = request.args.get('next')
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(next_page) if next_page else redirect(url_for('admin.admin_dashboard'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash(f'Email has been sent with password reset instructions', 'info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_request.html', form=form, title='Request Reset Password')
@auth.route('/reset-password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
next_page = request.args.get('next')
if current_user.is_freelancer == True and current_user.is_employer == False:
# redirect to the freelancer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('freelancer.dashboard'))
elif current_user.is_employer == True and current_user.is_freelancer == False:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
elif current_user.is_employer and current_user.is_freelancer:
# redirect to the employer dashboard page after login
return redirect(next_page) if next_page else redirect(url_for('employer.dashboard'))
else:
# redirect to the admin dashboard
return redirect(next_page) if next_page else redirect(url_for('admin.admin_dashboard'))
user = User.verify_reset_token(token)
if user is None:
flash(f'Invalid token or expired token', 'warning')
return redirect(url_for('auth.request_reset'))
form = ResetPasswordForm()
if form.validate_on_submit():
# add user to the database
hashed_password = <PASSWORD>_password_hash(form.password.data)
user.password_hash = <PASSWORD>
db.session.commit()
flash(
f'Your password has been reset successfully! You may now login', 'success')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form, title='Reset Password') | en | 0.802745 | # app/auth/views.py Handle requests to the /register route Add an user to the database through the registration form # add user to the database # redirect to the login page # load registration form Handle requests to the /login route Log an employee in through the login form # redirect to the freelancer dashboard page after login # redirect to the employer dashboard page after login # redirect to the employer dashboard page after login # redirect to the admin dashboard # check whether user exists in the database # the password entered matches the password in the database #flash(f'Logged In', 'success') # redirect to the freelancer dashboard page after login # redirect to the employer dashboard page after login # redirect to the employer dashboard page after login # redirect to the admin dashboard # load login template Handle requests to the /logout route Log an employee out through the logout link # redirect to the login page To reset your password visit the following link {url_for('auth.reset_password', token=token, _external=True)} If you did not make this request ignore this email # redirect to the freelancer dashboard page after login # redirect to the employer dashboard page after login # redirect to the employer dashboard page after login # redirect to the admin dashboard # redirect to the freelancer dashboard page after login # redirect to the employer dashboard page after login # redirect to the employer dashboard page after login # redirect to the admin dashboard # add user to the database | 2.626333 | 3 |
oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py | devendermishrajio/oslo.messaging | 1 | 8994 | <filename>oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LI, _LW
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class DealerPublisher(zmq_publisher_base.PublisherMultisend):
def __init__(self, conf, matchmaker):
super(DealerPublisher, self).__init__(conf, matchmaker, zmq.DEALER)
def send_request(self, request):
self._check_request_pattern(request)
dealer_socket, hosts = self._check_hosts_connections(request.target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% request.msg_type)
return
if request.msg_type in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, request)
else:
self._send_request(dealer_socket, request)
def _check_request_pattern(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
def _send_request(self, socket, request):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(request)
LOG.info(_LI("Sending message_id %(message)s to a target %(target)s")
% {"message": request.message_id,
"target": request.target})
def cleanup(self):
super(DealerPublisher, self).cleanup()
class DealerPublisherLight(zmq_publisher_base.PublisherBase):
def __init__(self, conf, address):
super(DealerPublisherLight, self).__init__(conf)
self.socket = self.zmq_context.socket(zmq.DEALER)
self.socket.connect(address)
def send_request(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
envelope = request.create_envelope()
self.socket.send(b'', zmq.SNDMORE)
self.socket.send_pyobj(envelope, zmq.SNDMORE)
self.socket.send_pyobj(request)
def cleanup(self):
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.close()
class DealerPublisherProxy(DealerPublisher):
def __init__(self, conf, matchmaker, reply_receiver):
super(DealerPublisherProxy, self).__init__(conf, matchmaker)
self.reply_receiver = reply_receiver
def send_request(self, multipart_message):
envelope = multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE]
LOG.info(_LI("Envelope: %s") % envelope)
target = envelope[zmq_names.FIELD_TARGET]
dealer_socket, hosts = self._check_hosts_connections(target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% envelope[zmq_names.FIELD_MSG_TYPE])
return
self.reply_receiver.track_socket(dealer_socket.handle)
LOG.info(_LI("Sending message %(message)s to a target %(target)s")
% {"message": envelope[zmq_names.FIELD_MSG_ID],
"target": envelope[zmq_names.FIELD_TARGET]})
if envelope[zmq_names.FIELD_MSG_TYPE] in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, multipart_message)
else:
self._send_request(dealer_socket, multipart_message)
def _send_request(self, socket, multipart_message):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(
multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE],
zmq.SNDMORE)
socket.send(multipart_message[zmq_names.MULTIPART_IDX_BODY])
class ReplyReceiver(object):
def __init__(self, poller):
self.poller = poller
LOG.info(_LI("Reply waiter created in broker"))
def _receive_reply(self, socket):
return socket.recv_multipart()
def track_socket(self, socket):
self.poller.register(socket, self._receive_reply)
def cleanup(self):
self.poller.close()
class AcknowledgementReceiver(object):
def __init__(self):
self.poller = zmq_async.get_poller()
self.thread = zmq_async.get_executor(self.poll_for_acknowledgements)
self.thread.execute()
def _receive_acknowledgement(self, socket):
empty = socket.recv()
assert empty == b"", "Empty delimiter expected"
ack_message = socket.recv_pyobj()
return ack_message
def track_socket(self, socket):
self.poller.register(socket, self._receive_acknowledgement)
def poll_for_acknowledgements(self):
ack_message, socket = self.poller.poll()
LOG.info(_LI("Message %s acknowledged")
% ack_message[zmq_names.FIELD_ID])
def cleanup(self):
self.thread.stop()
self.poller.close()
| <filename>oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LI, _LW
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class DealerPublisher(zmq_publisher_base.PublisherMultisend):
def __init__(self, conf, matchmaker):
super(DealerPublisher, self).__init__(conf, matchmaker, zmq.DEALER)
def send_request(self, request):
self._check_request_pattern(request)
dealer_socket, hosts = self._check_hosts_connections(request.target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% request.msg_type)
return
if request.msg_type in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, request)
else:
self._send_request(dealer_socket, request)
def _check_request_pattern(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
def _send_request(self, socket, request):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(request)
LOG.info(_LI("Sending message_id %(message)s to a target %(target)s")
% {"message": request.message_id,
"target": request.target})
def cleanup(self):
super(DealerPublisher, self).cleanup()
class DealerPublisherLight(zmq_publisher_base.PublisherBase):
def __init__(self, conf, address):
super(DealerPublisherLight, self).__init__(conf)
self.socket = self.zmq_context.socket(zmq.DEALER)
self.socket.connect(address)
def send_request(self, request):
if request.msg_type == zmq_names.CALL_TYPE:
raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)
envelope = request.create_envelope()
self.socket.send(b'', zmq.SNDMORE)
self.socket.send_pyobj(envelope, zmq.SNDMORE)
self.socket.send_pyobj(request)
def cleanup(self):
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.close()
class DealerPublisherProxy(DealerPublisher):
def __init__(self, conf, matchmaker, reply_receiver):
super(DealerPublisherProxy, self).__init__(conf, matchmaker)
self.reply_receiver = reply_receiver
def send_request(self, multipart_message):
envelope = multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE]
LOG.info(_LI("Envelope: %s") % envelope)
target = envelope[zmq_names.FIELD_TARGET]
dealer_socket, hosts = self._check_hosts_connections(target)
if not dealer_socket.connections:
# NOTE(ozamiatin): Here we can provide
# a queue for keeping messages to send them later
# when some listener appears. However such approach
# being more reliable will consume additional memory.
LOG.warning(_LW("Request %s was dropped because no connection")
% envelope[zmq_names.FIELD_MSG_TYPE])
return
self.reply_receiver.track_socket(dealer_socket.handle)
LOG.info(_LI("Sending message %(message)s to a target %(target)s")
% {"message": envelope[zmq_names.FIELD_MSG_ID],
"target": envelope[zmq_names.FIELD_TARGET]})
if envelope[zmq_names.FIELD_MSG_TYPE] in zmq_names.MULTISEND_TYPES:
for _ in range(dealer_socket.connections_count()):
self._send_request(dealer_socket, multipart_message)
else:
self._send_request(dealer_socket, multipart_message)
def _send_request(self, socket, multipart_message):
socket.send(b'', zmq.SNDMORE)
socket.send_pyobj(
multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE],
zmq.SNDMORE)
socket.send(multipart_message[zmq_names.MULTIPART_IDX_BODY])
class ReplyReceiver(object):
def __init__(self, poller):
self.poller = poller
LOG.info(_LI("Reply waiter created in broker"))
def _receive_reply(self, socket):
return socket.recv_multipart()
def track_socket(self, socket):
self.poller.register(socket, self._receive_reply)
def cleanup(self):
self.poller.close()
class AcknowledgementReceiver(object):
def __init__(self):
self.poller = zmq_async.get_poller()
self.thread = zmq_async.get_executor(self.poll_for_acknowledgements)
self.thread.execute()
def _receive_acknowledgement(self, socket):
empty = socket.recv()
assert empty == b"", "Empty delimiter expected"
ack_message = socket.recv_pyobj()
return ack_message
def track_socket(self, socket):
self.poller.register(socket, self._receive_acknowledgement)
def poll_for_acknowledgements(self):
ack_message, socket = self.poller.poll()
LOG.info(_LI("Message %s acknowledged")
% ack_message[zmq_names.FIELD_ID])
def cleanup(self):
self.thread.stop()
self.poller.close()
| en | 0.909297 | # Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(ozamiatin): Here we can provide # a queue for keeping messages to send them later # when some listener appears. However such approach # being more reliable will consume additional memory. # NOTE(ozamiatin): Here we can provide # a queue for keeping messages to send them later # when some listener appears. However such approach # being more reliable will consume additional memory. | 1.902001 | 2 |
coba/environments/filters.py | mrucker/banditbenchmark | 0 | 8995 | import pickle
import warnings
import collections.abc
from math import isnan
from statistics import mean, median, stdev, mode
from abc import abstractmethod, ABC
from numbers import Number
from collections import defaultdict
from itertools import islice, chain
from typing import Hashable, Optional, Sequence, Union, Iterable, Dict, Any, List, Tuple, Callable, Mapping
from coba.backports import Literal
from coba import pipes
from coba.random import CobaRandom
from coba.exceptions import CobaException
from coba.statistics import iqr
from coba.pipes import Flatten
from coba.environments.primitives import Interaction
from coba.environments.logged.primitives import LoggedInteraction
from coba.environments.simulated.primitives import SimulatedInteraction
class EnvironmentFilter(pipes.Filter[Iterable[Interaction],Iterable[Interaction]], ABC):
"""A filter that can be applied to an Environment."""
@abstractmethod
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
"""Apply a filter to an Environment's interactions."""
...
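# Usage sketch (illustrative only, not part of the original module): a custom filter is
# written by subclassing EnvironmentFilter and implementing `filter`. The class name
# below is hypothetical.
#
#     class TakeEveryOther(EnvironmentFilter):
#         """Keep only every other interaction."""
#         def filter(self, interactions):
#             return islice(interactions, 0, None, 2)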
class Identity(pipes.Identity, EnvironmentFilter):
"""Return whatever interactions are given to the filter."""
pass
class Take(pipes.Take, EnvironmentFilter):
"""Take a fixed number of interactions from an Environment."""
pass
class Shuffle(pipes.Shuffle, EnvironmentFilter):
"""Shuffle a sequence of Interactions in an Environment."""
pass
class Reservoir(pipes.Reservoir, EnvironmentFilter):
"""Take a fixed number of random Interactions from an Environment."""
pass
class Scale(EnvironmentFilter):
"""Shift and scale features to precondition them before learning."""
def __init__(self,
shift: Union[Number,Literal["min","mean","med"]] = 0,
scale: Union[Number,Literal["minmax","std","iqr","maxabs"]] = "minmax",
target: Literal["features","rewards"] = "features",
using: Optional[int] = None):
"""Instantiate a Scale filter.
Args:
shift: The statistic to use to shift each context feature.
scale: The statistic to use to scale each context feature.
target: The target data we wish to scale in the environment.
using: The number of interactions to use when calculating the necessary statistics.
"""
assert isinstance(shift,Number) or shift in ["min","mean","med"]
assert isinstance(scale,Number) or scale in ["minmax","std","iqr","maxabs"]
self._shift = shift
self._scale = scale
self._using = using
self._target = target
@property
def params(self) -> Dict[str, Any]:
return {
"scale_shift": self._shift,
"scale_scale": self._scale,
"scale_using": self._using,
"scale_target": self._target
}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
fitting_interactions = list(islice(iter_interactions,self._using))
shifts : Dict[Hashable,float] = defaultdict(lambda:0)
scales : Dict[Hashable,float] = defaultdict(lambda:1)
unscaled: Dict[Hashable,List[Any]] = defaultdict(list)
if any([isinstance(i.context,dict) for i in fitting_interactions]) and self._shift != 0:
raise CobaException("Shift is required to be 0 for sparse environments. Otherwise the environment will become dense.")
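        # Track features whose values mix numeric and non-numeric types; such features are skipped and left unscaled.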
mixed = set()
had_non_numeric = set()
for interaction in fitting_interactions:
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if name in mixed: continue
is_numeric = isinstance(value,Number)
is_nan = is_numeric and isnan(value)
if is_nan:
pass
elif (not is_numeric and name in unscaled) or (is_numeric and name in had_non_numeric):
mixed.add(name)
if name in unscaled: del unscaled[name]
if name in had_non_numeric: had_non_numeric.remove(name)
elif not is_numeric:
had_non_numeric.add(name)
elif is_numeric and not is_nan:
unscaled[name].append(value)
if self._target == "rewards":
unscaled["rewards"].extend(interaction.rewards)
if mixed: warnings.warn(f"Some features were not scaled due to having mixed types: {mixed}. ")
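        # Features absent from a sparse context are implicitly zero, so represent that zero before computing the scaling statistics.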
has_sparse_zero = set()
for interaction in fitting_interactions:
if isinstance(interaction.context,dict):
has_sparse_zero |= unscaled.keys() - interaction.context.keys() - {"rewards"}
for key in has_sparse_zero:
unscaled[key].append(0)
for name, values in unscaled.items():
if isinstance(self._shift, Number):
shift = self._shift
if self._shift == "min":
shift = min(values)
if self._shift == "mean":
shift = mean(values)
if self._shift == "med":
shift = median(values)
if isinstance(self._scale, Number):
scale_num = self._scale
scale_den = 1
if self._scale == "std":
scale_num = 1
scale_den = stdev(values)
if self._scale == "minmax":
scale_num = 1
scale_den = max(values)-min(values)
if self._scale == "iqr":
scale_num = 1
scale_den = iqr(values)
if self._scale == "maxabs":
scale_num = 1
scale_den = max([abs(v-shift) for v in values])
shifts[name] = shift
scales[name] = scale_num/scale_den if round(scale_den,10) != 0 else 1
for interaction in chain(fitting_interactions, iter_interactions):
scaled_values = {}
final_context = interaction.context
final_rewards = None
final_kwargs = interaction.kwargs.copy()
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if isinstance(value,Number):
scaled_values[name] = (value-shifts[name])*scales[name]
else:
scaled_values[name] = value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = scaled_values
elif isinstance(interaction.context,tuple):
final_context = tuple(scaled_values[k] for k,_ in self._feature_pairs(interaction.context))
else:
final_context = scaled_values[1]
if self._target == "rewards":
final_rewards = [ (r-shifts['rewards'])*scales['rewards'] for r in interaction.rewards ]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
final_rewards or interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Scale.")
def _feature_pairs(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
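# Usage sketch (illustrative only): standardize context features using statistics
# estimated from the first 1000 interactions; `interactions` is assumed to be an
# iterable of SimulatedInteraction objects.
#
#     standardized = list(Scale(shift="mean", scale="std", using=1000).filter(interactions))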
class Impute(EnvironmentFilter):
"""Impute missing values (nan) in Interaction contexts."""
def __init__(self,
stat : Literal["mean","median","mode"] = "mean",
using: Optional[int] = None):
"""Instantiate an Impute filter.
Args:
            stat: The statistic to use for imputation.
using: The number of interactions to use to calculate the imputation statistics.
"""
assert stat in ["mean","median","mode"]
self._stat = stat
self._using = using
@property
def params(self) -> Dict[str, Any]:
return { "impute_stat": self._stat, "impute_using": self._using }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
train_interactions = list(islice(iter_interactions,self._using))
test_interactions = chain.from_iterable([train_interactions, iter_interactions])
stats : Dict[Hashable,float] = defaultdict(int)
features: Dict[Hashable,List[Number]] = defaultdict(list)
for interaction in train_interactions:
for name,value in self._context_as_name_values(interaction.context):
if isinstance(value,Number) and not isnan(value):
features[name].append(value)
for feat_name, feat_numeric_values in features.items():
if self._stat == "mean":
stats[feat_name] = mean(feat_numeric_values)
if self._stat == "median":
stats[feat_name] = median(feat_numeric_values)
if self._stat == "mode":
stats[feat_name] = mode(feat_numeric_values)
for interaction in test_interactions:
kv_imputed_context = {}
for name,value in self._context_as_name_values(interaction.context):
kv_imputed_context[name] = stats[name] if isinstance(value,Number) and isnan(value) else value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = kv_imputed_context
elif isinstance(interaction.context,tuple):
final_context = tuple(kv_imputed_context[k] for k,_ in self._context_as_name_values(interaction.context))
else:
final_context = kv_imputed_context[1]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Impute.")
def _context_as_name_values(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
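# Usage sketch (illustrative only): replace nan context features with the per-feature
# median computed over the first 500 interactions.
#
#     imputed = list(Impute(stat="median", using=500).filter(interactions))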
class Sparse(EnvironmentFilter):
"""Sparsify an environment's feature representation.
This has little utility beyond debugging.
"""
def __init__(self, context:bool = True, action:bool = False):
"""Instantiate a Sparse filter.
Args:
context: If True then contexts should be made sparse otherwise leave them alone.
action: If True then actions should be made sparse otherwise leave them alone.
"""
self._context = context
self._action = action
@property
def params(self) -> Dict[str, Any]:
return { "sparse_C": self._context, "sparse_A": self._action }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
for interaction in interactions:
sparse_context = self._make_sparse(interaction.context) if self._context else interaction.context
if isinstance(interaction, SimulatedInteraction):
sparse_actions = list(map(self._make_sparse,interaction.actions)) if self._action else interaction.actions
yield SimulatedInteraction(
sparse_context,
sparse_actions,
interaction.rewards
)
elif isinstance(interaction, LoggedInteraction):
sparse_action = self._make_sparse(interaction.action) if self._action else interaction.action
yield LoggedInteraction(
sparse_context,
sparse_action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Sparse.")
def _make_sparse(self, value) -> Optional[dict]:
if isinstance(value,dict) or value is None:
return value
if isinstance(value,(list,tuple)):
return dict(enumerate(value))
return {0:value}
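# Usage sketch (illustrative only): convert dense tuple contexts into sparse dict
# contexts while leaving actions unchanged.
#
#     sparse = list(Sparse(context=True, action=False).filter(interactions))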
class Cycle(EnvironmentFilter):
"""Cycle all rewards associated with actions by one place.
    This filter is useful for testing an algorithm's response to a non-stationary shock.
"""
def __init__(self, after:int = 0):
"""Instantiate a Cycle filter.
Args:
after: How many interactions should be seen before applying the cycle filter.
"""
self._after = after
@property
def params(self) -> Dict[str, Any]:
return { "cycle_after": self._after }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
underlying_iterable = iter(interactions)
sans_cycle_interactions = islice(underlying_iterable, self._after)
with_cycle_interactions = underlying_iterable
for interaction in sans_cycle_interactions:
yield interaction
try:
first_interaction = next(with_cycle_interactions)
action_set = set(first_interaction.actions)
n_actions = len(action_set)
featureless_actions = [tuple([0]*n+[1]+[0]*(n_actions-n-1)) for n in range(n_actions)]
with_cycle_interactions = chain([first_interaction], with_cycle_interactions)
if len(set(action_set) & set(featureless_actions)) != len(action_set):
warnings.warn("Cycle only works for environments without action features. It will be ignored in this case.")
for interaction in with_cycle_interactions:
yield interaction
else:
for interaction in with_cycle_interactions:
rewards = interaction.rewards[-1:] + interaction.rewards[:-1]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
except StopIteration:
pass
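# Usage sketch (illustrative only): introduce a non-stationary shock by rotating each
# interaction's rewards by one place after the first 500 interactions (only applies when
# actions have no features).
#
#     shocked = list(Cycle(after=500).filter(interactions))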
class Binary(EnvironmentFilter):
"""Binarize all rewards to either 1 (max rewards) or 0 (all others)."""
@property
def params(self) -> Dict[str, Any]:
return { "binary": True }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
for interaction in interactions:
max_rwd = max(interaction.rewards)
rewards = [int(r==max_rwd) for r in interaction.rewards]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
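# Usage sketch (illustrative only): turn real-valued rewards into 0/1 rewards where
# only the action(s) with the maximum reward in each interaction receive 1.
#
#     binarized = list(Binary().filter(interactions))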
class Sort(EnvironmentFilter):
"""Sort a sequence of Interactions in an Environment."""
def __init__(self, *keys: Union[str,int,Sequence[Union[str,int]]]) -> None:
"""Instantiate a Sort filter.
Args:
*keys: The context items that should be sorted on.
"""
self._keys = list(Flatten().filter([list(keys)]))[0]
@property
def params(self) -> Dict[str, Any]:
return { "sort": self._keys or '*' }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
full_sorter = lambda interaction: tuple(interaction.context )
list_sorter = lambda interaction: tuple(interaction.context[key] for key in self._keys)
dict_sorter = lambda interaction: tuple(interaction.context.get(key,0) for key in self._keys)
interactions = list(interactions)
is_sparse = isinstance(interactions[0].context,dict)
sorter = full_sorter if not self._keys else dict_sorter if is_sparse else list_sorter
return sorted(interactions, key=sorter)
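# Usage sketch (illustrative only): order interactions by their first two context features.
#
#     ordered = Sort(0, 1).filter(interactions)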
class Where(EnvironmentFilter):
"""Define Environment selection criteria for an Environments pipe."""
def __init__(self, *, n_interactions: Union[int,Tuple[Optional[int],Optional[int]]] = None) -> None:
"""Instantiate a Where filter.
Args:
n_interactions: The minimum, maximum or exact number of interactions Environments must have.
"""
self._n_interactions = n_interactions
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._n_interactions is not None:
params["where_n_interactions"] = self._n_interactions
return params
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
interactions = iter(interactions)
if self._n_interactions is None or self._n_interactions == (None,None):
min_interactions = None
max_interactions = None
take_interactions = 0
elif isinstance(self._n_interactions, int):
min_interactions = self._n_interactions
max_interactions = self._n_interactions
take_interactions = self._n_interactions+1
else:
min_interactions = self._n_interactions[0]
max_interactions = self._n_interactions[1]
take_interactions = max(filter(lambda x: x is not None, list(self._n_interactions)))+1
taken_interactions = list(islice(interactions, take_interactions))
if max_interactions is not None and len(taken_interactions) > max_interactions:
return []
if min_interactions is not None and len(taken_interactions) < min_interactions:
return []
return chain(taken_interactions, interactions)
class Warm(EnvironmentFilter):
"""Turn a SimulatedEnvironment into a WarmStartEnvironment."""
def __init__(self, n_warm:int, seed:int = 1):
"""Instantiate a Warm filter.
Args:
n_warm: The number of interactions that should be turned into LoggedInteractions.
seed: The random number seed that determines the random logging policy for LoggedInteractions.
"""
self._n_warm = n_warm
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return { "n_warm": self._n_warm }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[Interaction]:
self._rng = CobaRandom(self._seed)
underlying_iterable = iter(interactions)
logged_interactions = map(self._to_logged_interaction, islice(underlying_iterable, self._n_warm))
simulated_interactions = underlying_iterable
return chain(logged_interactions, simulated_interactions)
def _to_logged_interaction(self, interaction: SimulatedInteraction) -> LoggedInteraction:
num_actions = len(interaction.actions)
probabilities = [1/num_actions] * num_actions
idx = self._rng.choice(list(range(num_actions)), probabilities)
actions = interaction.actions
action = interaction.actions[idx]
prob = probabilities[idx]
reward = interaction.rewards[idx]
return LoggedInteraction(interaction.context, action, reward, prob, actions)
class Riffle(EnvironmentFilter):
"""Riffle shuffle Interactions by taking actions from the end and evenly distributing into the beginning."""
def __init__(self, spacing: int = 3, seed=1) -> None:
"""Instantiate a Riffle filter.
Args:
spacing: The number of interactions from the beginning between each interaction shuffled in from the end.
seed: The seed used to determine the location of each ending interaction when placed within its beginning space.
"""
self._spacing = spacing
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return {"riffle_spacing": self._spacing, "riffle_seed": self._seed}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
rng = CobaRandom(self._seed)
interactions = list(interactions)
for i in range(int(len(interactions)/(self._spacing+1))):
interactions.insert(i*self._spacing+rng.randint(0,self._spacing), interactions.pop())
return interactions
class Noise(EnvironmentFilter):
"""Introduce noise to an environment."""
def __init__(self,
context: Callable[[float,CobaRandom], float] = None,
action : Callable[[float,CobaRandom], float] = None,
reward : Callable[[float,CobaRandom], float] = None,
seed : int = 1) -> None:
"""Instantiate a Noise EnvironmentFilter.
Args:
context: A noise generator for context features.
action : A noise generator for action features.
reward : A noise generator for rewards.
seed : The seed initializing the random state of the noise generators.
"""
self._args = (context,action,reward,seed)
self._no_noise = lambda x, _: x
if context is None and action is None and reward is None:
context = lambda x, rng: x+rng.gauss(0,1)
self._context_noise = context or self._no_noise
self._action_noise = action or self._no_noise
self._reward_noise = reward or self._no_noise
self._seed = seed
def __reduce__(self) -> tuple:
try:
pickle.dumps(self._args)
except Exception:
message = (
"We were unable to pickle the Noise filter. This is likely due to using lambda functions for noise generation. "
"To work around this we recommend you first define your lambda functions as a named function and then pass the "
"named function to Noise."
)
raise CobaException(message)
else:
return (Noise, self._args)
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._context_noise != self._no_noise: params['context_noise'] = True
if self._action_noise != self._no_noise : params['action_noise' ] = True
if self._reward_noise != self._no_noise : params['reward_noise' ] = True
params['noise_seed'] = self._seed
return params
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
rng = CobaRandom(self._seed)
for interaction in interactions:
if isinstance(interaction, LoggedInteraction):
raise CobaException("We do not currently support adding noise to a LoggedInteraction.")
noisy_context = self._noises(interaction.context, rng, self._context_noise)
noisy_actions = [ self._noises(a, rng, self._action_noise) for a in interaction.actions ]
noisy_rewards = [ self._noises(r, rng, self._reward_noise) for r in interaction.rewards ]
yield SimulatedInteraction(noisy_context, noisy_actions, noisy_rewards, **interaction.kwargs)
def _noises(self, value:Union[None,float,str,Mapping,Sequence], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]):
if isinstance(value, collections.abc.Mapping):
#we sort so that noise generation is deterministic with respect to seed
return { k:self._noise(v, rng, noiser) for k,v in sorted(value.items()) }
if isinstance(value, collections.abc.Sequence) and not isinstance(value, str):
return [ self._noise(v, rng, noiser) for v in value ]
return self._noise(value, rng, noiser)
def _noise(self, value:Union[None,float,str], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]) -> float:
return value if not isinstance(value,(int,float)) else noiser(value, rng)
| import pickle
import warnings
import collections.abc
from math import isnan
from statistics import mean, median, stdev, mode
from abc import abstractmethod, ABC
from numbers import Number
from collections import defaultdict
from itertools import islice, chain
from typing import Hashable, Optional, Sequence, Union, Iterable, Dict, Any, List, Tuple, Callable, Mapping
from coba.backports import Literal
from coba import pipes
from coba.random import CobaRandom
from coba.exceptions import CobaException
from coba.statistics import iqr
from coba.pipes import Flatten
from coba.environments.primitives import Interaction
from coba.environments.logged.primitives import LoggedInteraction
from coba.environments.simulated.primitives import SimulatedInteraction
class EnvironmentFilter(pipes.Filter[Iterable[Interaction],Iterable[Interaction]], ABC):
"""A filter that can be applied to an Environment."""
@abstractmethod
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
"""Apply a filter to an Environment's interactions."""
...
class Identity(pipes.Identity, EnvironmentFilter):
"""Return whatever interactions are given to the filter."""
pass
class Take(pipes.Take, EnvironmentFilter):
"""Take a fixed number of interactions from an Environment."""
pass
class Shuffle(pipes.Shuffle, EnvironmentFilter):
"""Shuffle a sequence of Interactions in an Environment."""
pass
class Reservoir(pipes.Reservoir, EnvironmentFilter):
"""Take a fixed number of random Interactions from an Environment."""
pass
class Scale(EnvironmentFilter):
"""Shift and scale features to precondition them before learning."""
def __init__(self,
shift: Union[Number,Literal["min","mean","med"]] = 0,
scale: Union[Number,Literal["minmax","std","iqr","maxabs"]] = "minmax",
target: Literal["features","rewards"] = "features",
using: Optional[int] = None):
"""Instantiate a Scale filter.
Args:
shift: The statistic to use to shift each context feature.
scale: The statistic to use to scale each context feature.
target: The target data we wish to scale in the environment.
using: The number of interactions to use when calculating the necessary statistics.
"""
assert isinstance(shift,Number) or shift in ["min","mean","med"]
assert isinstance(scale,Number) or scale in ["minmax","std","iqr","maxabs"]
self._shift = shift
self._scale = scale
self._using = using
self._target = target
@property
def params(self) -> Dict[str, Any]:
return {
"scale_shift": self._shift,
"scale_scale": self._scale,
"scale_using": self._using,
"scale_target": self._target
}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
fitting_interactions = list(islice(iter_interactions,self._using))
shifts : Dict[Hashable,float] = defaultdict(lambda:0)
scales : Dict[Hashable,float] = defaultdict(lambda:1)
unscaled: Dict[Hashable,List[Any]] = defaultdict(list)
if any([isinstance(i.context,dict) for i in fitting_interactions]) and self._shift != 0:
raise CobaException("Shift is required to be 0 for sparse environments. Otherwise the environment will become dense.")
mixed = set()
had_non_numeric = set()
for interaction in fitting_interactions:
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if name in mixed: continue
is_numeric = isinstance(value,Number)
is_nan = is_numeric and isnan(value)
if is_nan:
pass
elif (not is_numeric and name in unscaled) or (is_numeric and name in had_non_numeric):
mixed.add(name)
if name in unscaled: del unscaled[name]
if name in had_non_numeric: had_non_numeric.remove(name)
elif not is_numeric:
had_non_numeric.add(name)
elif is_numeric and not is_nan:
unscaled[name].append(value)
if self._target == "rewards":
unscaled["rewards"].extend(interaction.rewards)
if mixed: warnings.warn(f"Some features were not scaled due to having mixed types: {mixed}. ")
has_sparse_zero = set()
for interaction in fitting_interactions:
if isinstance(interaction.context,dict):
has_sparse_zero |= unscaled.keys() - interaction.context.keys() - {"rewards"}
for key in has_sparse_zero:
unscaled[key].append(0)
for name, values in unscaled.items():
if isinstance(self._shift, Number):
shift = self._shift
if self._shift == "min":
shift = min(values)
if self._shift == "mean":
shift = mean(values)
if self._shift == "med":
shift = median(values)
if isinstance(self._scale, Number):
scale_num = self._scale
scale_den = 1
if self._scale == "std":
scale_num = 1
scale_den = stdev(values)
if self._scale == "minmax":
scale_num = 1
scale_den = max(values)-min(values)
if self._scale == "iqr":
scale_num = 1
scale_den = iqr(values)
if self._scale == "maxabs":
scale_num = 1
scale_den = max([abs(v-shift) for v in values])
shifts[name] = shift
scales[name] = scale_num/scale_den if round(scale_den,10) != 0 else 1
for interaction in chain(fitting_interactions, iter_interactions):
scaled_values = {}
final_context = interaction.context
final_rewards = None
final_kwargs = interaction.kwargs.copy()
if self._target == "features":
for name,value in self._feature_pairs(interaction.context):
if isinstance(value,Number):
scaled_values[name] = (value-shifts[name])*scales[name]
else:
scaled_values[name] = value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = scaled_values
elif isinstance(interaction.context,tuple):
final_context = tuple(scaled_values[k] for k,_ in self._feature_pairs(interaction.context))
else:
final_context = scaled_values[1]
if self._target == "rewards":
final_rewards = [ (r-shifts['rewards'])*scales['rewards'] for r in interaction.rewards ]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
final_rewards or interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Scale.")
def _feature_pairs(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
class Impute(EnvironmentFilter):
"""Impute missing values (nan) in Interaction contexts."""
def __init__(self,
stat : Literal["mean","median","mode"] = "mean",
using: Optional[int] = None):
"""Instantiate an Impute filter.
Args:
stat: The statistic to use for impuatation.
using: The number of interactions to use to calculate the imputation statistics.
"""
assert stat in ["mean","median","mode"]
self._stat = stat
self._using = using
@property
def params(self) -> Dict[str, Any]:
return { "impute_stat": self._stat, "impute_using": self._using }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
iter_interactions = iter(interactions)
train_interactions = list(islice(iter_interactions,self._using))
test_interactions = chain.from_iterable([train_interactions, iter_interactions])
stats : Dict[Hashable,float] = defaultdict(int)
features: Dict[Hashable,List[Number]] = defaultdict(list)
for interaction in train_interactions:
for name,value in self._context_as_name_values(interaction.context):
if isinstance(value,Number) and not isnan(value):
features[name].append(value)
for feat_name, feat_numeric_values in features.items():
if self._stat == "mean":
stats[feat_name] = mean(feat_numeric_values)
if self._stat == "median":
stats[feat_name] = median(feat_numeric_values)
if self._stat == "mode":
stats[feat_name] = mode(feat_numeric_values)
for interaction in test_interactions:
kv_imputed_context = {}
for name,value in self._context_as_name_values(interaction.context):
kv_imputed_context[name] = stats[name] if isinstance(value,Number) and isnan(value) else value
if interaction.context is None:
final_context = None
elif isinstance(interaction.context,dict):
final_context = kv_imputed_context
elif isinstance(interaction.context,tuple):
final_context = tuple(kv_imputed_context[k] for k,_ in self._context_as_name_values(interaction.context))
else:
final_context = kv_imputed_context[1]
if isinstance(interaction, SimulatedInteraction):
yield SimulatedInteraction(
final_context,
interaction.actions,
interaction.rewards,
**interaction.kwargs
)
elif isinstance(interaction, LoggedInteraction):
yield LoggedInteraction(
final_context,
interaction.action,
interaction.reward,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Impute.")
def _context_as_name_values(self,context) -> Sequence[Tuple[Hashable,Any]]:
if isinstance(context,dict ): return context.items()
if isinstance(context,tuple): return enumerate(context)
if context is not None : return [(1,context)]
return []
class Sparse(EnvironmentFilter):
"""Sparsify an environment's feature representation.
This has little utility beyond debugging.
"""
def __init__(self, context:bool = True, action:bool = False):
"""Instantiate a Sparse filter.
Args:
context: If True then contexts should be made sparse otherwise leave them alone.
action: If True then actions should be made sparse otherwise leave them alone.
"""
self._context = context
self._action = action
@property
def params(self) -> Dict[str, Any]:
return { "sparse_C": self._context, "sparse_A": self._action }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
for interaction in interactions:
sparse_context = self._make_sparse(interaction.context) if self._context else interaction.context
if isinstance(interaction, SimulatedInteraction):
sparse_actions = list(map(self._make_sparse,interaction.actions)) if self._action else interaction.actions
yield SimulatedInteraction(
sparse_context,
sparse_actions,
interaction.rewards
)
elif isinstance(interaction, LoggedInteraction):
sparse_action = self._make_sparse(interaction.action) if self._action else interaction.action
yield LoggedInteraction(
sparse_context,
sparse_action,
interaction.reward,
interaction.probability,
interaction.actions,
**interaction.kwargs
)
else: #pragma: no cover
raise CobaException("Unknown interactions were given to Sparse.")
def _make_sparse(self, value) -> Optional[dict]:
if isinstance(value,dict) or value is None:
return value
if isinstance(value,(list,tuple)):
return dict(enumerate(value))
return {0:value}
class Cycle(EnvironmentFilter):
"""Cycle all rewards associated with actions by one place.
This filter is useful for testing an algorithms response to a non-stationary shock.
"""
def __init__(self, after:int = 0):
"""Instantiate a Cycle filter.
Args:
after: How many interactions should be seen before applying the cycle filter.
"""
self._after = after
@property
def params(self) -> Dict[str, Any]:
return { "cycle_after": self._after }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
underlying_iterable = iter(interactions)
sans_cycle_interactions = islice(underlying_iterable, self._after)
with_cycle_interactions = underlying_iterable
for interaction in sans_cycle_interactions:
yield interaction
try:
first_interaction = next(with_cycle_interactions)
action_set = set(first_interaction.actions)
n_actions = len(action_set)
featureless_actions = [tuple([0]*n+[1]+[0]*(n_actions-n-1)) for n in range(n_actions)]
with_cycle_interactions = chain([first_interaction], with_cycle_interactions)
if len(set(action_set) & set(featureless_actions)) != len(action_set):
warnings.warn("Cycle only works for environments without action features. It will be ignored in this case.")
for interaction in with_cycle_interactions:
yield interaction
else:
for interaction in with_cycle_interactions:
rewards = interaction.rewards[-1:] + interaction.rewards[:-1]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
except StopIteration:
pass
class Binary(EnvironmentFilter):
"""Binarize all rewards to either 1 (max rewards) or 0 (all others)."""
@property
def params(self) -> Dict[str, Any]:
return { "binary": True }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
for interaction in interactions:
max_rwd = max(interaction.rewards)
rewards = [int(r==max_rwd) for r in interaction.rewards]
yield SimulatedInteraction(interaction.context, interaction.actions, rewards, **interaction.kwargs)
class Sort(EnvironmentFilter):
"""Sort a sequence of Interactions in an Environment."""
def __init__(self, *keys: Union[str,int,Sequence[Union[str,int]]]) -> None:
"""Instantiate a Sort filter.
Args:
*keys: The context items that should be sorted on.
"""
self._keys = list(Flatten().filter([list(keys)]))[0]
@property
def params(self) -> Dict[str, Any]:
return { "sort": self._keys or '*' }
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
full_sorter = lambda interaction: tuple(interaction.context )
list_sorter = lambda interaction: tuple(interaction.context[key] for key in self._keys)
dict_sorter = lambda interaction: tuple(interaction.context.get(key,0) for key in self._keys)
interactions = list(interactions)
is_sparse = isinstance(interactions[0].context,dict)
sorter = full_sorter if not self._keys else dict_sorter if is_sparse else list_sorter
return sorted(interactions, key=sorter)
class Where(EnvironmentFilter):
"""Define Environment selection criteria for an Environments pipe."""
def __init__(self, *, n_interactions: Union[int,Tuple[Optional[int],Optional[int]]] = None) -> None:
"""Instantiate a Where filter.
Args:
n_interactions: The minimum, maximum or exact number of interactions Environments must have.
"""
self._n_interactions = n_interactions
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._n_interactions is not None:
params["where_n_interactions"] = self._n_interactions
return params
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
interactions = iter(interactions)
if self._n_interactions is None or self._n_interactions == (None,None):
min_interactions = None
max_interactions = None
take_interactions = 0
elif isinstance(self._n_interactions, int):
min_interactions = self._n_interactions
max_interactions = self._n_interactions
take_interactions = self._n_interactions+1
else:
min_interactions = self._n_interactions[0]
max_interactions = self._n_interactions[1]
take_interactions = max(filter(lambda x: x is not None, list(self._n_interactions)))+1
taken_interactions = list(islice(interactions, take_interactions))
if max_interactions is not None and len(taken_interactions) > max_interactions:
return []
if min_interactions is not None and len(taken_interactions) < min_interactions:
return []
return chain(taken_interactions, interactions)
class Warm(EnvironmentFilter):
"""Turn a SimulatedEnvironment into a WarmStartEnvironment."""
def __init__(self, n_warm:int, seed:int = 1):
"""Instantiate a Warm filter.
Args:
n_warm: The number of interactions that should be turned into LoggedInteractions.
seed: The random number seed that determines the random logging policy for LoggedInteractions.
"""
self._n_warm = n_warm
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return { "n_warm": self._n_warm }
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[Interaction]:
self._rng = CobaRandom(self._seed)
underlying_iterable = iter(interactions)
logged_interactions = map(self._to_logged_interaction, islice(underlying_iterable, self._n_warm))
simulated_interactions = underlying_iterable
return chain(logged_interactions, simulated_interactions)
def _to_logged_interaction(self, interaction: SimulatedInteraction) -> LoggedInteraction:
num_actions = len(interaction.actions)
probabilities = [1/num_actions] * num_actions
idx = self._rng.choice(list(range(num_actions)), probabilities)
actions = interaction.actions
action = interaction.actions[idx]
prob = probabilities[idx]
reward = interaction.rewards[idx]
return LoggedInteraction(interaction.context, action, reward, prob, actions)
class Riffle(EnvironmentFilter):
"""Riffle shuffle Interactions by taking actions from the end and evenly distributing into the beginning."""
def __init__(self, spacing: int = 3, seed=1) -> None:
"""Instantiate a Riffle filter.
Args:
spacing: The number of interactions from the beginning between each interaction shuffled in from the end.
seed: The seed used to determine the location of each ending interaction when placed within its beginning space.
"""
self._spacing = spacing
self._seed = seed
@property
def params(self) -> Dict[str, Any]:
return {"riffle_spacing": self._spacing, "riffle_seed": self._seed}
def filter(self, interactions: Iterable[Interaction]) -> Iterable[Interaction]:
rng = CobaRandom(self._seed)
interactions = list(interactions)
for i in range(int(len(interactions)/(self._spacing+1))):
interactions.insert(i*self._spacing+rng.randint(0,self._spacing), interactions.pop())
return interactions
class Noise(EnvironmentFilter):
"""Introduce noise to an environment."""
def __init__(self,
context: Callable[[float,CobaRandom], float] = None,
action : Callable[[float,CobaRandom], float] = None,
reward : Callable[[float,CobaRandom], float] = None,
seed : int = 1) -> None:
"""Instantiate a Noise EnvironmentFilter.
Args:
context: A noise generator for context features.
action : A noise generator for action features.
reward : A noise generator for rewards.
seed : The seed initializing the random state of the noise generators.
"""
self._args = (context,action,reward,seed)
self._no_noise = lambda x, _: x
if context is None and action is None and reward is None:
context = lambda x, rng: x+rng.gauss(0,1)
self._context_noise = context or self._no_noise
self._action_noise = action or self._no_noise
self._reward_noise = reward or self._no_noise
self._seed = seed
def __reduce__(self) -> tuple:
try:
pickle.dumps(self._args)
except Exception:
message = (
"We were unable to pickle the Noise filter. This is likely due to using lambda functions for noise generation. "
"To work around this we recommend you first define your lambda functions as a named function and then pass the "
"named function to Noise."
)
raise CobaException(message)
else:
return (Noise, self._args)
@property
def params(self) -> Dict[str, Any]:
params = {}
if self._context_noise != self._no_noise: params['context_noise'] = True
if self._action_noise != self._no_noise : params['action_noise' ] = True
if self._reward_noise != self._no_noise : params['reward_noise' ] = True
params['noise_seed'] = self._seed
return params
def filter(self, interactions: Iterable[SimulatedInteraction]) -> Iterable[SimulatedInteraction]:
rng = CobaRandom(self._seed)
for interaction in interactions:
if isinstance(interaction, LoggedInteraction):
raise CobaException("We do not currently support adding noise to a LoggedInteraction.")
noisy_context = self._noises(interaction.context, rng, self._context_noise)
noisy_actions = [ self._noises(a, rng, self._action_noise) for a in interaction.actions ]
noisy_rewards = [ self._noises(r, rng, self._reward_noise) for r in interaction.rewards ]
yield SimulatedInteraction(noisy_context, noisy_actions, noisy_rewards, **interaction.kwargs)
def _noises(self, value:Union[None,float,str,Mapping,Sequence], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]):
if isinstance(value, collections.abc.Mapping):
#we sort so that noise generation is deterministic with respect to seed
return { k:self._noise(v, rng, noiser) for k,v in sorted(value.items()) }
if isinstance(value, collections.abc.Sequence) and not isinstance(value, str):
return [ self._noise(v, rng, noiser) for v in value ]
return self._noise(value, rng, noiser)
def _noise(self, value:Union[None,float,str], rng: CobaRandom, noiser: Callable[[float,CobaRandom], float]) -> float:
return value if not isinstance(value,(int,float)) else noiser(value, rng)
| en | 0.831988 | A filter that can be applied to an Environment. Apply a filter to an Environment's interactions. Return whatever interactions are given to the filter. Take a fixed number of interactions from an Environment. Shuffle a sequence of Interactions in an Environment. Take a fixed number of random Interactions from an Environment. Shift and scale features to precondition them before learning. Instantiate a Scale filter. Args: shift: The statistic to use to shift each context feature. scale: The statistic to use to scale each context feature. target: The target data we wish to scale in the environment. using: The number of interactions to use when calculating the necessary statistics. #pragma: no cover Impute missing values (nan) in Interaction contexts. Instantiate an Impute filter. Args: stat: The statistic to use for impuatation. using: The number of interactions to use to calculate the imputation statistics. #pragma: no cover Sparsify an environment's feature representation. This has little utility beyond debugging. Instantiate a Sparse filter. Args: context: If True then contexts should be made sparse otherwise leave them alone. action: If True then actions should be made sparse otherwise leave them alone. #pragma: no cover Cycle all rewards associated with actions by one place. This filter is useful for testing an algorithms response to a non-stationary shock. Instantiate a Cycle filter. Args: after: How many interactions should be seen before applying the cycle filter. Binarize all rewards to either 1 (max rewards) or 0 (all others). Sort a sequence of Interactions in an Environment. Instantiate a Sort filter. Args: *keys: The context items that should be sorted on. Define Environment selection criteria for an Environments pipe. Instantiate a Where filter. Args: n_interactions: The minimum, maximum or exact number of interactions Environments must have. Turn a SimulatedEnvironment into a WarmStartEnvironment. Instantiate a Warm filter. Args: n_warm: The number of interactions that should be turned into LoggedInteractions. seed: The random number seed that determines the random logging policy for LoggedInteractions. Riffle shuffle Interactions by taking actions from the end and evenly distributing into the beginning. Instantiate a Riffle filter. Args: spacing: The number of interactions from the beginning between each interaction shuffled in from the end. seed: The seed used to determine the location of each ending interaction when placed within its beginning space. Introduce noise to an environment. Instantiate a Noise EnvironmentFilter. Args: context: A noise generator for context features. action : A noise generator for action features. reward : A noise generator for rewards. seed : The seed initializing the random state of the noise generators. #we sort so that noise generation is deterministic with respect to seed | 2.495381 | 2 |
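A minimal usage sketch of the environment filters in the file above. It assumes the coba package is installed and that Scale, Cycle, and Binary are importable as defined in this file; the three toy interactions and their one-hot actions are made up for illustration, not taken from the source.

from coba.environments.simulated.primitives import SimulatedInteraction

# Hypothetical toy data: dense contexts and featureless (one-hot) actions so Cycle applies.
interactions = [
    SimulatedInteraction((1.0, 2.0), [(1, 0), (0, 1)], [0.2, 0.8]),
    SimulatedInteraction((3.0, 4.0), [(1, 0), (0, 1)], [0.9, 0.1]),
    SimulatedInteraction((5.0, 6.0), [(1, 0), (0, 1)], [0.4, 0.6]),
]

# Each filter takes an iterable of interactions and yields transformed ones,
# so they chain like ordinary generator pipelines.
scaled = Scale(shift="mean", scale="std", using=3).filter(interactions)
cycled = Cycle(after=1).filter(scaled)   # rotate rewards after the first interaction
binary = Binary().filter(cycled)         # 1 for the max reward, 0 otherwise

for interaction in binary:
    print(interaction.context, interaction.rewards)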
python/cuml/preprocessing/LabelEncoder.py | egoolish/cuml | 7 | 8996 | <gh_stars>1-10
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import nvcategory
from librmm_cffi import librmm
import numpy as np
def _enforce_str(y: cudf.Series) -> cudf.Series:
''' Ensure that nvcategory is being given strings
'''
if y.dtype != "object":
return y.astype("str")
return y
def _enforce_npint32(y: cudf.Series) -> cudf.Series:
if y.dtype != np.int32:
return y.astype(np.int32)
return y
class LabelEncoder(object):
"""
An nvcategory based implementation of ordinal label encoding
Examples
--------
Converting a categorical implementation to a numerical one
.. code-block:: python
from cudf import DataFrame, Series
data = DataFrame({'category': ['a', 'b', 'c', 'd']})
# There are two functionally equivalent ways to do this
le = LabelEncoder()
le.fit(data.category) # le = le.fit(data.category) also works
encoded = le.transform(data.category)
print(encoded)
# This method is preferred
le = LabelEncoder()
encoded = le.fit_transform(data.category)
print(encoded)
# We can assign this to a new column
data = data.assign(encoded=encoded)
print(data.head())
# We can also encode more data
test_data = Series(['c', 'a'])
encoded = le.transform(test_data)
print(encoded)
# After train, ordinal label can be inverse_transform() back to
# string labels
ord_label = cudf.Series([0, 0, 1, 2, 1])
str_label = le.inverse_transform(ord_label)
print(str_label)
Output:
.. code-block:: python
0 0
1 1
2 2
3 3
dtype: int64
0 0
1 1
2 2
3 3
dtype: int32
category encoded
0 a 0
1 b 1
2 c 2
3 d 3
0 2
1 0
dtype: int64
0 a
1 a
2 b
3 c
4 b
dtype: object
"""
def __init__(self, *args, **kwargs):
self._cats: nvcategory.nvcategory = None
self._dtype = None
self._fitted: bool = False
def _check_is_fitted(self):
if not self._fitted:
raise RuntimeError("Model must first be .fit()")
def fit(self, y: cudf.Series) -> "LabelEncoder":
"""
Fit a LabelEncoder (nvcategory) instance to a set of categories
Parameters
---------
y : cudf.Series
Series containing the categories to be encoded. It's elements
may or may not be unique
Returns
-------
self : LabelEncoder
A fitted instance of itself to allow method chaining
"""
self._dtype = y.dtype
y = _enforce_str(y)
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
return self
def transform(self, y: cudf.Series) -> cudf.Series:
"""
Transform an input into its categorical keys.
This is intended for use with small inputs relative to the size of the
dataset. For fitting and transforming an entire dataset, prefer
`fit_transform`.
Parameters
----------
y : cudf.Series
Input keys to be transformed. Its values should match the
categories given to `fit`
Returns
------
encoded : cudf.Series
The ordinally encoded input series
Raises
------
KeyError
if a category appears that was not seen in `fit`
"""
self._check_is_fitted()
y = _enforce_str(y)
encoded = cudf.Series(
nvcategory.from_strings(y.data)
.set_keys(self._cats.keys())
.values()
)
if -1 in encoded:
raise KeyError("Attempted to encode unseen key")
return encoded
def fit_transform(self, y: cudf.Series) -> cudf.Series:
"""
Simultaneously fit and transform an input
This is functionally equivalent to (but faster than)
`LabelEncoder().fit(y).transform(y)`
"""
self._dtype = y.dtype
# Convert y to nvstrings series, if it isn't one
y = _enforce_str(y)
# Bottleneck is here, despite everything being done on the device
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
arr: librmm.device_array = librmm.device_array(
y.data.size(), dtype=np.int32
)
self._cats.values(devptr=arr.device_ctypes_pointer.value)
return cudf.Series(arr)
def inverse_transform(self, y: cudf.Series) -> cudf.Series:
''' Revert ordinal label to original label
Parameters
----------
y : cudf.Series, dtype=int32
Ordinal labels to be reverted
Returns
-------
reverted : cudf.Series
Reverted labels
'''
# check LabelEncoder is fitted
self._check_is_fitted()
# check input type is cudf.Series
if not isinstance(y, cudf.Series):
raise TypeError(
'Input of type {} is not cudf.Series'.format(type(y)))
# check if y's dtype is np.int32, otherwise convert it
y = _enforce_npint32(y)
# check if ord_label out of bound
ord_label = y.unique()
category_num = len(self._cats.keys())
for ordi in ord_label:
if ordi < 0 or ordi >= category_num:
raise ValueError(
'y contains previously unseen label {}'.format(ordi))
# convert ordinal label to string label
reverted = cudf.Series(self._cats.gather_strings(
y.data.mem.device_ctypes_pointer.value, len(y)))
return reverted
| #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import nvcategory
from librmm_cffi import librmm
import numpy as np
def _enforce_str(y: cudf.Series) -> cudf.Series:
''' Ensure that nvcategory is being given strings
'''
if y.dtype != "object":
return y.astype("str")
return y
def _enforce_npint32(y: cudf.Series) -> cudf.Series:
if y.dtype != np.int32:
return y.astype(np.int32)
return y
class LabelEncoder(object):
"""
An nvcategory based implementation of ordinal label encoding
Examples
--------
Converting a categorical implementation to a numerical one
.. code-block:: python
from cudf import DataFrame, Series
data = DataFrame({'category': ['a', 'b', 'c', 'd']})
# There are two functionally equivalent ways to do this
le = LabelEncoder()
le.fit(data.category) # le = le.fit(data.category) also works
encoded = le.transform(data.category)
print(encoded)
# This method is preferred
le = LabelEncoder()
encoded = le.fit_transform(data.category)
print(encoded)
# We can assign this to a new column
data = data.assign(encoded=encoded)
print(data.head())
# We can also encode more data
test_data = Series(['c', 'a'])
encoded = le.transform(test_data)
print(encoded)
# After train, ordinal label can be inverse_transform() back to
# string labels
ord_label = cudf.Series([0, 0, 1, 2, 1])
str_label = le.inverse_transform(ord_label)
print(str_label)
Output:
.. code-block:: python
0 0
1 1
2 2
3 3
dtype: int64
0 0
1 1
2 2
3 3
dtype: int32
category encoded
0 a 0
1 b 1
2 c 2
3 d 3
0 2
1 0
dtype: int64
0 a
1 a
2 b
3 c
4 b
dtype: object
"""
def __init__(self, *args, **kwargs):
self._cats: nvcategory.nvcategory = None
self._dtype = None
self._fitted: bool = False
def _check_is_fitted(self):
if not self._fitted:
raise RuntimeError("Model must first be .fit()")
def fit(self, y: cudf.Series) -> "LabelEncoder":
"""
Fit a LabelEncoder (nvcategory) instance to a set of categories
Parameters
---------
y : cudf.Series
Series containing the categories to be encoded. It's elements
may or may not be unique
Returns
-------
self : LabelEncoder
A fitted instance of itself to allow method chaining
"""
self._dtype = y.dtype
y = _enforce_str(y)
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
return self
def transform(self, y: cudf.Series) -> cudf.Series:
"""
Transform an input into its categorical keys.
This is intended for use with small inputs relative to the size of the
dataset. For fitting and transforming an entire dataset, prefer
`fit_transform`.
Parameters
----------
y : cudf.Series
Input keys to be transformed. Its values should match the
categories given to `fit`
Returns
------
encoded : cudf.Series
The ordinally encoded input series
Raises
------
KeyError
if a category appears that was not seen in `fit`
"""
self._check_is_fitted()
y = _enforce_str(y)
encoded = cudf.Series(
nvcategory.from_strings(y.data)
.set_keys(self._cats.keys())
.values()
)
if -1 in encoded:
raise KeyError("Attempted to encode unseen key")
return encoded
def fit_transform(self, y: cudf.Series) -> cudf.Series:
"""
Simultaneously fit and transform an input
This is functionally equivalent to (but faster than)
`LabelEncoder().fit(y).transform(y)`
"""
self._dtype = y.dtype
# Convert y to nvstrings series, if it isn't one
y = _enforce_str(y)
# Bottleneck is here, despite everything being done on the device
self._cats = nvcategory.from_strings(y.data)
self._fitted = True
arr: librmm.device_array = librmm.device_array(
y.data.size(), dtype=np.int32
)
self._cats.values(devptr=arr.device_ctypes_pointer.value)
return cudf.Series(arr)
def inverse_transform(self, y: cudf.Series) -> cudf.Series:
''' Revert ordinal label to original label
Parameters
----------
y : cudf.Series, dtype=int32
Ordinal labels to be reverted
Returns
-------
reverted : cudf.Series
Reverted labels
'''
# check LabelEncoder is fitted
self._check_is_fitted()
# check input type is cudf.Series
if not isinstance(y, cudf.Series):
raise TypeError(
'Input of type {} is not cudf.Series'.format(type(y)))
# check if y's dtype is np.int32, otherwise convert it
y = _enforce_npint32(y)
# check if ord_label out of bound
ord_label = y.unique()
category_num = len(self._cats.keys())
for ordi in ord_label:
if ordi < 0 or ordi >= category_num:
raise ValueError(
'y contains previously unseen label {}'.format(ordi))
# convert ordinal label to string label
reverted = cudf.Series(self._cats.gather_strings(
y.data.mem.device_ctypes_pointer.value, len(y)))
return reverted | en | 0.636093 | # # Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Ensure that nvcategory is being given strings An nvcategory based implementation of ordinal label encoding Examples -------- Converting a categorical implementation to a numerical one .. code-block:: python from cudf import DataFrame, Series data = DataFrame({'category': ['a', 'b', 'c', 'd']}) # There are two functionally equivalent ways to do this le = LabelEncoder() le.fit(data.category) # le = le.fit(data.category) also works encoded = le.transform(data.category) print(encoded) # This method is preferred le = LabelEncoder() encoded = le.fit_transform(data.category) print(encoded) # We can assign this to a new column data = data.assign(encoded=encoded) print(data.head()) # We can also encode more data test_data = Series(['c', 'a']) encoded = le.transform(test_data) print(encoded) # After train, ordinal label can be inverse_transform() back to # string labels ord_label = cudf.Series([0, 0, 1, 2, 1]) ord_label = dask_cudf.from_cudf(data, npartitions=2) str_label = le.inverse_transform(ord_label) print(str_label) Output: .. code-block:: python 0 0 1 1 2 2 3 3 dtype: int64 0 0 1 1 2 2 3 3 dtype: int32 category encoded 0 a 0 1 b 1 2 c 2 3 d 3 0 2 1 0 dtype: int64 0 a 1 a 2 b 3 c 4 b dtype: object Fit a LabelEncoder (nvcategory) instance to a set of categories Parameters --------- y : cudf.Series Series containing the categories to be encoded. It's elements may or may not be unique Returns ------- self : LabelEncoder A fitted instance of itself to allow method chaining Transform an input into its categorical keys. This is intended for use with small inputs relative to the size of the dataset. For fitting and transforming an entire dataset, prefer `fit_transform`. Parameters ---------- y : cudf.Series Input keys to be transformed. Its values should match the categories given to `fit` Returns ------ encoded : cudf.Series The ordinally encoded input series Raises ------ KeyError if a category appears that was not seen in `fit` Simultaneously fit and transform an input This is functionally equivalent to (but faster than) `LabelEncoder().fit(y).transform(y)` # Convert y to nvstrings series, if it isn't one # Bottleneck is here, despite everything being done on the device Revert ordinal label to original label Parameters ---------- y : cudf.Series, dtype=int32 Ordinal labels to be reverted Returns ------- reverted : cudf.Series Reverted labels # check LabelEncoder is fitted # check input type is cudf.Series # check if y's dtype is np.int32, otherwise convert it # check if ord_label out of bound # convert ordinal label to string label | 3.116228 | 3 |
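A condensed round trip through the encoder above, mirroring its docstring example rather than introducing new API; it assumes a working cudf/nvcategory installation and that LabelEncoder is importable from this module.

import cudf

le = LabelEncoder()
data = cudf.DataFrame({'category': ['a', 'b', 'c', 'd']})

codes = le.fit_transform(data['category'])   # int32 codes 0..3, one per category
le.transform(cudf.Series(['c', 'a']))        # -> 2, 0 (an unseen key raises KeyError)
le.inverse_transform(codes)                  # -> back to 'a', 'b', 'c', 'd'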
cleaning.py | jhamrick/cogsci-proceedings-analysis | 0 | 8997 | <filename>cleaning.py
import re
import difflib
import pandas as pd
import numpy as np
from nameparser import HumanName
from nameparser.config import CONSTANTS
CONSTANTS.titles.remove("gen")
CONSTANTS.titles.remove("prin")
def parse_paper_type(section_name):
section_name = section_name.strip().lower()
if section_name == '':
paper_type = None
elif re.match('.*workshop.*', section_name):
paper_type = 'workshop'
elif re.match('.*symposi.*', section_name):
paper_type = 'symposium'
elif re.match('.*poster.*', section_name):
paper_type = 'poster'
elif re.match('.*tutorial.*', section_name):
paper_type = 'workshop'
elif re.match('.*abstract.*', section_name):
paper_type = 'poster'
elif re.match('.*addenda.*', section_name):
paper_type = 'other'
else:
paper_type = 'talk'
return paper_type
def clean_authors(authors):
cleaned_authors = []
authors = authors.lower()
# get rid of commas where there are suffixes, like Jr. or III
authors = authors.replace(", jr.", " jr.")
authors = authors.replace(", iii", " iii")
authors = authors.replace(", ph.d", "")
# special cases
authors = authors.replace("organizer:", "")
authors = authors.replace("roel m,", "roel m.")
if authors == '<NAME>, <NAME>, t.':
author_list = ['<NAME>', '<NAME>, t.']
else:
author_list = authors.split(",")
for author in author_list:
author = HumanName(author.lower())
if author.first == '' or author.last == '':
raise ValueError("invalid author name: {}".format(author))
author.capitalize()
author.string_format = u"{last}, {title} {first} {middle}, {suffix}"
cleaned_authors.append(unicode(author))
return cleaned_authors
def extract_authors(papers):
author_papers = []
for i, paper in papers.iterrows():
authors = clean_authors(paper['authors'])
for author in authors:
entry = paper.copy().drop('authors')
entry['author'] = author
author_papers.append(entry)
author_papers = pd.DataFrame(author_papers)
return author_papers
def fix_author_misspellings(papers, G):
authors = np.sort(papers['author'].unique())
for i in xrange(len(authors)):
window = 20
lower = i + 1
upper = min(i + 1 + window, len(authors) - 1)
for j in xrange(len(authors[lower:upper])):
author1 = authors[i]
author2 = authors[lower + j]
if author1 == author2:
continue
author1_hn = HumanName(author1)
author2_hn = HumanName(author2)
same_first = author1_hn.first == author2_hn.first
same_last = author1_hn.last == author2_hn.last
if same_first and same_last:
replace = True
else:
ratio = difflib.SequenceMatcher(None, author1, author2).ratio()
if ratio > 0.9:
coauthors = set(G[author1].keys()) & set(G[author2].keys())
if len(coauthors) > 0:
replace = True
else:
print u"\nPossible match: '{}' vs '{}' (r={})".format(
author1, author2, ratio)
print sorted(G[author1].keys())
print sorted(G[author2].keys())
accept = ""
while accept not in ("y", "n"):
accept = raw_input("Accept? (y/n) ")
replace = accept == "y"
else:
replace = False
if replace:
num1 = len(papers.groupby('author').get_group(author1))
num2 = len(papers.groupby('author').get_group(author2))
if num1 > num2:
oldname = author2
newname = author1
else:
oldname = author1
newname = author2
print u"Replacing '{}' with '{}'".format(oldname, newname)
papers.loc[papers['author'] == oldname, 'author'] = newname
authors[authors == oldname] = newname
for neighbor in G[oldname]:
if neighbor not in G[newname]:
G.add_edge(newname, neighbor)
G[newname][neighbor]['weight'] = 0
weight = G[oldname][neighbor]['weight']
G[newname][neighbor]['weight'] += weight
G.remove_node(oldname)
return papers, G
if __name__ == "__main__":
import graph
papers = pd.read_csv("cogsci_proceedings_raw.csv")
papers['type'] = papers['section'].apply(parse_paper_type)
papers = extract_authors(papers)
G = graph.make_author_graph(papers)
papers, G = fix_author_misspellings(papers, G)
papers.to_csv("cogsci_proceedings.csv", encoding='utf-8')
| <filename>cleaning.py
import re
import difflib
import pandas as pd
import numpy as np
from nameparser import HumanName
from nameparser.config import CONSTANTS
CONSTANTS.titles.remove("gen")
CONSTANTS.titles.remove("prin")
def parse_paper_type(section_name):
section_name = section_name.strip().lower()
if section_name == '':
paper_type = None
elif re.match('.*workshop.*', section_name):
paper_type = 'workshop'
elif re.match('.*symposi.*', section_name):
paper_type = 'symposium'
elif re.match('.*poster.*', section_name):
paper_type = 'poster'
elif re.match('.*tutorial.*', section_name):
paper_type = 'workshop'
elif re.match('.*abstract.*', section_name):
paper_type = 'poster'
elif re.match('.*addenda.*', section_name):
paper_type = 'other'
else:
paper_type = 'talk'
return paper_type
def clean_authors(authors):
cleaned_authors = []
authors = authors.lower()
# get rid of commas where there are suffixes, like Jr. or III
authors = authors.replace(", jr.", " jr.")
authors = authors.replace(", iii", " iii")
authors = authors.replace(", ph.d", "")
# special cases
authors = authors.replace("organizer:", "")
authors = authors.replace("roel m,", "roel m.")
if authors == '<NAME>, <NAME>, t.':
author_list = ['<NAME>', '<NAME>, t.']
else:
author_list = authors.split(",")
for author in author_list:
author = HumanName(author.lower())
if author.first == '' or author.last == '':
raise ValueError("invalid author name: {}".format(author))
author.capitalize()
author.string_format = u"{last}, {title} {first} {middle}, {suffix}"
cleaned_authors.append(unicode(author))
return cleaned_authors
def extract_authors(papers):
author_papers = []
for i, paper in papers.iterrows():
authors = clean_authors(paper['authors'])
for author in authors:
entry = paper.copy().drop('authors')
entry['author'] = author
author_papers.append(entry)
author_papers = pd.DataFrame(author_papers)
return author_papers
def fix_author_misspellings(papers, G):
authors = np.sort(papers['author'].unique())
for i in xrange(len(authors)):
window = 20
lower = i + 1
upper = min(i + 1 + window, len(authors) - 1)
for j in xrange(len(authors[lower:upper])):
author1 = authors[i]
author2 = authors[lower + j]
if author1 == author2:
continue
author1_hn = HumanName(author1)
author2_hn = HumanName(author2)
same_first = author1_hn.first == author2_hn.first
same_last = author1_hn.last == author2_hn.last
if same_first and same_last:
replace = True
else:
ratio = difflib.SequenceMatcher(None, author1, author2).ratio()
if ratio > 0.9:
coauthors = set(G[author1].keys()) & set(G[author2].keys())
if len(coauthors) > 0:
replace = True
else:
print u"\nPossible match: '{}' vs '{}' (r={})".format(
author1, author2, ratio)
print sorted(G[author1].keys())
print sorted(G[author2].keys())
accept = ""
while accept not in ("y", "n"):
accept = raw_input("Accept? (y/n) ")
replace = accept == "y"
else:
replace = False
if replace:
num1 = len(papers.groupby('author').get_group(author1))
num2 = len(papers.groupby('author').get_group(author2))
if num1 > num2:
oldname = author2
newname = author1
else:
oldname = author1
newname = author2
print u"Replacing '{}' with '{}'".format(oldname, newname)
papers.loc[papers['author'] == oldname, 'author'] = newname
authors[authors == oldname] = newname
for neighbor in G[oldname]:
if neighbor not in G[newname]:
G.add_edge(newname, neighbor)
G[newname][neighbor]['weight'] = 0
weight = G[oldname][neighbor]['weight']
G[newname][neighbor]['weight'] += weight
G.remove_node(oldname)
return papers, G
if __name__ == "__main__":
import graph
papers = pd.read_csv("cogsci_proceedings_raw.csv")
papers['type'] = papers['section'].apply(parse_paper_type)
papers = extract_authors(papers)
G = graph.make_author_graph(papers)
papers, G = fix_author_misspellings(papers, G)
papers.to_csv("cogsci_proceedings.csv", encoding='utf-8')
| en | 0.864058 | # get rid of commas where there are suffixes, like Jr. or III # special cases | 2.891892 | 3 |
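A small, indicative exercise of the two helpers in the script above. The input strings are hypothetical, and the exact spacing of the normalized names depends on nameparser's formatting, so only the general shape of the output is noted.

paper_type = parse_paper_type("Poster Session II")           # -> 'poster'
symposium  = parse_paper_type("Symposium: Social Learning")  # -> 'symposium'
names      = clean_authors("John Smith, Jane A. Doe")        # -> two "Last, First Middle"-style strings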
quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py | CamilaBodack/template-projeto-selecao | 1 | 8998 | <filename>quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py
# Generated by Django 3.1 on 2020-08-11 19:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quem_foi_para_mar_core', '0003_auto_20200811_1944'),
]
operations = [
migrations.RenameField(
model_name='contato',
old_name='pescador_id',
new_name='pescador',
),
]
| <filename>quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py
# Generated by Django 3.1 on 2020-08-11 19:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quem_foi_para_mar_core', '0003_auto_20200811_1944'),
]
operations = [
migrations.RenameField(
model_name='contato',
old_name='pescador_id',
new_name='pescador',
),
]
| en | 0.811966 | # Generated by Django 3.1 on 2020-08-11 19:45 | 1.563202 | 2 |
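For context, the RenameField operation above is what makemigrations would emit for a model-level rename such as the hypothetical sketch below; the related Pescador model and the field options are assumptions, only the model and field names follow the migration.

from django.db import models

class Contato(models.Model):
    # before: pescador_id = models.ForeignKey('Pescador', on_delete=models.CASCADE)
    pescador = models.ForeignKey('Pescador', on_delete=models.CASCADE)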
examples/tinytag/fuzz.py | MJ-SEO/py_fuzz | 0 | 8999 | from pythonfuzz.main import PythonFuzz
from tinytag import TinyTag
import io
@PythonFuzz
def fuzz(buf):
try:
f = open('temp.mp4', "wb")
f.write(buf)
f.seek(0)
tag = TinyTag.get(f.name)
except UnicodeDecodeError:
pass
if __name__ == '__main__':
fuzz()
| from pythonfuzz.main import PythonFuzz
from tinytag import TinyTag
import io
@PythonFuzz
def fuzz(buf):
try:
f = open('temp.mp4', "wb")
f.write(buf)
f.seek(0)
tag = TinyTag.get(f.name)
except UnicodeDecodeError:
pass
if __name__ == '__main__':
fuzz()
| none | 1 | 2.224836 | 2 |
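One caveat with the harness above: the temp file is written but never flushed or closed before TinyTag reads it back from disk, so the fuzzer may exercise a partially written file. A tightened sketch, keeping the same pythonfuzz/tinytag usage:

from pythonfuzz.main import PythonFuzz
from tinytag import TinyTag

@PythonFuzz
def fuzz(buf):
    # closing the file flushes the buffer before TinyTag reads it by name
    with open('temp.mp4', 'wb') as f:
        f.write(buf)
    try:
        TinyTag.get('temp.mp4')
    except UnicodeDecodeError:
        pass

if __name__ == '__main__':
    fuzz()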