# ======================================================================
# File: src/_main/RTyyyy_main.py | Repo: ufo2011/NXP-MCUBootUtility | Stars: 174 | ID: 11144374
# ======================================================================
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import wx
import sys
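# NOTE: reload(sys) and sys.setdefaultencoding() below are Python 2-only idioms;
# this script targets Python 2 and will not run unchanged on Python 3.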
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
sys.path.append(os.path.abspath(".."))
from mem import RTyyyy_memcore
from ui import RTyyyy_uidef
from ui import uidef
from ui import uivar
from ui import uilang
from fuse import RTyyyy_fusedef
from ui import ui_cfg_dcd
from ui import ui_settings_cert
from ui import ui_settings_sign
from ui import ui_settings_fixed_otpmk_key
from ui import ui_settings_flexible_user_keys_bee
from ui import ui_settings_flexible_user_keys_otfad
from ui import RT10yy_ui_efuse_lock
from ui import RT10yy_ui_efuse_bootcfg0_flexspinor_3bits
from ui import RT10yy_ui_efuse_bootcfg0_flexspinor_10bits
from ui import RT10yy_ui_efuse_bootcfg0_flexspinor_12bits
from ui import RT10yy_ui_efuse_bootcfg1
from ui import RT10yy_ui_efuse_bootcfg2
from ui import RT10yy_ui_efuse_miscconf0
from ui import RT10yy_ui_efuse_miscconf1_flexspinor
kRetryPingTimes = 5
kBootloaderType_Rom = 0
kBootloaderType_Flashloader = 1
class secBootRTyyyyMain(RTyyyy_memcore.secBootRTyyyyMem):
def __init__(self, parent):
RTyyyy_memcore.secBootRTyyyyMem.__init__(self, parent)
self.RTyyyy_isAllInOneActionTaskPending = False
if self.mcuSeries in uidef.kMcuSeries_iMXRTyyyy:
self._RTyyyy_initMain()
def _RTyyyy_initMain( self ):
if self.toolRunMode != uidef.kToolRunMode_SblOta:
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('black')
else:
self.connectStage = uidef.kConnectStage_Flashloader
self.updateConnectStatus('yellow_ota')
usbIdList = self.RTyyyy_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, False, False)
self.isBootableAppAllowedToView = False
self.lastTime = None
self.isThereBoardConnection = False
def _RTyyyy_startGaugeTimer( self ):
if not self.RTyyyy_isAllInOneActionTaskPending:
self.lastTime = time.time()
self.initGauge()
def _RTyyyy_stopGaugeTimer( self ):
if not self.RTyyyy_isAllInOneActionTaskPending:
self.deinitGauge()
self.updateCostTime()
def RTyyyy_callbackSetMcuSeries( self ):
self.RTyyyy_initUi()
self.RTyyyy_initGen()
self.RTyyyy_initRun()
self.RTyyyy_initFuse()
self.RTyyyy_initMem()
self._RTyyyy_initMain()
self.RTyyyy_setTargetSetupValue()
def RTyyyy_callbackSetMcuDevice( self ):
self.RTyyyy_setTargetSetupValue()
self.applyFuseOperToRunMode()
needToPlaySound = False
self.RTyyyy_setSecureBootSeqColor(needToPlaySound)
def RTyyyy_callbackSetBootDevice( self ):
self.RTyyyy_setTargetSetupValue()
needToPlaySound = False
self.RTyyyy_setSecureBootSeqColor(needToPlaySound)
def callbackDeviceConfigurationData( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
dcdFrame = ui_cfg_dcd.secBootUiCfgDcd(None)
dcdFrame.SetTitle(uilang.kSubLanguageContentDict['dcd_title'][self.languageIndex])
dcdFrame.setNecessaryInfo(self.dcdBinFilename, self.dcdCfgFilename, self.dcdModelFolder)
dcdFrame.Show(True)
def _RTyyyy_retryToPingBootloader( self, bootType ):
pingStatus = False
pingCnt = kRetryPingTimes
while (not pingStatus) and pingCnt > 0:
if bootType == kBootloaderType_Rom:
pingStatus = self.RTyyyy_pingRom()
elif bootType == kBootloaderType_Flashloader:
# This is mainly for RT1170 flashloader, but it is also ok for other RT devices
if (self.isOneStepConnectMode and (not self.isUsbhidPortSelected)):
time.sleep(3)
pingStatus = self.RTyyyy_pingFlashloader()
else:
pass
if pingStatus:
break
pingCnt = pingCnt - 1
if self.isUsbhidPortSelected:
time.sleep(2)
return pingStatus
def _RTyyyy_connectFailureHandler( self ):
if self.toolRunMode != uidef.kToolRunMode_SblOta:
self.connectStage = uidef.kConnectStage_Rom
else:
self.connectStage = uidef.kConnectStage_Flashloader
self.updateConnectStatus('red')
usbIdList = self.RTyyyy_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, False, False)
self.isBootableAppAllowedToView = False
def _RTyyyy_connectStateMachine( self, showError=True ):
connectSteps = RTyyyy_uidef.kConnectStep_Normal
self.getOneStepConnectMode()
retryToDetectUsb = False
if ((self.toolRunMode != uidef.kToolRunMode_SblOta) and self.isOneStepConnectMode):
if self.connectStage == uidef.kConnectStage_Reset or self.connectStage == uidef.kConnectStage_ExternalMemory:
connectSteps = RTyyyy_uidef.kConnectStep_Fast - 2
elif self.connectStage == uidef.kConnectStage_Flashloader:
connectSteps = RTyyyy_uidef.kConnectStep_Fast - 1
retryToDetectUsb = True
elif self.connectStage == uidef.kConnectStage_Rom:
connectSteps = RTyyyy_uidef.kConnectStep_Fast
retryToDetectUsb = True
else:
pass
while connectSteps:
if not self.updatePortSetupValue(retryToDetectUsb, showError):
if self.connectStage == uidef.kConnectStage_Rom:
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckBmod'][self.languageIndex])
self._RTyyyy_connectFailureHandler()
return
if self.connectStage == uidef.kConnectStage_Rom:
self.RTyyyy_connectToDevice(self.connectStage)
if self._RTyyyy_retryToPingBootloader(kBootloaderType_Rom):
self.RTyyyy_getMcuDeviceInfoViaRom()
self.getMcuDeviceHabStatus()
if self.RTyyyy_jumpToFlashloader():
self.connectStage = uidef.kConnectStage_Flashloader
self.updateConnectStatus('yellow')
usbIdList = self.RTyyyy_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, True, True)
else:
self.updateConnectStatus('red')
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToJumpToFl'][self.languageIndex])
return
else:
self.updateConnectStatus('red')
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_doubleCheckBmod'][self.languageIndex])
return
elif self.connectStage == uidef.kConnectStage_Flashloader:
self.RTyyyy_connectToDevice(self.connectStage)
if self._RTyyyy_retryToPingBootloader(kBootloaderType_Flashloader):
if self.toolRunMode != uidef.kToolRunMode_SblOta:
self.getMcuDeviceInfoViaFlashloader()
self.getMcuDeviceBtFuseSel()
self.getFlexramInfoViaFlashloader()
self.updateConnectStatus('green')
self.connectStage = uidef.kConnectStage_ExternalMemory
else:
self.getBootDeviceInfoViaFlashloader()
self.connectStage = uidef.kConnectStage_Reset
self.updateConnectStatus('blue')
else:
if showError:
if self.toolRunMode != uidef.kToolRunMode_SblOta:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToPingFl'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToPingSblIsp'][self.languageIndex])
self._RTyyyy_connectFailureHandler()
return
elif self.connectStage == uidef.kConnectStage_ExternalMemory:
if self.RTyyyy_configureBootDevice():
self.getBootDeviceInfoViaFlashloader()
self.connectStage = uidef.kConnectStage_Reset
self.updateConnectStatus('blue')
else:
if showError:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_failToCfgBootDevice'][self.languageIndex])
self._RTyyyy_connectFailureHandler()
return
elif self.connectStage == uidef.kConnectStage_Reset:
self.RTyyyy_resetMcuDevice()
self.isBootableAppAllowedToView = False
if self.toolRunMode != uidef.kToolRunMode_SblOta:
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('black')
else:
self.connectStage = uidef.kConnectStage_Flashloader
self.updateConnectStatus('yellow_ota')
usbIdList = self.RTyyyy_getUsbid()
self.setPortSetupValue(self.connectStage, usbIdList, True, True)
self.RTyyyy_connectToDevice(self.connectStage)
else:
pass
connectSteps -= 1
def RTyyyy_callbackConnectToDevice( self ):
self._RTyyyy_startGaugeTimer()
self.printLog("'Connect to xxx' button is clicked")
if not self.isSbFileEnabledToGen:
self._RTyyyy_connectStateMachine(True)
else:
if not self.isThereBoardConnection:
if self.connectStage == uidef.kConnectStage_Rom:
self.RTyyyy_initSbAppBdfilesContent()
else:
# It means there is board connection
self.isThereBoardConnection = True
self._RTyyyy_connectStateMachine(False)
if not self.isThereBoardConnection:
if self.connectStage == uidef.kConnectStage_Rom:
# It means there is no board connection, but we need to set it as True for SB generation
self.isThereBoardConnection = True
self.RTyyyy_connectToDevice(uidef.kConnectStage_Flashloader)
self.RTyyyy_isDeviceEnabledToOperate = False
self.RTyyyy_configureBootDevice()
self.connectStage = uidef.kConnectStage_Reset
self.updateConnectStatus('blue')
else:
self.isThereBoardConnection = False
else:
self.isThereBoardConnection = False
self.RTyyyy_isDeviceEnabledToOperate = True
self.connectStage = uidef.kConnectStage_Rom
self.updateConnectStatus('black')
self._RTyyyy_stopGaugeTimer()
def RTyyyy_callbackSetSecureBootType( self ):
self.setCostTime(0)
self.RTyyyy_setSecureBootSeqColor()
def RTyyyy_task_doAllInOneAction( self ):
while True:
if self.RTyyyy_isAllInOneActionTaskPending:
self._RTyyyy_doAllInOneAction()
self.RTyyyy_isAllInOneActionTaskPending = False
self._RTyyyy_stopGaugeTimer()
time.sleep(1)
def _RTyyyy_doAllInOneAction( self ):
allInOneSeqCnt = 1
directReuseCert = False
status = False
while allInOneSeqCnt:
if self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth or \
self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto or \
((self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor and self.isCertEnabledForHwCrypto):
status = self._doGenCert(directReuseCert)
if not status:
break
status = self._doProgramSrk()
if not status:
break
status = self._RTyyyy_doGenImage()
if not status:
break
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
status = self._doHwEncryption()
if not status:
break
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
status = self._doProgramHwCryptoDek()
if not status:
break
elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
if self.isCertEnabledForHwCrypto:
# If HAB is not closed here, we need to close HAB and re-do All-In-One Action
if self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed0 and \
self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed1:
if not self.isSbFileEnabledToGen:
self.enableHab()
self._RTyyyy_connectStateMachine()
while self.connectStage != uidef.kConnectStage_Reset:
self._RTyyyy_connectStateMachine()
directReuseCert = True
allInOneSeqCnt += 1
else:
pass
status = self._RTyyyy_doFlashImage()
if not status:
break
if self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto:
status = self._doFlashHabDek()
if not status:
break
allInOneSeqCnt -= 1
if self.isSbFileEnabledToGen:
status = self.RTyyyy_genSbAppImages()
else:
if status and self.isAutomaticImageReadback:
self.showPageInMainBootSeqWin(uidef.kPageIndex_BootDeviceMemory)
self._RTyyyy_doViewMem()
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_AllInOne, status)
def RTyyyy_callbackAllInOneAction( self ):
self._RTyyyy_startGaugeTimer()
self.RTyyyy_isAllInOneActionTaskPending = True
def callbackAdvCertSettings( self, event ):
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
elif self.secureBootType != RTyyyy_uidef.kSecureBootType_Development:
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and (not self.isCertEnabledForHwCrypto):
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_notEnabledForHwCrypto'][self.languageIndex])
else:
if self.checkIfSubWinHasBeenOpened():
return
certSettingsFrame = ui_settings_cert.secBootUiSettingsCert(None)
certSettingsFrame.SetTitle(uilang.kSubLanguageContentDict['cert_title'][self.languageIndex])
certSettingsFrame.Show(True)
self.updateAllCstPathToCorrectVersion()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_noNeedToSetForUnsigned'][self.languageIndex])
def _wantToReuseAvailableCert( self, directReuseCert ):
certAnswer = wx.NO
if self.isCertificateGenerated(self.secureBootType):
if not directReuseCert:
msgText = ((uilang.kMsgLanguageContentDict['certGenInfo_reuseOldCert'][self.languageIndex]))
certAnswer = wx.MessageBox(msgText, "Certificate Question", wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION)
if certAnswer == wx.CANCEL:
return None
elif certAnswer == wx.NO:
msgText = ((uilang.kMsgLanguageContentDict['certGenInfo_haveNewCert'][self.languageIndex]))
certAnswer = wx.MessageBox(msgText, "Certificate Question", wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION)
if certAnswer == wx.CANCEL:
return None
elif certAnswer == wx.YES:
certAnswer = wx.NO
else:
certAnswer = wx.YES
else:
certAnswer = wx.YES
return (certAnswer == wx.YES)
def _doGenCert( self, directReuseCert=False ):
status = False
reuseCert = None
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
elif self.secureBootType != RTyyyy_uidef.kSecureBootType_Development:
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and (not self.isCertEnabledForHwCrypto):
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_notEnabledForHwCrypto'][self.languageIndex])
else:
self._RTyyyy_startGaugeTimer()
self.printLog("'Generate Certificate' button is clicked")
self.updateAllCstPathToCorrectVersion()
reuseCert = self._wantToReuseAvailableCert(directReuseCert)
if reuseCert == None:
pass
elif not reuseCert:
self.cleanUpCertificate()
if self.createSerialAndKeypassfile():
self.RTyyyy_setSecureBootButtonColor()
self.genCertificate()
self.genSuperRootKeys()
self.showSuperRootKeys()
self.backUpCertificate()
status = True
else:
status = True
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_noNeedToGenForUnsigned'][self.languageIndex])
if reuseCert != None:
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_GenCert, status)
return status
def callbackGenCert( self, event ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._doGenCert()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def callbackAdvSignSettings( self, event ):
if self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth or \
self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto or \
((self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and (self.isCertEnabledForHwCrypto)):
if self.checkIfSubWinHasBeenOpened():
return
signSettingsFrame = ui_settings_sign.secBootUiSettingsSign(None)
signSettingsFrame.SetTitle(uilang.kSubLanguageContentDict['sign_title'][self.languageIndex])
signSettingsFrame.Show(True)
else:
pass
def _RTyyyy_doGenImage( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
else:
self._RTyyyy_startGaugeTimer()
self.printLog("'Generate Bootable Image' button is clicked")
if self.createMatchedAppBdfile():
# Need to update image picture for DCD
needToPlaySound = False
self.RTyyyy_setSecureBootSeqColor(needToPlaySound)
if self.RTyyyy_genBootableImage():
self.showHabDekIfApplicable()
status = True
self._RTyyyy_stopGaugeTimer()
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_GenImage, status)
return status
def RTyyyy_callbackGenImage( self ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._RTyyyy_doGenImage()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def callbackSetCertForHwCrypto( self, event ):
if self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto:
self.setHwCryptoCertColor()
def callbackSetKeyStorageRegion( self, event ):
if self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto:
self.setKeyStorageRegionColor()
def callbackAdvKeySettings( self, event ):
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if self.checkIfSubWinHasBeenOpened():
return
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
otpmkKeySettingsFrame = ui_settings_fixed_otpmk_key.secBootUiSettingsFixedOtpmkKey(None)
otpmkKeySettingsFrame.SetTitle(uilang.kSubLanguageContentDict['otpmkKey_title'][self.languageIndex])
otpmkKeySettingsFrame.setNecessaryInfo(self.secureBootType, self.tgt.flexspiNorMemBase)
otpmkKeySettingsFrame.Show(True)
elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
userKeySettingsFrame = None
if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
userKeySettingsFrame = ui_settings_flexible_user_keys_bee.secBootUiSettingsFlexibleUserKeysBee(None)
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
userKeySettingsFrame = ui_settings_flexible_user_keys_otfad.secBootUiSettingsFlexibleUserKeysOtfad(None)
else:
pass
userKeySettingsFrame.SetTitle(uilang.kSubLanguageContentDict['userKey_title'][self.languageIndex])
userKeySettingsFrame.setNecessaryInfo(self.mcuDevice, self.tgt.flexspiNorMemBase)
userKeySettingsFrame.Show(True)
else:
pass
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['keyGenError_onlyForHwCrypto'][self.languageIndex])
def _doHwEncryption( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
self._RTyyyy_startGaugeTimer()
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
if self.connectStage == uidef.kConnectStage_Reset:
if not self.prepareForFixedOtpmkEncryption():
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_failToPrepareForSnvs'][self.languageIndex])
else:
status = True
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
self.encrypteImageUsingFlexibleUserKeys()
status = True
else:
pass
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForHwCrypto'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_PrepHwCrypto, status)
return status
def callbackDoHwEncryption( self, event ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._doHwEncryption()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _doProgramSrk( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
elif self.secureBootType != RTyyyy_uidef.kSecureBootType_Development:
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and (not self.isCertEnabledForHwCrypto):
self.popupMsgBox(uilang.kMsgLanguageContentDict['certGenError_notEnabledForHwCrypto'][self.languageIndex])
else:
if self.connectStage == uidef.kConnectStage_ExternalMemory or \
self.connectStage == uidef.kConnectStage_Reset:
self._RTyyyy_startGaugeTimer()
self.printLog("'Load SRK data' button is clicked")
if self.burnSrkData():
status = True
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotEnterFl'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operKeyError_srkNotForUnsigned'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_ProgSrk, status)
return status
def callbackProgramSrk( self, event ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._doProgramSrk()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _doProgramHwCryptoDek( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
if self.connectStage == uidef.kConnectStage_ExternalMemory or \
self.connectStage == uidef.kConnectStage_Reset:
self._RTyyyy_startGaugeTimer()
if self.burnHwCryptoDekData():
status = True
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotEnterFl'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operKeyError_dekNotForSnvs'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operKeyError_dekOnlyForHwCrypto'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_OperHwCrypto, status)
return status
def callbackProgramHwCryptoDek( self, event ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._doProgramHwCryptoDek()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _RTyyyy_doFlashImage( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
else:
if self.connectStage == uidef.kConnectStage_Reset:
self._RTyyyy_startGaugeTimer()
self.printLog("'Load Bootable Image' button is clicked")
if not self.RTyyyy_flashBootableImage():
self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_failToFlashImage'][self.languageIndex])
else:
self.isBootableAppAllowedToView = True
if self.RTyyyy_burnBootDeviceFuses():
if (self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth) or \
(self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto and self.isCertEnabledForHwCrypto):
if self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed0 and \
self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed1:
self.enableHab()
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if self.burnHwCryptoKeySel() and self.burnHwCryptoEnablements():
status = True
else:
status = True
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_FlashImage, status)
return status
def RTyyyy_callbackFlashImage( self ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._RTyyyy_doFlashImage()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def _doFlashHabDek( self ):
status = False
if (self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto) and self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHwCryptoError_onlyForFlexspiNor'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto and \
(self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
(not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
self.popupMsgBox(uilang.kMsgLanguageContentDict['operHabError_notAppliableDevice'][self.languageIndex])
elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto:
if self.connectStage == uidef.kConnectStage_Reset:
self._RTyyyy_startGaugeTimer()
self.printLog("'Load KeyBlob Data' button is clicked")
if self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed0 and \
self.mcuDeviceHabStatus != RTyyyy_fusedef.kHabStatus_Closed1:
if not self.isSbFileEnabledToGen:
self.enableHab()
self._RTyyyy_connectStateMachine()
while self.connectStage != uidef.kConnectStage_Reset:
self._RTyyyy_connectStateMachine()
self.flashHabDekToGenerateKeyBlob()
self.isBootableAppAllowedToView = True
status = True
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_keyBlobOnlyForHab'][self.languageIndex])
self.invalidateStepButtonColor(uidef.kSecureBootSeqStep_ProgDek, status)
return status
def callbackFlashHabDek( self, event ):
if self.toolRunMode != uidef.kToolRunMode_Entry:
self._doFlashHabDek()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['separActnError_notAvailUnderEntry'][self.languageIndex])
def callbackSetEfuseLock( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseLockFrame = RT10yy_ui_efuse_lock.secBootUiEfuseLock(None)
efuseLockFrame.SetTitle("eFuse 0x400 Lock")
efuseLockFrame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseLockFrame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseLock( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_LOCK'])
def callbackSetEfuseBootCfg0( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseBootCfg0Frame = None
if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
if self.tgt.flexspiNorEfuseBootCfg0Bits == 3:
efuseBootCfg0Frame = RT10yy_ui_efuse_bootcfg0_flexspinor_3bits.secBootUiEfuseBootCfg0FlexspiNor3bits(None)
elif self.tgt.flexspiNorEfuseBootCfg0Bits == 10:
efuseBootCfg0Frame = RT10yy_ui_efuse_bootcfg0_flexspinor_10bits.secBootUiEfuseBootCfg0FlexspiNor10bits(None)
elif self.tgt.flexspiNorEfuseBootCfg0Bits == 12:
efuseBootCfg0Frame = RT10yy_ui_efuse_bootcfg0_flexspinor_12bits.secBootUiEfuseBootCfg0FlexspiNor12bits(None)
else:
pass
efuseBootCfg0Frame.SetTitle("eFuse 0x450 Boot Cfg0 - FlexSPI NOR")
else:
uivar.setRuntimeSettings(False)
return
efuseBootCfg0Frame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseBootCfg0Frame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseBootCfg0( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG0'])
def callbackSetEfuseBootCfg1( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseBootCfg1Frame = RT10yy_ui_efuse_bootcfg1.secBootUiEfuseBootCfg1(None)
efuseBootCfg1Frame.SetTitle("eFuse 0x460 Boot Cfg1")
efuseBootCfg1Frame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseBootCfg1Frame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseBootCfg1( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG1'])
def callbackSetEfuseBootCfg2( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseBootCfg2Frame = RT10yy_ui_efuse_bootcfg2.secBootUiEfuseBootCfg2(None)
efuseBootCfg2Frame.SetTitle("eFuse 0x470 Boot Cfg2")
efuseBootCfg2Frame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseBootCfg2Frame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseBootCfg2( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_BOOT_CFG2'])
def callbackSetEfuseMiscConf0( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseMiscConf0Frame = RT10yy_ui_efuse_miscconf0.secBootUiEfuseMiscConf0(None)
efuseMiscConf0Frame.SetTitle("eFuse 0x6d0 Misc Conf0")
efuseMiscConf0Frame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseMiscConf0Frame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseMiscConf0( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_MISC_CONF0'])
def callbackSetEfuseMiscConf1( self, event ):
if self.checkIfSubWinHasBeenOpened():
return
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
efuseMiscConf1Frame = None
if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
efuseMiscConf1Frame = RT10yy_ui_efuse_miscconf1_flexspinor.secBootUiEfuseMiscConf1FlexspiNor(None)
efuseMiscConf1Frame.SetTitle("eFuse 0x6e0 Misc Conf1 - FlexSPI NOR")
else:
uivar.setRuntimeSettings(False)
return
efuseMiscConf1Frame.setNecessaryInfo(self.tgt.efuseDescDiffDict)
efuseMiscConf1Frame.Show(True)
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
pass
else:
pass
def callbackEnterEfuseMiscConf1( self, event ):
self.enterSettableEfuse(self.tgt.efusemapIndexDict['kEfuseIndex_MISC_CONF1'])
def _RTyyyy_doViewMem( self ):
if self.connectStage == uidef.kConnectStage_Reset:
if self.isBootableAppAllowedToView:
self._RTyyyy_startGaugeTimer()
self.RTyyyy_readProgrammedMemoryAndShow()
self._RTyyyy_stopGaugeTimer()
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['operImgError_hasnotFlashImage'][self.languageIndex])
else:
self.popupMsgBox(uilang.kMsgLanguageContentDict['connectError_hasnotCfgBootDevice'][self.languageIndex])
def RTyyyy_callbackViewMem( self ):
self._RTyyyy_doViewMem()
def RTyyyy_switchToolRunMode( self ):
self.applyFuseOperToRunMode()
self.RTyyyy_setSecureBootButtonColor()
# ======================================================================
# File: utils/CoNLLeval.py | Repo: heathher/neural_sequence_labeling | Stars: 240 | ID: 11144375
# ======================================================================
import argparse
import json
import re
import sys
from collections import defaultdict
"""
Borrowed from: https://github.com/AdolfVonKleist/rnn-slu/blob/master/rnnslu/CoNLLeval.py
"""
class CoNLLeval:
"""Evaluate the result of processing CoNLL-2000 shared task
Evaluate the result of processing CoNLL-2000 shared tasks. This is a
vanilla python port of the original perl script.
# usage: conlleval [-l] [-r] [-d delimiterTag] [-o oTag] < file
# README: http://cnts.uia.ac.be/conll2000/chunking/output.html
# options: l: generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# r: accept raw result tags (without B- and I- prefix;
# assumes one word per chunk)
# d: alternative delimiter tag (default is single space)
# o: alternative outside tag (default is O)
# note: the file should contain lines with items separated
# by $delimiter characters (default space). The final
# two items should contain the correct tag and the
# guessed tag in that order. Sentences should be
# separated from each other by empty lines or lines
# with $boundary fields (default -X-).
# url: http://lcg-www.uia.ac.be/conll2000/chunking/
"""
def __init__(self, verbose=0, raw=False, delimiter=" ", otag="O", boundary="-X-"):
self.verbose = verbose # verbosity level
self.boundary = boundary # sentence boundary
self.correct = None # current corpus chunk tag (I,O,B)
self.correct_chunk = 0 # number of correctly identified chunks
self.correct_tags = 0 # number of correct chunk tags
self.correct_type = None # type of current corpus chunk tag (NP,VP,etc.)
self.delimiter = delimiter # field delimiter
self.FB1 = 0.0 # FB1 score (<NAME> 1979)
self.accuracy = 0.0
self.first_item = None # first feature (for sentence boundary checks)
self.found_correct = 0 # number of chunks in corpus
self.found_guessed = 0 # number of identified chunks
self.guessed = None # current guessed chunk tag
self.guessed_type = None # type of current guessed chunk tag
self.i = None # miscellaneous counter
self.in_correct = False # currently processed chunk is correct until now
self.last_correct = "O" # previous chunk tag in corpus
self.latex = 0 # generate LaTeX formatted output
self.last_correct_type = "" # type of previously identified chunk tag
self.last_guessed = "O" # previously identified chunk tag
self.last_guessed_type = "" # type of previous chunk tag in corpus
self.last_type = None # temporary storage for detecting duplicates
self.line = None # line
self.nbr_of_features = -1 # number of features per line
self.precision = 0.0 # precision score
self.o_tag = otag # outside tag, default O
self.raw = raw # raw input: add B to every token
self.recall = 0.0 # recall score
self.token_counter = 0 # token counter (ignores sentence breaks)
self.correct_chunk = defaultdict(int) # number of correctly identified chunks per type
self.found_correct = defaultdict(int) # number of chunks in corpus per type
self.found_guessed = defaultdict(int) # number of identified chunks per type
self.features = [] # features on line
self.sorted_types = [] # sorted list of chunk type names
@staticmethod
def endOfChunk(prev_tag, tag, prev_type, tag_type, chunk_end=0):
"""Checks if a chunk ended between the previous and current word.
Checks if a chunk ended between the previous and current word.
Args:
prev_tag (str): Previous chunk tag identifier.
tag (str): Current chunk tag identifier.
prev_type (str): Previous chunk type identifier.
tag_type (str): Current chunk type identifier.
chunk_end (int): 0/True true/false identifier.
Returns:
int: 0/True true/false identifier.
"""
if prev_tag == "B" and tag == "B":
chunk_end = True
if prev_tag == "B" and tag == "O":
chunk_end = True
if prev_tag == "I" and tag == "B":
chunk_end = True
if prev_tag == "I" and tag == "O":
chunk_end = True
if prev_tag == "E" and tag == "E":
chunk_end = True
if prev_tag == "E" and tag == "I":
chunk_end = True
if prev_tag == "E" and tag == "O":
chunk_end = True
if prev_tag == "I" and tag == "O":
chunk_end = True
if prev_tag != "O" and prev_tag != "." and prev_type != tag_type:
chunk_end = True
# corrected 1998-12-22: these chunks are assumed to have length 1
if prev_tag == "]":
chunk_end = True
if prev_tag == "[":
chunk_end = True
return chunk_end
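# Worked examples (illustrative): endOfChunk("B", "B", "NP", "NP") -> True, since a new
# B- tag closes the running chunk; endOfChunk("I", "I", "NP", "NP") returns the falsy
# default 0, since the chunk simply continues.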
@staticmethod
def startOfChunk(prevTag, tag, prevType, tag_type, chunk_start=0):
"""Checks if a chunk started between the previous and current word.
Checks if a chunk started between the previous and current word.
Args:
prevTag (str): Previous chunk tag identifier.
tag (str): Current chunk tag identifier.
prevType (str): Previous chunk type identifier.
tag_type (str): Current chunk type identifier.
chunk_start:
Returns:
int: 0/True true/false identifier.
"""
if prevTag == "B" and tag == "B":
chunk_start = True
if prevTag == "I" and tag == "B":
chunk_start = True
if prevTag == "O" and tag == "B":
chunk_start = True
if prevTag == "O" and tag == "I":
chunk_start = True
if prevTag == "E" and tag == "E":
chunk_start = True
if prevTag == "E" and tag == "I":
chunk_start = True
if prevTag == "O" and tag == "E":
chunk_start = True
if prevTag == "O" and tag == "I":
chunk_start = True
if tag != "O" and tag != "." and prevType != tag_type:
chunk_start = True
# corrected 1998-12-22: these chunks are assumed to have length 1
if tag == "[":
chunk_start = True
if tag == "]":
chunk_start = True
return chunk_start
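# Worked examples (illustrative): startOfChunk("O", "B", "O", "NP") -> True, since a
# B- tag after O opens a new chunk; startOfChunk("B", "I", "NP", "NP") returns the
# falsy default 0, since an I- tag of the same type continues the current chunk.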
def Evaluate(self, infile):
"""Evaluate test outcome for a CoNLLeval shared task.
Evaluate test outcome for a CoNLLeval shared task.
Args:
infile (str): The input file for evaluation.
"""
with open(infile, "r") as ifp:
for line in ifp:
line = line.lstrip().rstrip()
self.features = re.split(self.delimiter, line)
if len(self.features) == 1 and re.match(r"^\s*$", self.features[0]):
self.features = []
if self.nbr_of_features < 0:
self.nbr_of_features = len(self.features) - 1
elif self.nbr_of_features != len(self.features) - 1 and len(self.features) != 0:
raise ValueError("Unexpected number of features: {0}\t{1}".format(len(self.features) + 1,
self.nbr_of_features + 1))
if len(self.features) == 0 or self.features[0] == self.boundary:
self.features = [self.boundary, "O", "O"]
if len(self.features) < 2:
raise ValueError("CoNLLeval: Unexpected number of features in line.")
if self.raw is True:
if self.features[-1] == self.o_tag:
self.features[-1] = "O"
if self.features[-2] == self.o_tag:
self.features[-2] = "O"
if not self.features[-1] == "O":
self.features[-1] = "B-{0}".format(self.features[-1])
if not self.features[-2] == "O":
self.features[-2] = "B-{0}".format(self.features[-2])
# 20040126 ET code which allows hyphens in the types
ffeat = re.search(r"^([^\-]*)-(.*)$", self.features[-1])
if ffeat:
self.guessed = ffeat.groups()[0]
self.guessed_type = ffeat.groups()[1]
else:
self.guessed = self.features[-1]
self.guessed_type = ""
self.features.pop(-1)
ffeat = re.search(r"^([^\-]*)-(.*)$", self.features[-1])
if ffeat:
self.correct = ffeat.groups()[0]
self.correct_type = ffeat.groups()[1]
else:
self.correct = self.features[-1]
self.correct_type = ""
self.features.pop(-1)
if self.guessed_type is None:
self.guessed_type = ""
if self.correct_type is None:
self.correct_type = ""
self.first_item = self.features.pop(0)
# 1999-06-26 sentence breaks should always be counted as out of chunk
if self.first_item == self.boundary:
self.guessed = "O"
if self.in_correct is True:
if self.endOfChunk(self.last_correct, self.correct, self.last_correct_type,
self.correct_type) is True and self.endOfChunk(self.last_guessed, self.guessed,
self.last_guessed_type,
self.guessed_type) is True \
and self.last_guessed_type == self.last_correct_type:
self.in_correct = False
self.correct_chunk[self.last_correct_type] += 1
elif self.endOfChunk(self.last_correct, self.correct, self.last_correct_type,
self.correct_type) != self.endOfChunk(self.last_guessed, self.guessed,
self.last_guessed_type,
self.guessed_type) \
or self.guessed_type != self.correct_type:
self.in_correct = False
if self.startOfChunk(self.last_correct, self.correct, self.last_correct_type,
self.correct_type) is True and self.startOfChunk(self.last_guessed, self.guessed,
self.last_guessed_type,
self.guessed_type) is True \
and self.guessed_type == self.correct_type:
self.in_correct = True
if self.startOfChunk(self.last_correct, self.correct, self.last_correct_type,
self.correct_type) is True:
self.found_correct[self.correct_type] += 1
if self.startOfChunk(self.last_guessed, self.guessed, self.last_guessed_type,
self.guessed_type) is True:
self.found_guessed[self.guessed_type] += 1
if self.first_item != self.boundary:
if self.correct == self.guessed and self.guessed_type == self.correct_type:
self.correct_tags += 1
self.token_counter += 1
self.last_guessed = self.guessed
self.last_correct = self.correct
self.last_guessed_type = self.guessed_type
self.last_correct_type = self.correct_type
if self.verbose > 1:
print("{0} {1} {2} {3} {4} {5} {6}".format(self.last_guessed, self.last_correct,
self.last_guessed_type, self.last_correct_type,
self.token_counter, len(self.found_correct.keys()),
len(self.found_guessed.keys())))
if self.in_correct is True:
self.correct_chunk[len(self.correct_chunk.keys())] = 0
self.correct_chunk[self.last_correct_type] += 1
def ComputeAccuracy(self):
"""Compute overall precision, recall and FB1 (default values are 0.0).
Compute overall precision, recall and FB1 (default values are 0.0).
Results:
list: accuracy, precision, recall, FB1 float values.
"""
if sum(self.found_guessed.values()) > 0:
self.precision = 100 * sum(self.correct_chunk.values()) / float(sum(self.found_guessed.values()))
if sum(self.found_correct.values()) > 0:
self.recall = 100 * sum(self.correct_chunk.values()) / float(sum(self.found_correct.values()))
if self.precision + self.recall > 0:
self.FB1 = 2 * self.precision * self.recall / (self.precision + self.recall)
overall = "processed {0} tokens with {1} phrases; found: {2} phrases; correct: {3}."
overall = overall.format(self.token_counter, sum(self.found_correct.values()), sum(self.found_guessed.values()),
sum(self.correct_chunk.values()))
if self.verbose > 0:
print(overall)
self.accuracy = 100 * self.correct_tags / float(self.token_counter)
if self.token_counter > 0 and self.verbose > 0:
print("accuracy: {0:0.2f}".format(self.accuracy))
print("precision: {0:0.2f}".format(self.precision))
print("recall: {0:0.2f}".format(self.recall))
print("FB1: {0:0.2f}".format(self.FB1))
return {"accuracy": self.accuracy, "precision": self.precision, "recall": self.recall, "FB1": self.FB1}
def conlleval(self, predictions, groundtruth, words, infile):
"""Evaluate the results of one training iteration.
Evaluate the results of one training iteration. This now
uses the native python port of the CoNLLeval perl script.
It computes the accuracy, precision, recall and FB1 scores,
and returns these as a dictionary.
Args:
predictions (list): Predictions from the network.
groundtruth (list): Ground truth for evaluation.
words (list): Corresponding words for de-referencing.
infile:
Returns:
dict: Accuracy (accuracy), precisions (p), recall (r), and FB1 (f1) scores represented as floats.
infile: The inputs written to file in the format understood by the conlleval.pl script and CoNLLeval python
port.
"""
ofp = open(infile, "w")
for sl, sp, sw in zip(groundtruth, predictions, words):
ofp.write(u"BOS O O\n")
for wl, wp, words in zip(sl, sp, sw):
line = u"{0} {1} {2}\n".format(words, wl, wp)
ofp.write(line)
ofp.write(u"EOS O O\n\n")
ofp.close()
self.Evaluate(infile)
return self.ComputeAccuracy()
if __name__ == "__main__":
example = "{0} --infile".format(sys.argv[0])
parser = argparse.ArgumentParser(description=example)
parser.add_argument("--infile", "-i", help="Input CoNLLeval results file.", required=True)
parser.add_argument("--raw", "-r", help="Accept raw result tags.", default=False, action="store_true")
parser.add_argument("--delimiter", "-d", help="Token delimiter.", default=" ", type=str)
parser.add_argument("--otag", "-ot", help="Alternative outside tag.", default="O", type=str)
parser.add_argument("--boundary", "-b", help="Boundary tag.", default="-X-", type=str)
parser.add_argument("--verbose", "-v", help="Verbose mode.", default=0, type=int)
args = parser.parse_args()
if args.verbose > 0:
for key, val in args.__dict__.items():  # .iteritems() is Python 2 only; use .items() here
print("{0}: {1}".format(key, val))
ce = CoNLLeval(verbose=args.verbose, raw=args.raw, delimiter=args.delimiter, otag=args.otag, boundary=args.boundary)
ce.Evaluate(args.infile)
results = ce.ComputeAccuracy()
print()
print(json.dumps(results, indent=4))  # print the JSON summary rather than discarding it
# ======================================================================
# File: catboost/spark/catboost4j-spark/core/src/test/python/catboost_classifier_test.py | Repo: mjjohns1/catboost | Stars: 6,989 | ID: 11144382
# ======================================================================
import collections
import os
import shutil
import tempfile
import test_helpers
import pool_test_helpers
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import Row
from pyspark.sql.types import *
def testBinaryClassificationSimpleOnDataFrame():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcDataSchema = pool_test_helpers.createSchema(
[
("features", VectorUDT()),
("label", DoubleType())
],
featureNames,
addFeatureNamesMetadata=True
)
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), 1.0),
Row(Vectors.dense(0.97, 0.82, 0.33), 2.0),
Row(Vectors.dense(0.13, 0.22, 0.23), 2.0),
Row(Vectors.dense(0.14, 0.18, 0.1), 1.0),
Row(Vectors.dense(0.9, 0.67, 0.17), 2.0),
Row(Vectors.dense(0.66, 0.1, 0.31), 1.0)
]
df = spark.createDataFrame(spark.sparkContext.parallelize(srcData), StructType(srcDataSchema))
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(df)
predictions = model.transform(df)
print ("predictions")
predictions.show(truncate=False)
def testSimpleBinaryClassification():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcSchemaData = [
("features", VectorUDT()),
("label", StringType()),
("groupId", LongType()),
("groupWeight", FloatType()),
("subgroupId", IntegerType()),
("weight", FloatType())
]
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), "0", 0xB337C6FEFE2E2F7, 1.0, 0xD34BFBD, 0.12),
Row(Vectors.dense(0.97, 0.82, 0.33), "0", 0xB337C6FEFE2E2F7, 1.0, 0x19CE5B0, 0.18),
Row(Vectors.dense(0.13, 0.22, 0.23), "1", 0x86F1B93B695F9E6, 0.0, 0x23D794E, 1.0),
Row(Vectors.dense(0.14, 0.18, 0.1), "1", 0xD9DBDD3199D6518, 0.5, 0x62772D1, 0.45),
Row(Vectors.dense(0.9, 0.67, 0.17), "0", 0xD9DBDD3199D6518, 0.5, 0x19CE5B0, 1.0),
Row(Vectors.dense(0.66, 0.1, 0.31), "1", 0xD9DBDD3199D6518, 0.5, 0x1FA606F, 2.0)
]
pool = pool_test_helpers.createRawPool(
test_helpers.getCurrentMethodName,
pool_test_helpers.createSchema(
srcSchemaData,
featureNames,
addFeatureNamesMetadata=True
),
srcData,
{"groupId": "groupId", "groupWeight": "groupWeight", "subgroupId": "subgroupId", "weight": "weight"}
)
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(pool)
predictions = model.transform(pool.data)
for rawPrediction in [False, True]:
for probability in [False, True]:
for prediction in [False, True]:
model.setRawPredictionCol("rawPrediction" if (rawPrediction) else "")
model.setProbabilityCol("probability" if (probability) else "")
model.setPredictionCol("prediction" if (prediction) else "")
predictions = model.transform(pool.data)
print('\nrawPrediction=%s, probability=%s, prediction=%s' % (rawPrediction, probability, prediction))
predictions.show(truncate=False)
def testBinaryClassificationWithClassNamesAsIntSet():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcSchemaData = [
("features", VectorUDT()),
("label", StringType())
]
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), "1"),
Row(Vectors.dense(0.97, 0.82, 0.33), "2"),
Row(Vectors.dense(0.13, 0.22, 0.23), "2"),
Row(Vectors.dense(0.14, 0.18, 0.1), "1"),
Row(Vectors.dense(0.9, 0.67, 0.17), "2"),
Row(Vectors.dense(0.66, 0.1, 0.31), "1")
]
pool = pool_test_helpers.createRawPool(
test_helpers.getCurrentMethodName,
pool_test_helpers.createSchema(
srcSchemaData,
featureNames,
addFeatureNamesMetadata=True
),
srcData,
{}
)
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setClassNames(["1", "2"])
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(pool)
predictions = model.transform(pool.data)
predictions.show(truncate=False)
def testBinaryClassificationWithTargetBorder():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcSchemaData = [
("features", VectorUDT()),
("label", DoubleType())
]
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), 0.12),
Row(Vectors.dense(0.97, 0.82, 0.33), 0.1),
Row(Vectors.dense(0.13, 0.22, 0.23), 0.7),
Row(Vectors.dense(0.14, 0.18, 0.1), 0.33),
Row(Vectors.dense(0.9, 0.67, 0.17), 0.82),
Row(Vectors.dense(0.66, 0.1, 0.31), 0.93)
]
pool = pool_test_helpers.createRawPool(
test_helpers.getCurrentMethodName,
pool_test_helpers.createSchema(
srcSchemaData,
featureNames,
addFeatureNamesMetadata=True
),
srcData,
{}
)
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setTargetBorder(0.5)
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(pool)
predictions = model.transform(pool.data)
predictions.show(truncate=False)
# Good
def testBinaryClassificationWithClassWeightsMap():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcSchemaData = [
("features", VectorUDT()),
("label", IntegerType())
]
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), 0),
Row(Vectors.dense(0.97, 0.82, 0.33), 1),
Row(Vectors.dense(0.13, 0.22, 0.23), 1),
Row(Vectors.dense(0.14, 0.18, 0.1), 0),
Row(Vectors.dense(0.9, 0.67, 0.17), 0),
Row(Vectors.dense(0.66, 0.1, 0.31), 0)
]
pool = pool_test_helpers.createRawPool(
test_helpers.getCurrentMethodName,
pool_test_helpers.createSchema(
srcSchemaData,
featureNames,
addFeatureNamesMetadata=True
),
srcData,
{}
)
classWeightsMap = collections.OrderedDict([("0", 1.0), ("1", 2.0)])
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setClassWeightsMap(classWeightsMap)
.setLoggingLevel(catboost_spark.ELoggingLevel.Debug)
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(pool)
predictions = model.transform(pool.data)
predictions.show(truncate=False)
def testClassifierSerialization():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
serializationDir = tempfile.mkdtemp(prefix="catboost_models_")
path = os.path.join(serializationDir, "serialized_classifier_0")
classifier = catboost_spark.CatBoostClassifier()
classifier.write().overwrite().save(path)
loadedClassifier = catboost_spark.CatBoostClassifier.load(path)
path = os.path.join(serializationDir, "serialized_classifier_1")
classifier = (catboost_spark.CatBoostClassifier().setLossFunction("MultiClass").setIterations(2))
classifier.write().overwrite().save(path)
loadedClassifier = catboost_spark.CatBoostClassifier.load(path)
shutil.rmtree(serializationDir)
def testModelSerialization():
spark = test_helpers.getOrCreateSparkSession(test_helpers.getCurrentMethodName())
import catboost_spark
featureNames = ["f1", "f2", "f3"]
srcDataSchema = pool_test_helpers.createSchema(
[
("features", VectorUDT()),
("label", DoubleType())
],
featureNames,
addFeatureNamesMetadata=True
)
srcData = [
Row(Vectors.dense(0.1, 0.2, 0.11), 1.0),
Row(Vectors.dense(0.97, 0.82, 0.33), 2.0),
Row(Vectors.dense(0.13, 0.22, 0.23), 2.0),
Row(Vectors.dense(0.14, 0.18, 0.1), 1.0),
Row(Vectors.dense(0.9, 0.67, 0.17), 2.0),
Row(Vectors.dense(0.66, 0.1, 0.31), 1.0)
]
df = spark.createDataFrame(spark.sparkContext.parallelize(srcData), StructType(srcDataSchema))
classifier = (catboost_spark.CatBoostClassifier()
.setIterations(20)
.setTrainDir(tempfile.mkdtemp(prefix=test_helpers.getCurrentMethodName())))
model = classifier.fit(df)
predictions = model.transform(df)
print ("predictions")
predictions.show(truncate=False)
modelsDir = tempfile.mkdtemp(prefix="catboost_models_")
nativeCatBoostModelPath = os.path.join(modelsDir, "binclass_model_on_df.cbm")
model.saveNativeModel(nativeCatBoostModelPath)
loadedCatBoostModel = catboost_spark.CatBoostClassificationModel.loadNativeModel(nativeCatBoostModelPath)
predictionsLoadedCatBoost = loadedCatBoostModel.transform(df)
print ("predictionsLoadedCatBoost")
predictionsLoadedCatBoost.show(truncate=False)
nativeJsonModelPath = os.path.join(modelsDir, "binclass_model_on_df.json")
model.saveNativeModel(nativeJsonModelPath, catboost_spark.EModelType.Json)
nativeOnnxModelPath = os.path.join(modelsDir, "binclass_model_on_df.onnx")
model.saveNativeModel(
nativeOnnxModelPath,
catboost_spark.EModelType.Onnx,
{
"onnx_domain": "ai.catboost",
"onnx_model_version": 1,
"onnx_doc_string": "test model for classification",
"onnx_graph_name": "CatBoostModel_for_classification"
}
)
loadedOnnxModel = catboost_spark.CatBoostClassificationModel.loadNativeModel(nativeOnnxModelPath, catboost_spark.EModelType.Onnx)
predictionsLoadedOnnx = loadedOnnxModel.transform(df)
print ("predictionsLoadedOnnx")
predictionsLoadedOnnx.show(truncate=False)
sparkModelPath = os.path.join(modelsDir, "binclass_model_on_df")
model.write().overwrite().save(sparkModelPath)
loadedModel = catboost_spark.CatBoostClassificationModel.load(sparkModelPath)
predictionsLoaded = loadedModel.transform(df)
print ("predictionsLoaded")
predictionsLoaded.show(truncate=False)
shutil.rmtree(modelsDir)
# ======================================================================
# File: tools/nntool/generation/generators/globals/mult8_rnn_infos_generator.py | Repo: 00-01/gap_sdk | Stars: 118 | ID: 11144383
# ======================================================================
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from generation.at_types.constant_info import ConstantInfo
from generation.at_types.tc_arg_info import GlobalArgInfo, GlobalResetArgInfo
from generation.generator_decorators import QREC_MULT8, generation_function
from graph.types import GRUParameters, LSTMParameters, RNNParameters
from quantization.qtype import QType
from quantization.symmetric.kernels.rnn import internal_qtype
from .global_names import *
from .mult8_infos_generator import gen_constant
@generation_function("globals",
(RNNParameters, LSTMParameters, GRUParameters),
qrec_types=(QREC_MULT8,))
def mult8_rnn_infos_generator(gen, node, qrec, pnode, fnode) -> bool:
del pnode
if fnode is not None:
return False
if isinstance(node, RNNParameters):
rnn_infos(gen, node, qrec)
elif isinstance(node, LSTMParameters):
lstm_infos(gen, node, qrec)
elif isinstance(node, GRUParameters):
gru_infos(gen, node, qrec)
else:
raise ValueError()
if node.rnn_states_as_inputs:
gen.globals.append(GlobalResetArgInfo(
f"{node.name}_Reset", 'AT_MEM_L2', 'AT_MEM_UNDEF'))
return True
def sigmoid_infos(gate_name, mult_qtype, qtype):
scale = mult_qtype.qbiases[0]
scale_n = mult_qtype.qnorms[0]
three = qtype.quantize(np.array([3]))[0]
sixth = qtype.quantize(np.array([1/6]))[0]
six = qtype.quantize(np.array([6]))[0]
actn = qtype.q
comment = str.format("{0}_scale: {1} {0}_scale_n: {2} A0: {3} B0: {4} C0: {5}",
gate_name, scale, scale_n, six, three, sixth, 1, actn)
contents = np.array([scale, scale_n, six, three,
sixth, 1, actn], dtype=np.int8)
return contents, comment
def htanh_infos(gate_name, mult_qtype, qtype):
scale = mult_qtype.qbiases[0]
scale_n = mult_qtype.qnorms[0]
one = qtype.quantize(np.array([1]))[0]
comment = str.format("{0}_scale: {1} {0}_scale_n: {2} A0: {3} B0: {4}",
gate_name, scale, scale_n, -one, one)
contents = np.array([scale, scale_n, -one, one], dtype=np.int8)
return contents, comment
def scale_infos(gate_name, mult_qtype):
scale = mult_qtype.qbiases[0]
scale_n = mult_qtype.qnorms[0]
comment = str.format("{0}_scale: {1} {0}_scale_n: {2}",
gate_name, scale, scale_n)
contents = np.array([scale, scale_n], dtype=np.int8)
return contents, comment
LSTM_INFOS_ORDER = {
'f': 'sigmoid',
'i': 'sigmoid',
'c': 'htanh',
'o': 'sigmoid',
}
GRU_INFOS_ORDER = {
'r': 'sigmoid',
'z': 'sigmoid',
'h': 'htanh',
}
INFOS_FUNCS = {
'sigmoid': sigmoid_infos,
'htanh': htanh_infos,
'tanh': htanh_infos,
}
def highb(x):
return (x >> 8) & 0xff
def lowb(x):
return x & 0xff
# define LSTM_F_INF 2
# define LSTM_F_OFF 0
# define LSTM_F_SCALE 0
# define LSTM_F_SCALEN 1
# define LSTM_I_INF 2
# define LSTM_I_OFF (LSTM_F_OFF+LSTM_F_INF)
# define LSTM_I_SCALE (0 + LSTM_I_OFF)
# define LSTM_I_SCALEN (1 + LSTM_I_OFF)
# define LSTM_G_INF 2
# define LSTM_G_OFF (LSTM_I_OFF+LSTM_I_INF)
# define LSTM_G_SCALE (0 + LSTM_G_OFF)
# define LSTM_G_SCALEN (1 + LSTM_G_OFF)
# define LSTM_O_INF 2
# define LSTM_O_OFF (LSTM_G_OFF+LSTM_G_INF)
# define LSTM_O_SCALE (0 + LSTM_O_OFF)
# define LSTM_O_SCALEN (1 + LSTM_O_OFF)
# define LSTM_COUT_INF 6
# define LSTM_COUT_OFF (LSTM_O_OFF+LSTM_O_INF)
# define LSTM_CIN_SCALE (0 + LSTM_COUT_OFF)
# define LSTM_CIN_SCALEN (1 + LSTM_COUT_OFF)
# define LSTM_COUT_SCALE (2 + LSTM_COUT_OFF)
# define LSTM_COUT_SCALEN (3 + LSTM_COUT_OFF)
# define LSTM_OUT_SCALE (4 + LSTM_COUT_OFF)
# define LSTM_OUT_SCALEN (5 + LSTM_COUT_OFF)
# define LSTM_INT_INF 7
# define LSTM_INT_OFF (LSTM_COUT_OFF+LSTM_COUT_INF)
# define LSTM_INT_A0 (0 + LSTM_INT_OFF)
# define LSTM_INT_B0 (2 + LSTM_INT_OFF)
# define LSTM_INT_C0 (4 + LSTM_INT_OFF)
# define LSTM_INT_Q (6 + LSTM_INT_OFF)
# define LSTM_X_IN_INF 7
# define LSTM_X_IN_OFF (LSTM_INT_OFF+LSTM_INT_INF)
# define LSTM_F_IN_SCALE (0 + LSTM_X_IN_OFF)
# define LSTM_F_IN_SCALEN (1 + LSTM_X_IN_OFF)
# define LSTM_I_IN_SCALE (2 + LSTM_X_IN_OFF)
# define LSTM_I_IN_SCALEN (3 + LSTM_X_IN_OFF)
# define LSTM_G_IN_SCALE (4 + LSTM_X_IN_OFF)
# define LSTM_G_IN_SCALEN (5 + LSTM_X_IN_OFF)
# define LSTM_O_IN_SCALE (6 + LSTM_X_IN_OFF)
# define LSTM_O_IN_SCALEN (7 + LSTM_X_IN_OFF)
def lstm_infos(gen, node, qrec):
i_qtype = internal_qtype(qrec)
contents = []
comments = []
    for k in LSTM_INFOS_ORDER:
info, comment = scale_infos(k, qrec.cache["r_2_%s_q" % k])
contents.append(info)
comments.append(comment)
cin_scale = qrec.cache['cell_in_q'].qbiases[0]
cin_scalen = qrec.cache['cell_in_q'].qnorms[0]
cout_scale = qrec.cache['cell_out_q'].qbiases[0]
cout_scalen = qrec.cache['cell_out_q'].qnorms[0]
out_scale = qrec.cache['state_out_q'].qbiases[0]
out_scalen = qrec.cache['state_out_q'].qnorms[0]
comments.append(str.format("cin_scale: {} cin_scale_n: {} cout_scale: {} cout_scale_n: {}",
cin_scale, cin_scalen, cout_scale, cout_scalen,))
comments.append(str.format("out_scale: {} out_scale_n: {}",
out_scale, out_scalen))
contents.append(np.array([cin_scale, cin_scalen, cout_scale, cout_scalen,
out_scale, out_scalen], dtype=np.int8))
three = i_qtype.quantize(np.array([3]))[0]
six = i_qtype.quantize(np.array([6]))[0]
sixth = i_qtype.quantize(np.array([1/6]))[0]
comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
i_qtype.q, six, three, sixth))
contents.append(np.array([lowb(six), highb(six),
lowb(three), highb(three),
lowb(sixth), highb(sixth), i_qtype.q],
dtype=np.int8))
    for k in LSTM_INFOS_ORDER:
info, comment = scale_infos(k, qrec.cache["i_2_%s_q" % k])
contents.append(info)
comments.append(comment)
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=" ".join(comments)))
# define RNN_F_INF 8
# define RNN_F_OFF 0
# define RNN_F_SCALE 0
# define RNN_F_SCALEN 1
# define RNN_F_A0 2
# define RNN_F_B0 3
# define RNN_F_IN_SCALE 4
# define RNN_F_IN_SCALEN 5
# define RNN_OUT_SCALE 6
# define RNN_OUT_SCALEN 7
def rnn_infos(gen, node, qrec):
i_state_q = qrec.in_qs[node.INPUT_NAMES.index('i_state')]
contents = []
comments = []
# info for activation (scale the act input to the proper scale)
info, comment = INFOS_FUNCS[node.activation](
"f", qrec.cache['s_2_s_q'], i_state_q)
contents.append(info)
comments.append(comment)
# info for input scaling (only used with non SameInputStateScale kernels)
info, comment = scale_infos("f", qrec.cache["i_2_a_q"])
contents.append(info)
comments.append(comment)
# info for scaling the activation out to out scale (only used for non Hard activations kernels)
info, comment = scale_infos("f", qrec.cache["s_2_o_q"])
contents.append(info)
comments.append(comment)
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
                                     comment=" ".join(comments)))
# define GRU_R_INF 4
# define GRU_R_OFF 0
# define GRU_R_INT_SCALE 0
# define GRU_R_INT_SCALEN 1
# define GRU_R_IN_SCALE 2
# define GRU_R_IN_SCALEN 3
# define GRU_Z_INF 4
# define GRU_Z_OFF (GRU_R_OFF+GRU_R_INF)
# define GRU_Z_INT_SCALE (0 + GRU_Z_OFF)
# define GRU_Z_INT_SCALEN (1 + GRU_Z_OFF)
# define GRU_Z_IN_SCALE (2 + GRU_Z_OFF)
# define GRU_Z_IN_SCALEN (3 + GRU_Z_OFF)
# define GRU_HT_INF 2
# define GRU_HT_OFF (GRU_Z_OFF+GRU_Z_INF)
# define GRU_HT_IN_SCALE (0 + GRU_HT_OFF)
# define GRU_HT_IN_SCALEN (1 + GRU_HT_OFF)
# define GRU_H_INF 2
# define GRU_H_OFF (GRU_HT_OFF+GRU_HT_INF)
# define GRU_H_INT_SCALE (0 + GRU_H_OFF)
# define GRU_H_INT_SCALEN (1 + GRU_H_OFF)
# define GRU_INT_INF 3
# define GRU_INT_OFF (GRU_H_OFF+GRU_H_INF)
# define GRU_INT_A0 (2 + GRU_INT_OFF)
# define GRU_INT_B0 (3 + GRU_INT_OFF)
# define GRU_INT_C0 (4 + GRU_INT_OFF)
# define GRU_CELL_INFOS (GRU_R_INF+GRU_Z_INF+GRU_HT_INF+GRU_H_INF+GRU_INT_INF)
def gru_infos(gen, node, qrec):
i_qtype = internal_qtype(qrec)
contents = []
comments = []
r_to_int_scale = qrec.cache['r_WR_2_int_q'].qbiases[0]
r_to_int_scalen = qrec.cache['r_WR_2_int_q'].qnorms[0]
r_to_in_scale = qrec.cache['i_2_r_WR_q'].qbiases[0]
r_to_in_scalen = qrec.cache['i_2_r_WR_q'].qnorms[0]
z_to_int_scale = qrec.cache['z_WR_2_int_q'].qbiases[0]
z_to_int_scalen = qrec.cache['z_WR_2_int_q'].qnorms[0]
z_to_in_scale = qrec.cache['i_2_z_WR_q'].qbiases[0]
z_to_in_scalen = qrec.cache['i_2_z_WR_q'].qnorms[0]
ht_to_in_scale = qrec.cache['i_2_h_WR_q'].qbiases[0]
ht_to_in_scalen = qrec.cache['i_2_h_WR_q'].qnorms[0]
h_to_int_scale = qrec.cache['h_WR_2_int_q'].qbiases[0]
h_to_int_scalen = qrec.cache['h_WR_2_int_q'].qnorms[0]
# GRU_R_INFOS
comments.append(str.format("r_to_int_scale: {} r_to_int_scalen: {} r_to_in_scale: {} r_to_in_scalen: {}",
r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen,))
contents.append(np.array(
[r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen], dtype=np.int8))
# GRU_Z_INFOS
comments.append(str.format("z_to_int_scale: {} z_to_int_scalen: {} z_to_in_scale: {} z_to_in_scalen: {}",
z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen,))
contents.append(np.array(
[z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen], dtype=np.int8))
# GRU_HT_INFOS
comments.append(str.format("ht_to_in_scale: {} ht_to_in_scalen: {}",
ht_to_in_scale, ht_to_in_scalen,))
contents.append(np.array([ht_to_in_scale, ht_to_in_scalen], dtype=np.int8))
# GRU_H_INFOS
comments.append(str.format("h_to_int_scale: {} h_to_int_scalen: {}",
h_to_int_scale, h_to_int_scalen,))
contents.append(np.array([h_to_int_scale, h_to_int_scalen], dtype=np.int8))
three = i_qtype.quantize(np.array([3]))[0]
six = i_qtype.quantize(np.array([6]))[0]
sixth = i_qtype.quantize(np.array([1/6]))[0]
comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
i_qtype.q, six, three, sixth))
contents.append(np.array([lowb(six), highb(six),
lowb(three), highb(three),
lowb(sixth), highb(sixth), i_qtype.q],
dtype=np.int8))
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=" ".join(comments)))
|
rplugin/python3/deoplete/sources/deoplete_go/clang_index.py | zchee/deoplete-vim-go | 115 | 11144398 | <reponame>zchee/deoplete-vim-go<filename>rplugin/python3/deoplete/sources/deoplete_go/clang_index.py
class Clang_Index(object):
kinds = dict(
{
# Declarations
1: "t", # CXCursor_UnexposedDecl # A declaration whose specific kind
# is not exposed via this interface
2: "struct", # CXCursor_StructDecl (A C or C++ struct)
3: "union", # CXCursor_UnionDecl (A C or C++ union)
4: "class", # CXCursor_ClassDecl (A C++ class)
5: "enumeration", # CXCursor_EnumDecl (An enumeration)
# CXCursor_FieldDecl (A field (in C) or non-static data member
6: "member",
# (in C++) in a struct, union, or C++ class)
# CXCursor_EnumConstantDecl (An enumerator constant)
7: "enumerator constant",
8: "function", # CXCursor_FunctionDecl (A function)
9: "variable", # CXCursor_VarDecl (A variable)
# CXCursor_ParmDecl (A function or method parameter)
10: "method parameter",
11: "11", # CXCursor_ObjCInterfaceDecl (An Objective-C @interface)
# CXCursor_ObjCCategoryDecl (An Objective-C @interface for a
12: "12",
# category)
13: "13", # CXCursor_ObjCProtocolDecl
# (An Objective-C @protocol declaration)
# CXCursor_ObjCPropertyDecl (An Objective-C @property declaration)
14: "14",
15: "15", # CXCursor_ObjCIvarDecl (An Objective-C instance variable)
16: "16", # CXCursor_ObjCInstanceMethodDecl
# (An Objective-C instance method)
17: "17", # CXCursor_ObjCClassMethodDecl
# (An Objective-C class method)
18: "18", # CXCursor_ObjCImplementationDec
# (An Objective-C @implementation)
19: "19", # CXCursor_ObjCCategoryImplDecll
# (An Objective-C @implementation for a category)
20: "typedef", # CXCursor_TypedefDecl (A typedef)
21: "class method", # CXCursor_CXXMethod (A C++ class method)
22: "namespace", # CXCursor_Namespace (A C++ namespace)
# CXCursor_LinkageSpec (A linkage specification,e.g. Extern "C")
23: "23",
24: "constructor", # CXCursor_Constructor (A C++ constructor)
25: "destructor", # CXCursor_Destructor (A C++ destructor)
# CXCursor_ConversionFunction (A C++ conversion function)
26: "conversion function",
# CXCursor_TemplateTypeParameter (A C++ template type parameter)
27: "a",
# CXCursor_NonTypeTemplateParameter (A C++ non-type template parameter)
28: "a",
# CXCursor_TemplateTemplateParameter (A C++ template template
# parameter)
29: "a",
# CXCursor_FunctionTemplate (A C++ function template)
30: "function template",
# CXCursor_ClassTemplate (A C++ class template)
31: "class template",
32: "32", # CXCursor_ClassTemplatePartialSpecialization
# (A C++ class template partial specialization)
# CXCursor_NamespaceAlias (A C++ namespace alias declaration)
33: "n",
# CXCursor_UsingDirective (A C++ using directive)
34: "using directive",
# CXCursor_UsingDeclaration (A C++ using declaration)
35: "using declaration",
# CXCursor_TypeAliasDecl (A C++ alias declaration)
36: "alias declaration",
# CXCursor_ObjCSynthesizeDecl (An Objective-C synthesize definition)
37: "37",
# CXCursor_ObjCDynamicDecl (An Objective-C dynamic definition)
38: "38",
39: "39", # CXCursor_CXXAccessSpecifier (An access specifier)
# References
40: "40", # CXCursor_ObjCSuperClassRef
41: "41", # CXCursor_ObjCProtocolRef
42: "42", # CXCursor_ObjCClassRef
43: "43", # CXCursor_TypeRef
44: "44", # CXCursor_CXXBaseSpecifier
45: "45", # CXCursor_TemplateRef
# (A reference to a class template, function template, template
# template parameter, or class template partial
# specialization)
# CXCursor_NamespaceRef (A ref to a namespace or namespace alias)
46: "46",
# CXCursor_MemberRef (A reference to a member of a struct, union,
47: "47",
# or class that occurs in some non-expression context,
# e.g., a designated initializer)
48: "48", # CXCursor_LabelRef (A reference to a labeled statement)
49: "49", # CXCursor_OverloadedDeclRef
# (A reference to a set of overloaded functions or function
# templates that has not yet been resolved to a specific
# function or function template)
50: "50", # CXCursor_VariableRef
# Error conditions
# 70: '70', # CXCursor_FirstInvalid
70: "70", # CXCursor_InvalidFile
71: "71", # CXCursor_NoDeclFound
72: "u", # CXCursor_NotImplemented
73: "73", # CXCursor_InvalidCode
# Expressions
# CXCursor_UnexposedExpr (An expression whose specific kind is
100: "100",
# not exposed via this interface)
# CXCursor_DeclRefExpr (An expression that refers to some value
101: "101",
            # declaration, such as a function, variable, or
# enumerator)
# CXCursor_MemberRefExpr (An expression that refers to a member
102: "102",
# of a struct, union, class, Objective-C class, etc)
103: "103", # CXCursor_CallExpr (An expression that calls a function)
# CXCursor_ObjCMessageExpr (An expression that sends a message
104: "104",
# to an Objective-C object or class)
# CXCursor_BlockExpr (An expression that represents a block
105: "105",
# literal)
106: "106", # CXCursor_IntegerLiteral (An integer literal)
# CXCursor_FloatingLiteral (A floating point number literal)
107: "107",
108: "108", # CXCursor_ImaginaryLiteral (An imaginary number literal)
109: "109", # CXCursor_StringLiteral (A string literal)
110: "110", # CXCursor_CharacterLiteral (A character literal)
# CXCursor_ParenExpr (A parenthesized expression, e.g. "(1)")
111: "111",
# CXCursor_UnaryOperator (This represents the unary-expression's
112: "112",
# (except sizeof and alignof))
# CXCursor_ArraySubscriptExpr ([C99 6.5.2.1] Array Subscripting)
113: "113",
# CXCursor_BinaryOperator (A builtin binary operation expression
114: "114",
# such as "x + y" or "x <= y")
# CXCursor_CompoundAssignOperator (Compound assignment such as
115: "115",
# "+=")
116: "116", # CXCursor_ConditionalOperator (The ?: ternary operator)
# CXCursor_CStyleCastExpr (An explicit cast in C (C99 6.5.4) or
117: "117",
# C-style cast in C++ (C++ [expr.cast]), which uses the
# syntax (Type)expr)
118: "118", # CXCursor_CompoundLiteralExpr ([C99 6.5.2.5])
# CXCursor_InitListExpr (Describes an C or C++ initializer list)
119: "119",
# CXCursor_AddrLabelExpr (The GNU address of label extension,
120: "120",
# representing &&label)
121: "121", # CXCursor_StmtExpr (This is the GNU Statement Expression
# extension: ({int X=4; X;})
            # CXCursor_GenericSelectionExpr (Represents a C11 generic
122: "122",
# selection)
# CXCursor_GNUNullExpr (Implements the GNU __null extension)
123: "123",
# CXCursor_CXXStaticCastExpr (C++'s static_cast<> expression)
124: "124",
# CXCursor_CXXDynamicCastExpr (C++'s dynamic_cast<> expression)
125: "125",
# CXCursor_CXXReinterpretCastExpr (C++'s reinterpret_cast<>
126: "126",
# expression)
# CXCursor_CXXConstCastExpr (C++'s const_cast<> expression)
127: "127",
# CXCursor_CXXFunctionalCastExpr (Represents an explicit C++ type
128: "128",
            # conversion that uses "functional" notation
# (C++ [expr.type.conv]))
129: "129", # CXCursor_CXXTypeidExpr (A C++ typeid expression
# (C++ [expr.typeid]))
            # CXCursor_CXXBoolLiteralExpr ([C++ 2.13.5] C++ Boolean
130: "130",
# Literal)
# CXCursor_CXXNullPtrLiteralExpr ([C++0x 2.14.7] C++ Pointer
131: "131",
# Literal)
# CXCursor_CXXThisExpr (Represents the "this" expression in C+)
132: "132",
133: "133", # CXCursor_CXXThrowExpr ([C++ 15] C++ Throw Expression)
# CXCursor_CXXNewExpr (A new expression for memory allocation
134: "134",
# and constructor calls)
135: "135", # CXCursor_CXXDeleteExpr (A delete expression for memory
# deallocation and destructor calls)
136: "136", # CXCursor_UnaryExpr (A unary expression)
# CXCursor_ObjCStringLiteral (An Objective-C string literal
137: "137",
# i.e. @"foo")
# CXCursor_ObjCEncodeExpr (An Objective-C @encode expression)
138: "138",
# CXCursor_ObjCSelectorExpr (An Objective-C @selector expression)
139: "139",
# CXCursor_ObjCProtocolExpr (An Objective-C @protocol expression)
140: "140",
# CXCursor_ObjCBridgedCastExpr (An Objective-C "bridged" cast
141: "141",
# expression, which casts between Objective-C pointers
# and C pointers, transferring ownership in the process)
# CXCursor_PackExpansionExpr (Represents a C++0x pack expansion
142: "142",
# that produces a sequence of expressions)
# CXCursor_SizeOfPackExpr (Represents an expression that computes
143: "143",
# the length of a parameter pack)
# CXCursor_LambdaExpr (Represents a C++ lambda expression that
144: "144",
# produces a local function object)
# CXCursor_ObjCBoolLiteralExpr (Objective-c Boolean Literal)
145: "145",
# Statements
# CXCursor_UnexposedStmt (A statement whose specific kind is not
200: "200",
# exposed via this interface)
201: "201", # CXCursor_LabelStmt (A labelled statement in a function)
202: "202", # CXCursor_CompoundStmt (A group of statements like
            # { stmt stmt })
203: "203", # CXCursor_CaseStmt (A case statment)
204: "204", # CXCursor_DefaultStmt (A default statement)
205: "205", # CXCursor_IfStmt (An if statemen)
206: "206", # CXCursor_SwitchStmt (A switch statement)
207: "207", # CXCursor_WhileStmt (A while statement)
208: "208", # CXCursor_DoStmt (A do statement)
209: "209", # CXCursor_ForStmt (A for statement)
210: "210", # CXCursor_GotoStmt (A goto statement)
211: "211", # CXCursor_IndirectGotoStmt (An indirect goto statement)
212: "212", # CXCursor_ContinueStmt (A continue statement)
213: "213", # CXCursor_BreakStmt (A break statement)
214: "214", # CXCursor_ReturnStmt (A return statement)
# CXCursor_GCCAsmStmt (A GCC inline assembly statement extension)
215: "215",
# CXCursor_ObjCAtTryStmt (Objective-C's overall try-catch-finally
216: "216",
            # statement)
# CXCursor_ObjCAtCatchStmt (Objective-C's catch statement)
217: "217",
# CXCursor_ObjCAtFinallyStmt (Objective-C's finally statement)
218: "218",
# CXCursor_ObjCAtThrowStmt (Objective-C's throw statement)
219: "219",
# CXCursor_ObjCAtSynchronizedStmt (Objective-C's synchronized
220: "220",
# statement)
# CXCursor_ObjCAutoreleasePoolStmt (Objective-C's autorelease
221: "221",
# pool statement)
# CXCursor_ObjCForCollectionStmt (Objective-C's collection
222: "222",
# statement)
223: "223", # CXCursor_CXXCatchStmt (C++'s catch statement)
224: "224", # CXCursor_CXXTryStmt (C++'s try statement)
225: "225", # CXCursor_CXXForRangeStmt (C++'s for (*: *) statement)
# CXCursor_SEHTryStmt (Windows Structured Exception Handling's
226: "226",
# try statement)
# CXCursor_SEHExceptStmt (Windows Structured Exception Handling's
227: "227",
            # except statement)
228: "228", # CXCursor_SEHFinallyStmt (Windows Structured Exception
# Handling's finally statement)
# CXCursor_MSAsmStmt (A MS inline assembly statement extension)
229: "229",
230: "230", # CXCursor_NullStmt (The null satement ";": C99 6.8.3p3)
# CXCursor_DeclStmt (Adaptor class for mixing declarations with
231: "231",
# statements and expressions)
# Translation unit
300: "300", # CXCursor_TranslationUnit (Cursor that represents the
# translation unit itself)
# Attributes
# CXCursor_UnexposedAttr (An attribute whose specific kind is
400: "400",
# not exposed via this interface)
401: "401", # CXCursor_IBActionAttr
402: "402", # CXCursor_IBOutletAttr
403: "403", # CXCursor_IBOutletCollectionAttr
404: "404", # CXCursor_CXXFinalAttr
405: "405", # CXCursor_CXXOverrideAttr
406: "406", # CXCursor_AnnotateAttr
407: "407", # CXCursor_AsmLabelAttr
# Preprocessing
500: "500", # CXCursor_PreprocessingDirective
501: "define", # CXCursor_MacroDefinition
502: "502", # CXCursor_MacroInstantiation
503: "503", # CXCursor_InclusionDirective
# Modules
600: "600", # CXCursor_ModuleImportDecl (A module import declaration)
}
)
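# Example lookups against the table above:
#   Clang_Index.kinds[8] == "function"
#   Clang_Index.kinds[20] == "typedef"
#   Clang_Index.kinds[22] == "namespace"
# Cursor kinds without a friendly label simply map to their numeric string (e.g. "103").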
|
tests/r/test_wage2.py | hajime9652/observations | 199 | 11144399 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wage2 import wage2
def test_wage2():
"""Test module wage2.py by downloading
wage2.csv and testing shape of
extracted data has 935 rows and 17 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = wage2(test_path)
try:
assert x_train.shape == (935, 17)
except:
shutil.rmtree(test_path)
    raise
|
d3net/semantic-segmentation/segmentation_data.py | ishine/ai-research-code | 199 | 11144422 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
CityScapes Segmentation data-iterator code.
'''
import os
import numpy as np
import cv2
import nnabla as nn
from nnabla.utils.data_iterator import data_iterator_simple
from nnabla.utils.image_utils import imread
import image_preprocess
class CityScapesDatasetPath(object):
'''
A Helper Class which resolves the path to images
in CityScapes dataset.
'''
def __init__(self, data_dir=None):
self.data_dir = data_dir
self.train_file = os.path.join(self.data_dir, 'train.txt')
self.val_file = os.path.join(self.data_dir, 'val.txt')
def get_image_path(self, name, train):
folder = 'train' if train else 'val'
return os.path.join(self.data_dir, 'leftImg8bit', folder, name + '_leftImg8bit.png')
def get_label_path(self, name, train):
folder = 'train' if train else 'val'
return os.path.join(self.data_dir, 'gtFine', folder, name + '_gtFine_labelTrainIds.png')
def get_image_paths(self, train=True):
file_name = self.train_file if train else self.val_file
names = np.loadtxt(file_name, dtype=str)
return [self.get_image_path(name, train) for name in names]
def get_label_paths(self, train=True):
file_name = self.train_file if train else self.val_file
names = np.loadtxt(file_name, dtype=str)
return [self.get_label_path(name, train) for name in names]
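# Usage sketch (the directory path is a placeholder): the helper just joins the
# standard CityScapes layout, assuming train.txt/val.txt list image stems without
# the _leftImg8bit/_gtFine suffixes:
#   paths = CityScapesDatasetPath('/data/cityscapes')
#   train_images = paths.get_image_paths(train=True)
#   train_labels = paths.get_label_paths(train=True)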
def palette_png_reader(fname):
    '''
    Read an indexed-color (palette) PNG and return its raw palette indices
    (requires the PIL backend of nnabla's image_utils).
    '''
assert 'PilBackend' in nn.utils.image_utils.get_available_backends()
if nn.utils.image_utils.get_backend() != 'PilBackend':
        nn.utils.image_utils.set_backend("PilBackend")
return imread(fname, return_palette_indices=True)
def data_iterator_segmentation(batch_size, image_paths, label_paths, rng=None, train=True):
'''
Returns a data iterator object for semantic image segmentation dataset.
Args:
batch_size (int): Batch size
image_paths (list of str): A list of image paths
label_paths (list of str): A list of label image paths
rng (None or numpy.random.RandomState):
A random number generator used in shuffling dataset and data augmentation.
train (bool): It performs random data augmentation as preprocessing if train is True.
        num_classes (int): Number of classes. Required if `label_mask_transformer` is not passed.
'''
assert len(image_paths) == len(label_paths)
num_examples = len(image_paths)
def image_label_load_func(i):
'''
Returns:
image: c x h x w array
label: c x h x w array
'''
img = cv2.imread(image_paths[i], cv2.IMREAD_COLOR)
lab = palette_png_reader(label_paths[i])
img, lab = image_preprocess.preprocess_image_and_label(
img, lab, rng=rng)
return img, lab
return data_iterator_simple(image_label_load_func, num_examples, batch_size, shuffle=train, rng=rng)
def data_iterator_cityscapes(batch_size, data_dir, rng=None, train=True):
'''
Returns a data iterator object for CityScapes segmentation dataset.
args:
data_dir (str):
A folder contains CityScapes dataset.
See `data_iterator_segmentation` for other arguments.
'''
cityscapes = CityScapesDatasetPath(data_dir)
image_paths = cityscapes.get_image_paths(train=train)
label_paths = cityscapes.get_label_paths(train=train)
return data_iterator_segmentation(batch_size, image_paths, label_paths, rng, train)
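# Minimal usage sketch (path and batch size are placeholders; output shapes depend on
# image_preprocess.preprocess_image_and_label):
#   di = data_iterator_cityscapes(batch_size=4, data_dir='/data/cityscapes', train=True)
#   images, labels = di.next()  # one preprocessed batch, stacked along the first axis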
|
quetz/testing/mockups.py | maresb/quetz | 108 | 11144429 | from typing import Callable, Optional, Union
import requests
from quetz.config import Config
from quetz.dao import Dao
from quetz.tasks.workers import job_wrapper
class TestWorker:
"synchronous worker for testing"
def __init__(
self,
config: Config,
db,
dao: Dao,
session: Optional[requests.Session] = None,
):
self.db = db
self.dao = dao
self.session = session
self.config = config
def execute(self, func: Union[Callable, bytes], *args, **kwargs):
resources = {
"db": self.db,
"dao": self.dao,
"pkgstore": self.config.get_package_store(),
}
if self.session:
resources['session'] = self.session
kwargs.update(resources)
job_wrapper(func, self.config, *args, **kwargs)
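# Hypothetical usage sketch (the names below are placeholders, not part of the quetz API):
#   worker = TestWorker(config, db, dao)
#   worker.execute(some_task, "some-channel")
# runs some_task synchronously through job_wrapper, with db, dao and the configured
# pkgstore injected as keyword arguments.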
|
test/pytest/test_conv1d.py | julesmuhizi/hls4ml | 263 | 11144473 | from hls4ml.converters.keras_to_hls import keras_to_hls
import pytest
import hls4ml
import numpy as np
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import yaml
@pytest.fixture(scope='module')
def data():
X = np.random.rand(100,100,7)
return X
@pytest.fixture(scope='module')
def keras_model():
jsons = open('../../example-models/keras/KERAS_conv1d.json','r').read()
model = model_from_json(jsons)
model.load_weights('../../example-models/keras/KERAS_conv1d_weights.h5')
return model
@pytest.fixture
@pytest.mark.parametrize('settings', [('io_parallel', 'latency'),
('io_parallel', 'resource'),
('io_stream', 'latency'),
('io_stream', 'resource')])
def hls_model(settings):
io_type = settings[0]
strategy = settings[1]
config = hls4ml.converters.create_config(output_dir = 'hls4mlprj_conv1d_{}_{}'.format(io_type, strategy))
config['KerasJson'] = '../../example-models/keras/KERAS_conv1d.json'
config['KerasH5'] = '../../example-models/keras/KERAS_conv1d_weights.h5'
config['OutputDir'] = 'hls4mlprj_conv1d_{}_{}'.format(io_type, strategy)
config['IOType'] = io_type
hls_config = {'Model' : {'Strategy' : strategy,
'ReuseFactor' : 1,
'Precision' : 'ap_fixed<16,3,AP_RND_CONV,AP_SAT>'}}
# Some model specific precision tuning
config['LayerName'] = {}
config['LayerName']['fc1_relu'] = {'Precision':{'weight' : 'ap_fixed<16,3>', 'result' : 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>'}}
config['LayerName']['output_softmax'] = {'Precision':{'weight' : 'ap_fixed<16,6>', 'result' : 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>'}}
config['LayerName']['output_softmax_softmax'] = {'Strategy':'Stable'}
config['HLSConfig'] = hls_config
hls_model = keras_to_hls(config)
hls_model.compile()
return hls_model
@pytest.mark.parametrize('settings', [('io_parallel', 'latency'),
('io_parallel', 'resource'),
('io_stream', 'latency'),
('io_stream', 'resource')])
def test_accuracy(data, keras_model, hls_model):
X = data
model = keras_model
# model under test predictions and accuracy
y_keras = model.predict(X)
y_hls4ml = hls_model.predict(X)
# "accuracy" of hls4ml predictions vs keras
rel_acc = accuracy_score(np.argmax(y_keras, axis=1), np.argmax(y_hls4ml, axis=1))
print('hls4ml accuracy relative to keras: {}'.format(rel_acc))
assert rel_acc > 0.98
|
launcher/launch.py | zarzen/byteps | 3,361 | 11144477 | #!/usr/bin/python
from __future__ import print_function
import os
import re
import subprocess
import threading
import sys
import time
from functools import reduce
class PropagatingThread(threading.Thread):
""" propagate exceptions to the parent's thread
refer to https://stackoverflow.com/a/31614591/9601110
"""
def run(self):
self.exc = None
try:
if hasattr(self, '_Thread__target'):
# python 2.x
self.ret = self._Thread__target(
*self._Thread__args, **self._Thread__kwargs)
else:
# python 3.x
self.ret = self._target(*self._args, **self._kwargs)
except BaseException as e:
self.exc = e
def join(self):
super(PropagatingThread, self).join()
if self.exc:
raise self.exc
return self.exc
COMMON_REQUIRED_ENVS = ["DMLC_ROLE", "DMLC_NUM_WORKER", "DMLC_NUM_SERVER",
"DMLC_PS_ROOT_URI", "DMLC_PS_ROOT_PORT"]
WORKER_REQUIRED_ENVS = ["DMLC_WORKER_ID"]
NUMA_PATH = "/sys/devices/system/node"
def allocate_cpu(local_size):
cpu_mt = os.getenv("BYTEPS_MULTITHREADED_CPU", "1").lower() in ["1", "true"]
def get_numa_info():
"""
returns a list of list, each sub list is the cpu ids of a numa node. e.g
[[0,1,2,3], [4,5,6,7]]
"""
ret = []
if os.path.exists(NUMA_PATH):
items = os.listdir(NUMA_PATH)
nodes = list(filter(lambda str: str.startswith("node"), items))
if nodes:
for node in nodes:
items = os.listdir(os.path.join(NUMA_PATH, node))
                    cpus = [re.findall(r"cpu\d+", cpu) for cpu in items]
cpus = list(filter(lambda x: x, cpus))
cpu_ids = [int(cpu[0].split('cpu')[1]) for cpu in cpus]
cpu_ids = sorted(cpu_ids)
if cpu_mt:
cpu_ids = cpu_ids[:len(cpu_ids) // 2]
ret.append(cpu_ids)
else:
print("NUMA PATH %s NOT FOUND" % NUMA_PATH)
return ret
def _get_allocation(nodes, quota, cpu_num, cpu_blacklist):
if quota < 1:
raise ValueError("quota should be no less than 1")
ret = []
for node in nodes:
if len(node) < quota:
continue
split_index = []
for i in range(1, quota):
if node[i] != node[i-1] + 1:
split_index.append(i)
quota_bck = quota
last_idx = 0
for idx in split_index:
ret.append(node[last_idx:idx])
quota -= idx - last_idx
last_idx = idx
curr_alloc = node[last_idx:last_idx+quota]
curr_alloc = [item for item in curr_alloc if item not in cpu_blacklist]
ret.append(curr_alloc)
if cpu_mt:
curr_alloc = [x + cpu_num for x in curr_alloc]
curr_alloc = [item for item in curr_alloc if item not in cpu_blacklist]
ret.append(curr_alloc)
for idx in sorted(range(quota_bck), reverse=True):
del node[idx]
return ret
return ret
def _get_quota(nodes, local_size):
        # default quota is the number of physical cores for non-root processes
default_quota = cpu_num // local_size
default_quota = int(os.getenv("BYTEPS_NUMA_DEFAULT_QUOTA", default_quota))
while default_quota >= 1 and default_quota * local_size > cpu_num:
default_quota -= 1
# root quota is the number of cpus for root processess
# root does more work, thus using more cpus
root_quota = cpu_num - default_quota * (local_size - 1)
if int(os.getenv("BYTEPS_NUMA_ROOT_QUOTA", 0)):
root_quota = int(os.getenv("BYTEPS_NUMA_ROOT_QUOTA", 0))
node_size = len(nodes[0])
if cpu_mt:
node_size //= 2
while root_quota >= 1 and root_quota > node_size:
root_quota -= 1
return [default_quota] * (local_size - 1) + [root_quota]
nodes = get_numa_info()
if not nodes:
return None
cpu_num = reduce(lambda x, y: (x + len(y)), nodes, 0)
quota_list = _get_quota(nodes, local_size)
cpu_blacklist = os.getenv("BYTEPS_CPU_BLACKLIST", "-1")
cpu_blacklist = [int(item) for item in cpu_blacklist.split(",")]
ret = []
for quota in quota_list:
while quota > 0:
allocation = _get_allocation(nodes, quota, cpu_num, cpu_blacklist)
if allocation:
ret.append(allocation)
break
else:
quota -= 1
return ret
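# Worked example (hypothetical hardware): on a box with one NUMA node exposing
# cpu0-cpu7 (4 physical cores plus hyperthread siblings) and the default
# BYTEPS_MULTITHREADED_CPU=1, allocate_cpu(2) returns
#   [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]
# so local rank 0 is later pinned with "numactl --physcpubind 0-1,4-5" and local
# rank 1 (which receives the root quota; identical here) with "--physcpubind 2-3,6-7".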
def check_env():
assert "DMLC_ROLE" in os.environ and \
os.environ["DMLC_ROLE"].lower() in ["worker", "server", "scheduler"]
required_envs = COMMON_REQUIRED_ENVS
if os.environ["DMLC_ROLE"] == "worker":
assert "DMLC_NUM_WORKER" in os.environ
num_worker = int(os.environ["DMLC_NUM_WORKER"])
assert num_worker >= 1
if num_worker == 1:
required_envs = []
required_envs += WORKER_REQUIRED_ENVS
for env in required_envs:
if env not in os.environ:
print("The env " + env + " is missing")
os._exit(0)
def worker(local_rank, local_size, command, allocation=None):
my_env = os.environ.copy()
my_env["BYTEPS_LOCAL_RANK"] = str(local_rank)
my_env["BYTEPS_LOCAL_SIZE"] = str(local_size)
if int(os.getenv("BYTEPS_ENABLE_GDB", 0)):
if command.find("python") != 0:
command = "python " + command
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
if allocation:
print("enable NUMA finetune...")
retval = subprocess.call(
["dpkg", "-s", "numactl"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if retval == 0:
numa = "numactl --physcpubind "
for cpu_set in allocation:
if len(cpu_set) == 1:
numa += "{},".format(cpu_set[0])
else:
numa += "{}-{},".format(cpu_set[0], cpu_set[-1])
numa = numa.strip(',') + ' '
command = numa + command
print("Command: %s\n" % command)
else:
print("Warning: numactl not found. try `sudo apt-get install numactl`.")
if os.environ.get("BYTEPS_TRACE_ON", "") == "1":
print("\n!!!Enable profiling for WORKER_ID: %s and local_rank: %d!!!" %
(os.environ.get("DMLC_WORKER_ID"), local_rank))
print("BYTEPS_TRACE_START_STEP: %s\tBYTEPS_TRACE_END_STEP: %s\t BYTEPS_TRACE_DIR: %s" % (os.environ.get(
"BYTEPS_TRACE_START_STEP", ""), os.environ.get("BYTEPS_TRACE_END_STEP", ""), os.environ.get("BYTEPS_TRACE_DIR", "")))
print("Command: %s\n" % command)
sys.stdout.flush()
trace_path = os.path.join(os.environ.get(
"BYTEPS_TRACE_DIR", "."), str(local_rank))
if not os.path.exists(trace_path):
os.makedirs(trace_path)
subprocess.check_call(command, env=my_env,
stdout=sys.stdout, stderr=sys.stderr, shell=True)
def parse_num_range(core_list):
    # core_list is a colon-separated string. Each section is the physical
# core assignment for the corresponding byteps worker.
# example input: 1,4-5,7-11,12:20-25
# example output: [[[1], [4, 5], [7, 8, 9, 10, 11], [12]], [[20, 21, 22, 23, 24, 25]]]
core_list = core_list.split(':')
ret = []
for item in core_list:
temp = [(lambda sub: range(sub[0], sub[-1] + 1))(list(map(int, elem.split('-')))) for elem in item.split(',')]
ret.append([list(a) for a in temp])
return ret
def launch_bps():
print("BytePS launching " + os.environ["DMLC_ROLE"])
sys.stdout.flush()
check_env()
os.environ["PYTHONUNBUFFERED"] = "1"
if os.environ["DMLC_ROLE"] == "worker":
if "NVIDIA_VISIBLE_DEVICES" in os.environ:
local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(","))
else:
local_size = 1
t = [None] * local_size
bind_to_cores = os.getenv("BYTEPS_NUMA_ON", "1") == "1"
if bind_to_cores:
user_override = os.getenv("BYTEPS_VISIBLE_CPU_CORES", "").strip()
if user_override:
allocations = parse_num_range(user_override)
else:
allocations = allocate_cpu(local_size)
for i in range(local_size):
command = ' '.join(sys.argv[1:])
if bind_to_cores:
t[i] = PropagatingThread(target=worker, args=[
i, local_size, command, allocations[i]])
else:
t[i] = PropagatingThread(target=worker, args=[
i, local_size, command])
t[i].daemon = True
t[i].start()
for i in range(local_size):
t[i].join()
elif os.environ.get("BYTEPS_FORCE_DISTRIBUTED", "") == "1" or \
int(os.environ.get("DMLC_NUM_WORKER", "1")) > 1:
command = "python3 -c 'import byteps.server'"
if int(os.getenv("BYTEPS_ENABLE_GDB", 0)):
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
print("Command: %s\n" % command, flush=True)
my_env = os.environ.copy()
subprocess.check_call(command, env=my_env,
stdout=sys.stdout, stderr=sys.stderr, shell=True)
if __name__ == "__main__":
launch_bps()
|
tutorials/W3D3_ReinforcementLearningForGames/solutions/W3D3_Tutorial1_Solution_ef26beca.py | justynaekert/course-content-dl | 473 | 11144491 | <reponame>justynaekert/course-content-dl
class PolicyBasedPlayer():
def __init__(self, game, pnet, greedy=True):
self.game = game
self.pnet = pnet
self.greedy = greedy
def play(self, board):
valids = self.game.getValidMoves(board, 1)
action_probs = self.pnet.predict(board)
vap = action_probs*valids # masking invalid moves
sum_vap = np.sum(vap)
if sum_vap > 0:
vap /= sum_vap # renormalize
else:
# if all valid moves were masked we make all valid moves equally probable
print("All valid moves were masked, doing a workaround.")
vap = vap + valids
vap /= np.sum(vap)
if self.greedy:
# greedy policy player
a = np.where(vap == np.max(vap))[0][0]
else:
# sample-based policy player
a = np.random.choice(self.game.getActionSize(), p=vap)
return a
# add event to airtable
atform.add_event('Coding Exercise 5: Implement the PolicyBasedPlayer')
# playing games
set_seed(seed=SEED)
num_games = 20
player1 = PolicyBasedPlayer(game, pnet, greedy=True).play
player2 = RandomPlayer(game).play
arena = Arena.Arena(player1, player2, game, display=OthelloGame.display)
## Uncomment below to test!
result = arena.playGames(num_games, verbose=False)
print(f"\n\n{result}")
win_rate_player1 = result[0] / num_games
print(f"\nWin rate for player1 over {num_games} games: {round(win_rate_player1*100, 1)}%") |
tests/test_cases/test_cocotb/test_handle.py | lavanyajagan/cocotb | 350 | 11144502 | # Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
"""
Tests for handles
"""
import logging
import random
import pytest
import cocotb
from cocotb.binary import BinaryValue
from cocotb.handle import _Limits
from cocotb.triggers import Timer
from cocotb.types import Logic, LogicArray
SIM_NAME = cocotb.SIM_NAME.lower()
@cocotb.test()
async def test_bad_attr(dut):
with pytest.raises(AttributeError):
dut.fake_signal
try:
_ = dut.stream_in_data.whoops
except AttributeError as e:
assert "whoops" in str(e)
else:
assert False, "Expected AttributeError"
# iverilog fails to discover string inputs (gh-2585)
# GHDL fails to discover string input properly (gh-2584)
@cocotb.test(
expect_error=AttributeError
if SIM_NAME.startswith("icarus")
else TypeError
if SIM_NAME.startswith("ghdl")
else ()
)
async def test_string_handle_takes_bytes(dut):
dut.stream_in_string.value = b"bytes"
await cocotb.triggers.Timer(10, "ns")
val = dut.stream_in_string.value
assert isinstance(val, bytes)
assert val == b"bytes"
# iverilog fails to discover string inputs (gh-2585)
# GHDL fails to discover string input properly (gh-2584)
@cocotb.test(
expect_error=AttributeError
if SIM_NAME.startswith("icarus")
else TypeError
if SIM_NAME.startswith("ghdl")
else (),
skip=cocotb.LANGUAGE in ["verilog"] and SIM_NAME.startswith("riviera"),
)
async def test_string_ansi_color(dut):
"""Check how different simulators treat ANSI-colored strings, see gh-2328"""
teststr = "\x1b[33myellow\x1b[49m\x1b[39m"
asciival_sum = sum(ord(char) for char in teststr)
await cocotb.triggers.Timer(10, "ns")
dut.stream_in_string.value = bytes(teststr.encode("ascii"))
await cocotb.triggers.Timer(10, "ns")
val = dut.stream_in_string.value
assert isinstance(val, bytes)
if cocotb.LANGUAGE in ["vhdl"] and SIM_NAME.startswith("riviera"):
# Riviera-PRO doesn't return anything with VHDL:
assert val == b""
# ...and the value shows up differently in the HDL:
assert dut.stream_in_string_asciival_sum.value == sum(
ord(char) for char in teststr.replace("\x1b", "\0")
)
elif cocotb.LANGUAGE in ["verilog"] and SIM_NAME.startswith(("ncsim", "xmsim")):
# Xcelium with VPI strips the escape char when reading:
assert val == bytes(teststr.replace("\x1b", "").encode("ascii"))
# the HDL gets the correct value though:
assert dut.stream_in_string_asciival_sum.value == asciival_sum
else:
assert val == bytes(teststr.encode("ascii"))
assert dut.stream_in_string_asciival_sum.value == asciival_sum
@cocotb.test()
async def test_delayed_assignment_still_errors(dut):
"""Writing a bad value should fail even if the write is scheduled to happen later"""
# note: all these fail because BinaryValue.assign rejects them
with pytest.raises(ValueError):
dut.stream_in_int.setimmediatevalue("1010 not a real binary string")
with pytest.raises(TypeError):
dut.stream_in_int.setimmediatevalue([])
with pytest.raises(ValueError):
dut.stream_in_int.value = "1010 not a real binary string"
with pytest.raises(TypeError):
dut.stream_in_int.value = []
async def int_values_test(signal, n_bits, limits=_Limits.VECTOR_NBIT):
"""Test integer access to a signal."""
values = gen_int_test_values(n_bits, limits)
for val in values:
signal.value = val
await Timer(1, "ns")
if limits == _Limits.VECTOR_NBIT:
if val < 0:
got = signal.value.signed_integer
else:
got = signal.value.integer
else:
got = signal.value
assert got == val
def gen_int_test_values(n_bits, limits=_Limits.VECTOR_NBIT):
"""Generates a list of int test values for a given number of bits."""
unsigned_min = 0
unsigned_max = 2**n_bits - 1
signed_min = -(2 ** (n_bits - 1))
signed_max = 2 ** (n_bits - 1) - 1
if limits == _Limits.VECTOR_NBIT:
return [1, -1, 4, -4, unsigned_min, unsigned_max, signed_min, signed_max]
elif limits == _Limits.SIGNED_NBIT:
return [1, -1, 4, -4, signed_min, signed_max]
else:
return [1, -1, 4, -4, unsigned_min, unsigned_max]
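# For example, gen_int_test_values(8) with the default full-vector limits evaluates to
# [1, -1, 4, -4, 0, 255, -128, 127]: a few small probes plus the unsigned and signed
# extremes representable in an 8-bit vector.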
async def int_overflow_test(signal, n_bits, test_mode, limits=_Limits.VECTOR_NBIT):
"""Test integer overflow."""
if test_mode == "ovfl":
value = gen_int_ovfl_value(n_bits, limits)
elif test_mode == "unfl":
value = gen_int_unfl_value(n_bits, limits)
else:
value = None
with pytest.raises(OverflowError):
signal.value = value
def gen_int_ovfl_value(n_bits, limits=_Limits.VECTOR_NBIT):
unsigned_max = 2**n_bits - 1
signed_max = 2 ** (n_bits - 1) - 1
if limits == _Limits.SIGNED_NBIT:
return signed_max + 1
elif limits == _Limits.UNSIGNED_NBIT:
return unsigned_max + 1
else:
return unsigned_max + 1
def gen_int_unfl_value(n_bits, limits=_Limits.VECTOR_NBIT):
unsigned_min = 0
signed_min = -(2 ** (n_bits - 1))
if limits == _Limits.SIGNED_NBIT:
return signed_min - 1
elif limits == _Limits.UNSIGNED_NBIT:
return unsigned_min - 1
else:
return signed_min - 1
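# For example, gen_int_ovfl_value(8, _Limits.SIGNED_NBIT) == 128 and
# gen_int_unfl_value(8, _Limits.SIGNED_NBIT) == -129, i.e. one step past either end of
# the signed 8-bit range; the overflow/underflow tests expect such writes to raise
# OverflowError.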
@cocotb.test()
async def test_int_8bit(dut):
"""Test int access to 8-bit vector."""
await int_values_test(dut.stream_in_data, len(dut.stream_in_data))
@cocotb.test()
async def test_int_8bit_overflow(dut):
"""Test 8-bit vector overflow."""
await int_overflow_test(dut.stream_in_data, len(dut.stream_in_data), "ovfl")
@cocotb.test()
async def test_int_8bit_underflow(dut):
"""Test 8-bit vector underflow."""
await int_overflow_test(dut.stream_in_data, len(dut.stream_in_data), "unfl")
@cocotb.test()
async def test_int_32bit(dut):
"""Test int access to 32-bit vector."""
await int_values_test(dut.stream_in_data_dword, len(dut.stream_in_data_dword))
@cocotb.test()
async def test_int_32bit_overflow(dut):
"""Test 32-bit vector overflow."""
await int_overflow_test(
dut.stream_in_data_dword, len(dut.stream_in_data_dword), "ovfl"
)
@cocotb.test()
async def test_int_32bit_underflow(dut):
"""Test 32-bit vector underflow."""
await int_overflow_test(
dut.stream_in_data_dword, len(dut.stream_in_data_dword), "unfl"
)
@cocotb.test()
async def test_int_39bit(dut):
"""Test int access to 39-bit vector."""
await int_values_test(dut.stream_in_data_39bit, len(dut.stream_in_data_39bit))
@cocotb.test()
async def test_int_39bit_overflow(dut):
"""Test 39-bit vector overflow."""
await int_overflow_test(
dut.stream_in_data_39bit, len(dut.stream_in_data_39bit), "ovfl"
)
@cocotb.test()
async def test_int_39bit_underflow(dut):
"""Test 39-bit vector underflow."""
await int_overflow_test(
dut.stream_in_data_39bit, len(dut.stream_in_data_39bit), "unfl"
)
@cocotb.test()
async def test_int_64bit(dut):
"""Test int access to 64-bit vector."""
await int_values_test(dut.stream_in_data_wide, len(dut.stream_in_data_wide))
@cocotb.test()
async def test_int_64bit_overflow(dut):
"""Test 64-bit vector overflow."""
await int_overflow_test(
dut.stream_in_data_wide, len(dut.stream_in_data_wide), "ovfl"
)
@cocotb.test()
async def test_int_64bit_underflow(dut):
"""Test 64-bit vector underflow."""
await int_overflow_test(
dut.stream_in_data_wide, len(dut.stream_in_data_wide), "unfl"
)
@cocotb.test()
async def test_int_128bit(dut):
"""Test int access to 128-bit vector."""
await int_values_test(dut.stream_in_data_dqword, len(dut.stream_in_data_dqword))
@cocotb.test()
async def test_int_128bit_overflow(dut):
"""Test 128-bit vector overflow."""
await int_overflow_test(
dut.stream_in_data_dqword, len(dut.stream_in_data_dqword), "ovfl"
)
@cocotb.test()
async def test_int_128bit_underflow(dut):
"""Test 128-bit vector underflow."""
await int_overflow_test(
dut.stream_in_data_dqword, len(dut.stream_in_data_dqword), "unfl"
)
@cocotb.test(expect_error=AttributeError if SIM_NAME.startswith("icarus") else ())
async def test_integer(dut):
"""Test access to integers."""
if (
cocotb.LANGUAGE in ["verilog"]
and SIM_NAME.startswith("riviera")
or SIM_NAME.startswith("ghdl")
):
limits = (
_Limits.VECTOR_NBIT
) # stream_in_int is ModifiableObject in Riviera and GHDL, not IntegerObject
else:
limits = _Limits.SIGNED_NBIT
await int_values_test(dut.stream_in_int, 32, limits)
@cocotb.test(expect_error=AttributeError if SIM_NAME.startswith("icarus") else ())
async def test_integer_overflow(dut):
"""Test integer overflow."""
if (
cocotb.LANGUAGE in ["verilog"]
and SIM_NAME.startswith("riviera")
or SIM_NAME.startswith("ghdl")
):
limits = (
_Limits.VECTOR_NBIT
) # stream_in_int is ModifiableObject in Riviera and GHDL, not IntegerObject
else:
limits = _Limits.SIGNED_NBIT
await int_overflow_test(dut.stream_in_int, 32, "ovfl", limits)
@cocotb.test(expect_error=AttributeError if SIM_NAME.startswith("icarus") else ())
async def test_integer_underflow(dut):
"""Test integer underflow."""
if (
cocotb.LANGUAGE in ["verilog"]
and SIM_NAME.startswith("riviera")
or SIM_NAME.startswith("ghdl")
):
limits = (
_Limits.VECTOR_NBIT
) # stream_in_int is ModifiableObject in Riviera and GHDL, not IntegerObject
else:
limits = _Limits.SIGNED_NBIT
await int_overflow_test(dut.stream_in_int, 32, "unfl", limits)
# GHDL unable to find real signals (gh-2589)
# iverilog unable to find real signals (gh-2590)
@cocotb.test(
expect_error=AttributeError
if SIM_NAME.startswith("icarus")
else AttributeError
if SIM_NAME.startswith("ghdl")
else ()
)
async def test_real_assign_double(dut):
"""
Assign a random floating point value, read it back from the DUT and check
it matches what we assigned
"""
val = random.uniform(-1e307, 1e307)
log = logging.getLogger("cocotb.test")
timer_shortest = Timer(1, "step")
await timer_shortest
log.info("Setting the value %g" % val)
dut.stream_in_real.value = val
await timer_shortest
await timer_shortest # FIXME: Workaround for VHPI scheduling - needs investigation
got = float(dut.stream_out_real)
log.info("Read back value %g" % got)
assert got == val, "Values didn't match!"
# GHDL unable to find real signals (gh-2589)
# iverilog unable to find real signals (gh-2590)
@cocotb.test(
expect_error=AttributeError
if SIM_NAME.startswith("icarus")
else AttributeError
if SIM_NAME.startswith("ghdl")
else ()
)
async def test_real_assign_int(dut):
"""Assign a random integer value to ensure we can write types convertible to
int, read it back from the DUT and check it matches what we assigned.
"""
val = random.randint(-(2**31), 2**31 - 1)
log = logging.getLogger("cocotb.test")
timer_shortest = Timer(1, "step")
await timer_shortest
log.info("Setting the value %i" % val)
dut.stream_in_real.value = val
await timer_shortest
await timer_shortest # FIXME: Workaround for VHPI scheduling - needs investigation
got = dut.stream_out_real
log.info("Read back value %d" % got)
assert got == float(val), "Values didn't match!"
# identifiers starting with `_` are illegal in VHDL
@cocotb.test(skip=cocotb.LANGUAGE in ["vhdl"])
async def test_access_underscore_name(dut):
"""Test accessing HDL name starting with an underscore"""
# direct access does not work because we consider such names cocotb-internal
with pytest.raises(AttributeError):
dut._underscore_name
# indirect access works
dut._id("_underscore_name", extended=False).value = 0
await Timer(1, "ns")
assert dut._id("_underscore_name", extended=False).value == 0
dut._id("_underscore_name", extended=False).value = 1
await Timer(1, "ns")
assert dut._id("_underscore_name", extended=False).value == 1
dut._id("_underscore_name", extended=False).value = 0
await Timer(1, "ns")
assert dut._id("_underscore_name", extended=False).value == 0
@cocotb.test()
async def test_assign_LogicArray(dut):
value = LogicArray(dut.stream_in_data.value)
value &= LogicArray("0x1X011z")
dut.stream_in_data.value = value
with pytest.raises(ValueError):
dut.stream_in_data.value = LogicArray("010") # not the correct size
@cocotb.test()
async def test_assign_Logic(dut):
dut.stream_in_ready.value = Logic("X")
await Timer(1, "ns")
assert dut.stream_in_ready.value.binstr.lower() == "x"
with pytest.raises(ValueError):
dut.stream_in_data.value = Logic("U") # not the correct size
@cocotb.test()
async def test_assign_BinaryValue_too_big(dut):
with pytest.raises(ValueError):
dut.stream_in_data.value = BinaryValue(0, n_bits=1)
|
benchmark/opperf/nd_operations/misc_operators.py | mchoi8739/incubator-mxnet | 211 | 11144516 | <reponame>mchoi8739/incubator-mxnet<gh_stars>100-1000
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Performance benchmark tests for MXNet NDArray Miscellaneous Operations.
Below 16 Miscellaneous Operators are covered:
['reset_arrays', 'multi_all_finite', 'multi_sum_sq', 'add_n', 'UpSampling', 'Custom', 'squeeze',
'all_finite', 'clip', 'multi_lars', 'SequenceReverse', 'SequenceLast', 'SequenceMask', 'cast_storage',
'cumsum', 'fill_element_0index']
"""
import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_remaining_miscellaneous_operators
from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE
from benchmark.opperf.custom_operations.custom_operations import CustomAddOneProp
def run_mx_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the miscellaneous
operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
standard_inputs_array_ops = [{"args": [(1024, 1024)],
"num_arrays": 1},
{"args": [(10000, 1)],
"num_arrays": 1},
{"args": [(10000, 10)],
"num_arrays": 1}]
int64_tensor_inputs_array_ops = [{"args": [(2**32, 1)],
"num_arrays":1}]
standard_inputs_add_n = [{"args": [(1024, 1024)]},
{"args": [(10000, 1)]},
{"args": [(10000, 10)]}]
int64_tensor_inputs_add_n = [{"args": [(2**16, 2**16)]}]
standard_inputs_upsampling = [{"args": (32, 3, 256, 256),
"scale": 2,
"sample_type": "nearest"},
{"args": (32, 3, 10000, 1),
"scale": 4,
"sample_type": "nearest"}]
int64_tensor_inputs_upsampling = [{"args": (2**32 + 1, 1, 1, 1),
"scale": 2,
"sample_type": "nearest"}]
standard_inputs_custom = [{"args": [(1024, 1024)],
"op_type": "CustomAddOne"},
{"args": [(10000, 1)],
"op_type": "CustomAddOne"},
{"args": [(10000, 10)],
"op_type": "CustomAddOne"}]
int64_tensor_inputs_custom = [{"args": [(2**32 + 1, 1)],
"op_type": "CustomAddOne"}]
if int64_tensor == 'on':
inputs_array_ops = int64_tensor_inputs_array_ops
inputs_add_n = int64_tensor_inputs_add_n
inputs_upsampling = int64_tensor_inputs_upsampling
inputs_custom = int64_tensor_inputs_custom
else:
inputs_array_ops = standard_inputs_array_ops
inputs_add_n = standard_inputs_add_n
inputs_upsampling = standard_inputs_upsampling
inputs_custom = standard_inputs_custom
# Individual tests for ops with positional args
array_ops_benchmark = run_performance_test([getattr(MX_OP_MODULE, "reset_arrays"),
getattr(MX_OP_MODULE, "multi_all_finite"),
getattr(MX_OP_MODULE, "multi_sum_sq")],
run_backward=False,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=inputs_array_ops,
warmup=warmup,
runs=runs)
add_n_benchmark = run_performance_test([getattr(MX_OP_MODULE, "add_n")],
run_backward=True,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=inputs_add_n,
warmup=warmup,
runs=runs)
    # There are currently issues with UpSampling with bilinear interpolation.
# track issue here: https://github.com/apache/incubator-mxnet/issues/9138
upsampling_benchmark = run_performance_test([getattr(MX_OP_MODULE, "UpSampling")],
run_backward=True,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=inputs_upsampling,
warmup=warmup,
runs=runs)
# Create and register CustomAddOne operator for use in Custom op testing
c = CustomAddOneProp()
c.create_operator(ctx, [(1024,1024)], [dtype])
custom_benchmark = run_performance_test([getattr(MX_OP_MODULE, "Custom")],
run_backward=True,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=inputs_custom,
warmup=warmup,
runs=runs)
# Fetch remaining Miscellaneous Operators
mx_misc_ops = get_remaining_miscellaneous_operators()
# Run benchmarks
mx_misc_op_results = run_op_benchmarks(mx_misc_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return merge_map_list(array_ops_benchmark + add_n_benchmark + upsampling_benchmark + custom_benchmark + [mx_misc_op_results])
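# Illustrative call (default-style arguments; results depend on the host):
#   results = run_mx_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
#                                              profiler='native', int64_tensor='off')
# `results` is the merged per-operator dictionary described in the docstring above.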
|
sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2015_06_15/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 2,728 | 11144536 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._storage_management_enums import *
class CheckNameAvailabilityResult(msrest.serialization.Model):
"""The CheckNameAvailability operation response.
:param name_available: Boolean value that indicates whether the name is available for you to
use. If true, the name is available. If false, the name has already been taken or is invalid
and cannot be used.
:type name_available: bool
:param reason: The reason that a storage account name could not be used. The Reason element is
only returned if NameAvailable is false. Possible values include: "AccountNameInvalid",
"AlreadyExists".
:type reason: str or ~azure.mgmt.storage.v2015_06_15.models.Reason
:param message: The error message explaining the Reason value in more detail.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "Reason"]] = None,
message: Optional[str] = None,
**kwargs
):
super(CheckNameAvailabilityResult, self).__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class CustomDomain(msrest.serialization.Model):
"""The custom domain assigned to this storage account. This can be set via Update.
All required parameters must be populated in order to send to Azure.
:param name: Required. The custom domain name. Name is the CNAME source.
:type name: str
:param use_sub_domain_name: Indicates whether indirect CName validation is enabled. Default
value is false. This should only be set on updates.
:type use_sub_domain_name: bool
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'use_sub_domain_name': {'key': 'useSubDomainName', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
use_sub_domain_name: Optional[bool] = None,
**kwargs
):
super(CustomDomain, self).__init__(**kwargs)
self.name = name
self.use_sub_domain_name = use_sub_domain_name
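# Example (values are placeholders): a custom-domain payload for a storage account
# update could be constructed as
#   CustomDomain(name="www.contoso.com", use_sub_domain_name=False)
# where `name` is the required CNAME source described above.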
class Endpoints(msrest.serialization.Model):
"""The URIs that are used to perform a retrieval of a public blob, queue or table object.
:param blob: The blob endpoint.
:type blob: str
:param queue: The queue endpoint.
:type queue: str
:param table: The table endpoint.
:type table: str
:param file: The file endpoint.
:type file: str
"""
_attribute_map = {
'blob': {'key': 'blob', 'type': 'str'},
'queue': {'key': 'queue', 'type': 'str'},
'table': {'key': 'table', 'type': 'str'},
'file': {'key': 'file', 'type': 'str'},
}
def __init__(
self,
*,
blob: Optional[str] = None,
queue: Optional[str] = None,
table: Optional[str] = None,
file: Optional[str] = None,
**kwargs
):
super(Endpoints, self).__init__(**kwargs)
self.blob = blob
self.queue = queue
self.table = table
self.file = file
class Resource(msrest.serialization.Model):
"""Describes a storage resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class StorageAccount(Resource):
"""The storage account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param provisioning_state: The status of the storage account at the time the operation was
called. Possible values include: "Creating", "ResolvingDNS", "Succeeded".
:type provisioning_state: str or ~azure.mgmt.storage.v2015_06_15.models.ProvisioningState
:param account_type: The type of the storage account. Possible values include: "Standard_LRS",
"Standard_ZRS", "Standard_GRS", "Standard_RAGRS", "Premium_LRS".
:type account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:param primary_endpoints: The URLs that are used to perform a retrieval of a public blob,
queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob
endpoint.
:type primary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
:param primary_location: The location of the primary data center for the storage account.
:type primary_location: str
:param status_of_primary: The status indicating whether the primary location of the storage
account is available or unavailable. Possible values include: "Available", "Unavailable".
:type status_of_primary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:param last_geo_failover_time: The timestamp of the most recent instance of a failover to the
secondary location. Only the most recent timestamp is retained. This element is not returned if
there has never been a failover instance. Only available if the accountType is Standard_GRS or
Standard_RAGRS.
:type last_geo_failover_time: ~datetime.datetime
:param secondary_location: The location of the geo-replicated secondary for the storage
account. Only available if the accountType is Standard_GRS or Standard_RAGRS.
:type secondary_location: str
:param status_of_secondary: The status indicating whether the secondary location of the storage
account is available or unavailable. Only available if the SKU name is Standard_GRS or
Standard_RAGRS. Possible values include: "Available", "Unavailable".
:type status_of_secondary: str or ~azure.mgmt.storage.v2015_06_15.models.AccountStatus
:param creation_time: The creation date and time of the storage account in UTC.
:type creation_time: ~datetime.datetime
:param custom_domain: The custom domain the user assigned to this storage account.
:type custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
:param secondary_endpoints: The URLs that are used to perform a retrieval of a public blob,
queue, or table object from the secondary location of the storage account. Only available if
the SKU name is Standard_RAGRS.
:type secondary_endpoints: ~azure.mgmt.storage.v2015_06_15.models.Endpoints
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'account_type': {'key': 'properties.accountType', 'type': 'str'},
'primary_endpoints': {'key': 'properties.primaryEndpoints', 'type': 'Endpoints'},
'primary_location': {'key': 'properties.primaryLocation', 'type': 'str'},
'status_of_primary': {'key': 'properties.statusOfPrimary', 'type': 'str'},
'last_geo_failover_time': {'key': 'properties.lastGeoFailoverTime', 'type': 'iso-8601'},
'secondary_location': {'key': 'properties.secondaryLocation', 'type': 'str'},
'status_of_secondary': {'key': 'properties.statusOfSecondary', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
'secondary_endpoints': {'key': 'properties.secondaryEndpoints', 'type': 'Endpoints'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
provisioning_state: Optional[Union[str, "ProvisioningState"]] = None,
account_type: Optional[Union[str, "AccountType"]] = None,
primary_endpoints: Optional["Endpoints"] = None,
primary_location: Optional[str] = None,
status_of_primary: Optional[Union[str, "AccountStatus"]] = None,
last_geo_failover_time: Optional[datetime.datetime] = None,
secondary_location: Optional[str] = None,
status_of_secondary: Optional[Union[str, "AccountStatus"]] = None,
creation_time: Optional[datetime.datetime] = None,
custom_domain: Optional["CustomDomain"] = None,
secondary_endpoints: Optional["Endpoints"] = None,
**kwargs
):
super(StorageAccount, self).__init__(location=location, tags=tags, **kwargs)
self.provisioning_state = provisioning_state
self.account_type = account_type
self.primary_endpoints = primary_endpoints
self.primary_location = primary_location
self.status_of_primary = status_of_primary
self.last_geo_failover_time = last_geo_failover_time
self.secondary_location = secondary_location
self.status_of_secondary = status_of_secondary
self.creation_time = creation_time
self.custom_domain = custom_domain
self.secondary_endpoints = secondary_endpoints
class StorageAccountCheckNameAvailabilityParameters(msrest.serialization.Model):
"""The parameters used to check the availability of the storage account name.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param type:
:type type: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
type: Optional[str] = "Microsoft.Storage/storageAccounts",
**kwargs
):
super(StorageAccountCheckNameAvailabilityParameters, self).__init__(**kwargs)
self.name = name
self.type = type
class StorageAccountCreateParameters(msrest.serialization.Model):
"""The parameters to provide for the account.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource. This will be one of the supported and
registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a
resource cannot be changed once it is created, but if an identical geo region is specified on
update, the request will succeed.
:type location: str
:param tags: A set of tags. A list of key value pairs that describe the resource. These tags
can be used for viewing and grouping this resource (across resource groups). A maximum of 15
tags can be provided for a resource. Each tag must have a key with a length no greater than 128
characters and a value with a length no greater than 256 characters.
:type tags: dict[str, str]
:param account_type: The sku name. Required for account creation; optional for update. Note
that in older versions, sku name was called accountType. Possible values include:
"Standard_LRS", "Standard_ZRS", "Standard_GRS", "Standard_RAGRS", "Premium_LRS".
:type account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
"""
_validation = {
'location': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'account_type': {'key': 'properties.accountType', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
account_type: Optional[Union[str, "AccountType"]] = None,
**kwargs
):
super(StorageAccountCreateParameters, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.account_type = account_type
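# Editor note: a minimal construction sketch for the model above, kept as a comment so it
# is not executed on import. It assumes the msrest machinery imported at the top of this
# module; serialize() comes from msrest.serialization.Model, and the values are illustrative.
#
#     params = StorageAccountCreateParameters(
#         location="eastus",               # required
#         account_type="Standard_LRS",     # serialized under properties.accountType
#         tags={"env": "dev"},
#     )
#     body = params.serialize()            # nested dict using the wire-format keys above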
class StorageAccountKeys(msrest.serialization.Model):
"""The access keys for the storage account.
:param key1: The value of key 1.
:type key1: str
:param key2: The value of key 2.
:type key2: str
"""
_attribute_map = {
'key1': {'key': 'key1', 'type': 'str'},
'key2': {'key': 'key2', 'type': 'str'},
}
def __init__(
self,
*,
key1: Optional[str] = None,
key2: Optional[str] = None,
**kwargs
):
super(StorageAccountKeys, self).__init__(**kwargs)
self.key1 = key1
self.key2 = key2
class StorageAccountListResult(msrest.serialization.Model):
"""The list storage accounts operation response.
:param value: The list of storage accounts and their properties.
:type value: list[~azure.mgmt.storage.v2015_06_15.models.StorageAccount]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageAccount]'},
}
def __init__(
self,
*,
value: Optional[List["StorageAccount"]] = None,
**kwargs
):
super(StorageAccountListResult, self).__init__(**kwargs)
self.value = value
class StorageAccountRegenerateKeyParameters(msrest.serialization.Model):
"""The parameters used to regenerate the storage account key.
All required parameters must be populated in order to send to Azure.
:param key_name: Required.
:type key_name: str
"""
_validation = {
'key_name': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
}
def __init__(
self,
*,
key_name: str,
**kwargs
):
super(StorageAccountRegenerateKeyParameters, self).__init__(**kwargs)
self.key_name = key_name
class StorageAccountUpdateParameters(msrest.serialization.Model):
"""The parameters to update on the account.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param account_type: The account type. Note that StandardZRS and PremiumLRS accounts cannot be
changed to other account types, and other account types cannot be changed to StandardZRS or
PremiumLRS. Possible values include: "Standard_LRS", "Standard_ZRS", "Standard_GRS",
"Standard_RAGRS", "Premium_LRS".
:type account_type: str or ~azure.mgmt.storage.v2015_06_15.models.AccountType
:param custom_domain: User domain assigned to the storage account. Name is the CNAME source.
Only one custom domain is supported per storage account at this time. To clear the existing
custom domain, use an empty string for the custom domain name property.
:type custom_domain: ~azure.mgmt.storage.v2015_06_15.models.CustomDomain
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'account_type': {'key': 'properties.accountType', 'type': 'str'},
'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
account_type: Optional[Union[str, "AccountType"]] = None,
custom_domain: Optional["CustomDomain"] = None,
**kwargs
):
super(StorageAccountUpdateParameters, self).__init__(**kwargs)
self.tags = tags
self.account_type = account_type
self.custom_domain = custom_domain
class Usage(msrest.serialization.Model):
"""Describes Storage Resource Usage.
All required parameters must be populated in order to send to Azure.
:param unit: Required. The unit of measurement. Possible values include: "Count", "Bytes",
"Seconds", "Percent", "CountsPerSecond", "BytesPerSecond".
:type unit: str or ~azure.mgmt.storage.v2015_06_15.models.UsageUnit
:param current_value: Required. The current count of the allocated resources in the
subscription.
:type current_value: int
:param limit: Required. The maximum count of the resources that can be allocated in the
subscription.
:type limit: int
:param name: Required. The name of the type of usage.
:type name: ~azure.mgmt.storage.v2015_06_15.models.UsageName
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(
self,
*,
unit: Union[str, "UsageUnit"],
current_value: int,
limit: int,
name: "UsageName",
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UsageListResult(msrest.serialization.Model):
"""The List Usages operation response.
:param value: The list Storage Resource Usages.
:type value: list[~azure.mgmt.storage.v2015_06_15.models.Usage]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
}
def __init__(
self,
*,
value: Optional[List["Usage"]] = None,
**kwargs
):
super(UsageListResult, self).__init__(**kwargs)
self.value = value
class UsageName(msrest.serialization.Model):
"""The Usage Names.
:param value: A string describing the resource name.
:type value: str
:param localized_value: A localized string describing the resource name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
super(UsageName, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
|
SCRAPE/Lib/site-packages/twisted/python/constants.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 4,612 | 11144568 |
# -*- test-case-name: twisted.python.test.test_constants -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Symbolic constant support, including collections and constants with text,
numeric, and bit flag values.
"""
# Import and re-export Constantly
from constantly import ( # type: ignore[import]
FlagConstant,
Flags,
NamedConstant,
Names,
ValueConstant,
Values,
)
__all__ = ["NamedConstant", "ValueConstant", "FlagConstant", "Names", "Values", "Flags"]
|
scipy/special/tests/test_spence.py | Ennosigaeon/scipy | 9,095 | 11144573 |
import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
# Make sure the implementation of spence for real arguments
# agrees with the implementation of spence for imaginary arguments.
x = np.logspace(-30, 300, 200)
dataset = np.vstack((x + 0j, spence(x))).T
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
# Check against known values of Spence's function.
phi = (1 + sqrt(5))/2
dataset = [(1, 0),
(2, -pi**2/12),
(0.5, pi**2/12 - log(2)**2/2),
(0, pi**2/6),
(-1, pi**2/4 - 1j*pi*log(2)),
((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
(phi, -pi**2/15 + log(phi)**2/2),
# Corrected from Zagier, "The Dilogarithm Function"
((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
dataset = np.asarray(dataset)
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
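# Editor addition (sketch): a complementary identity check. With SciPy's convention
# spence(z) = Li2(1 - z), Euler's reflection formula gives
# spence(x) + spence(1 - x) = pi**2/6 - log(x)*log(1 - x) for 0 < x < 1.
def test_reflection_formula():
    x = np.linspace(1e-3, 1 - 1e-3, 101)
    lhs = spence(x) + spence(1 - x)
    rhs = pi**2/6 - log(x)*log(1 - x)
    np.testing.assert_allclose(lhs, rhs, rtol=1e-10)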
|
test/test_form.py | syscocloud/puppetboard | 352 | 11144587 |
from puppetboard import app, forms
def test_form_valid(capsys):
for form in [forms.QueryForm]:
with app.app.test_request_context():
qf = form()
out, err = capsys.readouterr()
assert qf is not None
assert err == ""
assert out == ""
|
models/vgg_11.py | LucaBonfiglioli/Early-Bird-Tickets | 132 | 11144594 | import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
'slimmingvgg',
]
model_urls = {
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
}
class VGG(nn.Module):
def __init__(self, features, cfg, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(cfg[0] * 7 * 7, cfg[1]),
nn.BatchNorm1d(cfg[1]),
nn.ReLU(True),
nn.Linear(cfg[1],cfg[2]),
nn.BatchNorm1d(cfg[2]),
nn.ReLU(True),
nn.Linear(cfg[2], num_classes)
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal(m.weight, mode='fan_out')#, nonlinearity='relu')
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(0.5)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(0.5)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M', 4096, 4096]
}
def slimmingvgg(pretrained=False, depth=None, dataset=None, config=None, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
    if config is None:
config = cfg['A']
config2 = [config[-4],config[-2],config[-1]]
model = VGG(make_layers(config[:-2], batch_norm=True), config2, **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model |
src/operation/typechecker_unittest.py | fekblom/critic | 216 | 11144608 |
import copy
import json
def basic():
import htmlutils
from operation.basictypes import OperationError, OperationFailure
from operation.typechecker import (
Optional, TypeChecker, TypeCheckerContext, BooleanChecker,
StringChecker, RestrictedString, SHA1, IntegerChecker,
RestrictedInteger, PositiveInteger, NonNegativeInteger, ArrayChecker,
EnumerationChecker, VariantChecker, DictionaryChecker)
# Check TypeChecker.make()'s handling of basic types.
assert type(TypeChecker.make(bool)) is BooleanChecker
assert type(TypeChecker.make(str)) is StringChecker
assert type(TypeChecker.make(int)) is IntegerChecker
assert type(TypeChecker.make([bool])) is ArrayChecker
assert type(TypeChecker.make(set(["foo", "bar"]))) is EnumerationChecker
assert type(TypeChecker.make(set([bool, str, int]))) is VariantChecker
assert type(TypeChecker.make({ "foo": bool })) is DictionaryChecker
# Check TypeChecker.make()'s handling of TypeChecker sub-classes and
# instances thereof.
assert isinstance(TypeChecker.make(BooleanChecker), BooleanChecker)
boolean_checker = BooleanChecker()
assert TypeChecker.make(boolean_checker) is boolean_checker
def check(checker, *values):
checker = TypeChecker.make(checker)
results = []
for value in values:
converted = checker(value, TypeCheckerContext(None, None, None))
results.append(value if converted is None else converted)
return results
def should_match(checker, *values, **kwargs):
results = check(checker, *values)
if "result" in kwargs:
expected_result = kwargs["result"]
for result in results:
assert result == expected_result, \
"%r != %r" % (result, expected_result)
def should_not_match(checker, *values, **expected):
for value in values:
try:
check(checker, copy.deepcopy(value))
except (OperationError, OperationFailure) as error:
error = json.loads(str(error))
for key, value in expected.items():
if isinstance(value, str):
value = set([value])
assert error.get(key) in value, \
("%s: %r not among %r" % (key, error.get(key), value))
else:
assert False, "checker allowed value incorrectly: %r" % value
# Check some simple things that should be accepted.
should_match(bool, True, False)
should_match(str, "", "foo")
should_match(int, -2**31, -1, 0, 1, 2**31)
should_match([bool], [], [True, False])
should_match([str], ["", "foo"])
should_match([int], [-2**31, -1, 0, 1, 2**31])
should_match(set(["foo", "bar"]), "foo", "bar")
should_match(set([bool, str, int]),
True, False, "", "foo", -2**31, -1, 0, 1, 2**31)
# Check some equally simple things that shouldn't be accepted.
should_not_match(bool, 10, "foo",
error="invalid input: data is not a boolean")
should_not_match(str, True, 10,
error="invalid input: data is not a string")
should_not_match(int, True, "foo", 0.5,
error="invalid input: data is not an integer")
should_not_match([bool], [True, 10], [False, "foo"],
error="invalid input: data[1] is not a boolean")
should_not_match([str], ["", True], ["foo", 10],
error="invalid input: data[1] is not a string")
should_not_match([int], [0, True], [10, "foo"],
error="invalid input: data[1] is not an integer")
should_not_match(set(["foo", "bar"]), "fie",
error="invalid input: data is not valid")
should_not_match(set(["foo", "bar"]), True, 10,
error="invalid input: data is not a string")
should_not_match(set([bool, str, int]), [True], ["foo"], [10],
error="data is of invalid type")
# Check some dictionary checkers.
should_match({ "b": bool, "s": str, "i": int },
{ "b": True, "s": "foo", "i": 10 })
should_match({ "req": bool, "opt": Optional(bool) },
{ "req": True, "opt": False },
{ "req": False })
should_not_match({ "b": bool }, { "b": "foo" }, { "b": 10 },
error="invalid input: data.b is not a boolean")
should_not_match({ "b": bool }, { "i": 10 },
error="invalid input: data.b missing")
should_not_match({ "b": bool }, { "b": True, "i": 10 },
error="invalid input: data.i was not used")
should_not_match({ "b": Optional(bool) }, { "b": "foo" }, { "b": 10 },
error="invalid input: data.b is not a boolean")
# Check suffixed variant checker in dictionary.
id_or_name = VariantChecker({ "id": int, "name": str })
should_match({ "thing": id_or_name },
{ "thing": 10 },
{ "thing_id": 10 },
result={ "thing": 10 })
should_match({ "thing": id_or_name },
{ "thing": "foo" },
{ "thing_name": "foo" },
result={ "thing": "foo" })
should_not_match({ "thing": id_or_name },
{ "thing_id": "foo" },
error="invalid input: data.thing_id is not an integer")
should_not_match({ "thing": id_or_name },
{ "thing_name": 10 },
error="invalid input: data.thing_name is not a string")
should_not_match({ "thing": id_or_name },
{ "thing_id": 10,
"thing_name": "foo" },
error=("invalid input: data.thing_id was not used",
"invalid input: data.thing_name was not used"))
# Check some RestrictedString types.
should_match(RestrictedString, "", "foo")
should_match(RestrictedString(minlength=0), "", "foo")
should_match(RestrictedString(minlength=3), "foo")
should_match(RestrictedString(maxlength=0), "")
should_match(RestrictedString(maxlength=3), "", "foo")
should_match(RestrictedString(minlength=0, maxlength=3), "", "foo")
should_match(RestrictedString(allowed=lambda c: False), "")
should_match(RestrictedString(allowed=lambda c: True), "", "foo")
should_match(RestrictedString(allowed=lambda c: c in "foo"), "", "foo")
should_not_match(RestrictedString(), True, 10,
error="invalid input: data is not a string")
should_not_match(
RestrictedString(minlength=1), "",
code="paramtooshort:data",
title="Invalid data",
message="invalid input: data must be at least 1 characters long")
should_not_match(
RestrictedString(maxlength=2), "foo",
code="paramtoolong:data",
title="Invalid data",
message="invalid input: data must be at most 2 characters long")
should_not_match(
RestrictedString(allowed=lambda c: False), "foo",
code="paramcontainsillegalchar:data",
title="Invalid data",
message="invalid input: data may not contain the characters 'f', 'o'")
should_not_match(
RestrictedString(allowed=lambda c: False, ui_name="gazonk"), "foo",
code="paramcontainsillegalchar:data",
title="Invalid gazonk",
message="invalid input: gazonk may not contain the characters 'f', 'o'")
# Check SHA1.
sha1 = "0123456789abcdefABCDEF0123456789abcdefAB"
should_match(SHA1, *[sha1[:length] for length in range(4, 41)])
should_not_match(SHA1, True, 10,
error="invalid input: data is not a string")
for ch in range(0, 256):
ch = chr(ch)
if ch in sha1:
continue
should_not_match(
SHA1, "012" + ch,
message=htmlutils.htmlify(
"invalid input: data may not contain the character %r" % ch))
should_not_match(
SHA1, "012",
message="invalid input: data must be at least 4 characters long")
should_not_match(
SHA1, "0" * 41,
message="invalid input: data must be at most 40 characters long")
# Check some RestrictedInteger types.
should_match(RestrictedInteger, -2**31, -1, 0, 1, 2**31)
should_match(RestrictedInteger(minvalue=-2**31), -2**31, -1, 0, 1, 2**31)
should_match(RestrictedInteger(minvalue=0), 0, 1, 2**31)
should_match(RestrictedInteger(maxvalue=0), -2**31, -1, 0)
should_match(RestrictedInteger(maxvalue=2**31), -2**31, -1, 0, 1, 2**31)
should_match(RestrictedInteger(minvalue=0, maxvalue=0), 0)
should_not_match(RestrictedInteger(), True, "foo",
error="invalid input: data is not an integer")
should_not_match(RestrictedInteger(minvalue=0), -2**31, -1,
code="valuetoolow:data",
title="Invalid data parameter",
message="invalid input: data must be 0 or higher")
should_not_match(RestrictedInteger(maxvalue=0), 1, 2**31,
code="valuetoohigh:data",
title="Invalid data parameter",
message="invalid input: data must be 0 or lower")
should_not_match(RestrictedInteger(minvalue=1, ui_name="gazonk"), 0,
code="valuetoolow:data",
title="Invalid gazonk parameter",
message="invalid input: gazonk must be 1 or higher")
# Check NonNegativeInteger.
should_match(NonNegativeInteger, 0, 1, 2**31)
should_not_match(NonNegativeInteger, True, "foo",
error="invalid input: data is not an integer")
should_not_match(NonNegativeInteger, -2**31, -1,
code="valuetoolow:data",
title="Invalid data parameter",
message="invalid input: data must be 0 or higher")
# Check PositiveInteger.
should_match(PositiveInteger, 1, 2**31)
should_not_match(PositiveInteger, True, "foo",
error="invalid input: data is not an integer")
should_not_match(PositiveInteger, -2**31, -1, 0,
code="valuetoolow:data",
title="Invalid data parameter",
message="invalid input: data must be 1 or higher")
print "basic: ok"
|
onsets_and_frames/mel.py | chenchy/onsets-and-frames | 149 | 11144631 | import numpy as np
import torch
import torch.nn.functional as F
from librosa.filters import mel
from librosa.util import pad_center
from scipy.signal import get_window
from torch.autograd import Variable
from .constants import *
class STFT(torch.nn.Module):
"""adapted from <NAME>'s https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length, hop_length, win_length=None, window='hann'):
super(STFT, self).__init__()
if win_length is None:
win_length = filter_length
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
def forward(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
class MelSpectrogram(torch.nn.Module):
def __init__(self, n_mels, sample_rate, filter_length, hop_length,
win_length=None, mel_fmin=0.0, mel_fmax=None):
super(MelSpectrogram, self).__init__()
self.stft = STFT(filter_length, hop_length, win_length)
mel_basis = mel(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def forward(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
        mel_output: torch.FloatTensor of shape (B, n_mels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = torch.log(torch.clamp(mel_output, min=1e-5))
return mel_output
# the default melspectrogram converter across the project
melspectrogram = MelSpectrogram(N_MELS, SAMPLE_RATE, WINDOW_LENGTH, HOP_LENGTH, mel_fmin=MEL_FMIN, mel_fmax=MEL_FMAX)
melspectrogram.to(DEFAULT_DEVICE)
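# Editor sketch: quick smoke test of the default converter. SAMPLE_RATE, N_MELS and
# DEFAULT_DEVICE are assumed to be provided by .constants (star-imported above).
if __name__ == '__main__':
    dummy = torch.rand(2, SAMPLE_RATE, device=DEFAULT_DEVICE) * 2 - 1  # 1-second waves in [-1, 1]
    print(melspectrogram(dummy).shape)  # expected: (2, N_MELS, n_frames)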
|
deepfence_backend/cve_scan_registry/utils/credentials.py | tuapuikia/ThreatMapper | 1,281 | 11144670 | import requests
def get_registry_credential(credential_id, api_url, api_key):
headers = {"Content-Type": "application/json", "deepfence-key": api_key}
registry_credential_response = requests.post(
"https://{0}/df-api/registry-credential".format(api_url),
json={"credential_id": credential_id},
headers=headers, verify=False).json()
return registry_credential_response.get("data", {})
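# Editor sketch: the credential id, console address and API key below are hypothetical
# placeholders, not values taken from this repository.
if __name__ == "__main__":
    creds = get_registry_credential("some-credential-id", "127.0.0.1", "deepfence-api-key")
    print(creds)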
|
test/ipython_etherbone.py | skiphansen/litex-buildenv | 198 | 11144699 | #!/usr/bin/env python3
from IPython import embed
from litescope.software.driver.analyzer import LiteScopeAnalyzerDriver
from common import *
from make import get_testdir
def main():
args, wb = connect("LiteX Etherbone Interactive Console")
print_memmap(wb)
print()
analyzer_csv = '{}/analyzer.csv'.format(get_testdir(args))
if os.path.exists(analyzer_csv):
analyzer = LiteScopeAnalyzerDriver(wb.regs, "analyzer", config_csv=analyzer_csv, debug=True)
else:
print("WARNING: No litescope csv found at {},\nAssuming litescope not included in design!".format(analyzer_csv))
try:
embed()
finally:
wb.close()
if __name__ == "__main__":
main()
|
Chap6PracPrintTable.py | viju4you/Python | 110 | 11144707 |
#! /usr/bin/env python3
# Table Printer Chap. 6
# Function that takes lists of strings and displays them in an organized table
tableData = [['apples','oranges','cherries','bananas'],
['Alice','Bob','Carol','David'],
['dogs','cats','moose','goose']]
def printTable(dataLists):
    # Width of each column is the length of its longest entry.
    colWidths = [0] * len(dataLists)
    for i in range(len(dataLists)):
        colWidths[i] = len(max(dataLists[i], key=len))
    for x in range(len(dataLists[0])):
        print(' '.join(str(dataLists[i][x]).rjust(colWidths[i]) for i in range(len(dataLists))))
printTable(tableData)
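# Expected output with the corrected printTable above (columns right-justified, one space apart):
#   apples Alice  dogs
#  oranges   Bob  cats
# cherries Carol moose
#  bananas David goose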
|
rl_coach/architectures/tensorflow_components/embedders/image_embedder.py | JohnnyPeng18/coach | 1,960 | 11144715 |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
import tensorflow as tf
from rl_coach.architectures.tensorflow_components.layers import Conv2d, Dense
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedder
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.core_types import InputImageEmbedding
class ImageEmbedder(InputEmbedder):
"""
An input embedder that performs convolutions on the input and then flattens the result.
The embedder is intended for image like inputs, where the channels are expected to be the last axis.
The embedder also allows custom rescaling of the input prior to the neural network.
"""
def __init__(self, input_size: List[int], activation_function=tf.nn.relu,
scheme: EmbedderScheme=EmbedderScheme.Medium, batchnorm: bool=False, dropout_rate: float=0.0,
name: str= "embedder", input_rescaling: float=255.0, input_offset: float=0.0, input_clipping=None,
dense_layer=Dense, is_training=False, flatten=True):
super().__init__(input_size, activation_function, scheme, batchnorm, dropout_rate, name, input_rescaling,
input_offset, input_clipping, dense_layer=dense_layer, is_training=is_training,
flatten=flatten)
self.return_type = InputImageEmbedding
if len(input_size) != 3 and scheme != EmbedderScheme.Empty:
raise ValueError("Image embedders expect the input size to have 3 dimensions. The given size is: {}"
.format(input_size))
@property
def schemes(self):
return {
EmbedderScheme.Empty:
[],
EmbedderScheme.Shallow:
[
Conv2d(32, 3, 1)
],
# atari dqn
EmbedderScheme.Medium:
[
Conv2d(32, 8, 4),
Conv2d(64, 4, 2),
Conv2d(64, 3, 1)
],
# carla
EmbedderScheme.Deep: \
[
Conv2d(32, 5, 2),
Conv2d(32, 3, 1),
Conv2d(64, 3, 2),
Conv2d(64, 3, 1),
Conv2d(128, 3, 2),
Conv2d(128, 3, 1),
Conv2d(256, 3, 2),
Conv2d(256, 3, 1)
]
}
|
netbox/extras/tests/dummy_plugin/__init__.py | TheFlyingCorpse/netbox | 4,994 | 11144719 | from extras.plugins import PluginConfig
class DummyPluginConfig(PluginConfig):
name = 'extras.tests.dummy_plugin'
verbose_name = 'Dummy plugin'
version = '0.0'
description = 'For testing purposes only'
base_url = 'dummy-plugin'
min_version = '1.0'
max_version = '9.0'
middleware = [
'extras.tests.dummy_plugin.middleware.DummyMiddleware'
]
queues = [
'testing-low',
'testing-medium',
'testing-high'
]
config = DummyPluginConfig
|
scitbx/linalg/tests/tst_cholesky.py | dperl-sol/cctbx_project | 155 | 11144722 | from __future__ import absolute_import, division, print_function
import scitbx.math
import scitbx.linalg
from scitbx import matrix
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal
import random
from six.moves import range
def exercise_cholesky_decomposition():
from scitbx.examples import immoptibox_ports
immoptibox_ports.py_cholesky_decomposition \
= immoptibox_ports.cholesky_decomposition
immoptibox_ports.cholesky_decomposition \
= exercise_scitbx_cholesky_decomposition
immoptibox_ports.tst_flex_counts = 0
immoptibox_ports.exercise_cholesky()
immoptibox_ports.cholesky_decomposition \
= immoptibox_ports.py_cholesky_decomposition
assert immoptibox_ports.tst_flex_counts == 299
del immoptibox_ports.tst_flex_counts
def exercise_scitbx_cholesky_decomposition(a):
from scitbx.examples import immoptibox_ports
c = immoptibox_ports.py_cholesky_decomposition(a)
al = a.matrix_symmetric_as_packed_l()
chol = scitbx.linalg.l_l_transpose_cholesky_decomposition_in_place(al)
cl = al
if (c is None):
assert chol.failure
else:
assert approx_equal(cl, c.matrix_lower_triangle_as_packed_l())
for i_trial in range(10):
b = flex.random_double(size=a.focus()[0], factor=2)-1
x = chol.solve(b)
assert approx_equal(a.matrix_multiply(x), b)
immoptibox_ports.tst_flex_counts += 1
return c
def exercise_gill_murray_wright_cholesky_decomposition():
def p_as_mx(p):
n = len(p)
m = flex.double(flex.grid(n,n))
m.matrix_diagonal_set_in_place(1)
for i in range(n):
if p[i] != i:
m.matrix_swap_rows_in_place(i, p[i])
return matrix.sqr(m)
def core(a):
c = flex.double(a)
c.resize(flex.grid(a.n))
u = c.matrix_upper_triangle_as_packed_u()
gwm = scitbx.linalg.gill_murray_wright_cholesky_decomposition_in_place(
u,
epsilon=1.e-8)
assert gwm.epsilon == 1.e-8
u = c.matrix_upper_triangle_as_packed_u()
gwm = scitbx.linalg.gill_murray_wright_cholesky_decomposition_in_place(u)
assert gwm.epsilon == scitbx.math.floating_point_epsilon_double_get()
assert gwm.packed_u.id() == u.id()
p, e = gwm.pivots, gwm.e
r = matrix.sqr(u.matrix_packed_u_as_upper_triangle())
if a.n != (0,0):
rtr = r.transpose() * r
pm = p_as_mx(p)
papt = pm * a * pm.transpose()
paept = papt + matrix.diag(e)
delta_decomposition = scitbx.linalg.matrix_equality_ratio(paept, rtr)
assert delta_decomposition < 10, delta_decomposition
b = flex.random_double(size=a.n[0], factor=2)-1
x = gwm.solve(b=b)
px = pm * matrix.col(x)
pb = pm * matrix.col(b)
if 0:
eigen = scitbx.linalg.eigensystem.real_symmetric(
paept.as_flex_double_matrix())
lambda_ = eigen.values()
print("condition number: ", lambda_[0]/lambda_[-1])
delta_solve = scitbx.linalg.matrix_cholesky_test_ratio(
a=paept.as_flex_double_matrix(),
x=flex.double(px),
b=flex.double(pb),
epsilon=gwm.epsilon)
assert delta_solve < 10, delta_solve
return p, e, r
# empty matrix
a = matrix.sqr([])
p, e, r = core(a)
assert p.size() == 0
assert e.size() == 0
assert len(r) == 0
n_max = 15
n_trials_per_n = 10
# identity matrices
for n in range(1,n_max+1):
a = matrix.diag([1]*n)
p, e, r = core(a)
assert list(p) == list(range(n))
assert approx_equal(e, [0]*n)
assert approx_equal(r, a)
# null matrices
for n in range(1,n_max+1):
a = matrix.sqr([0]*n*n)
p, e, r = core(a)
assert list(p) == list(range(n))
assert list(e) == [scitbx.math.floating_point_epsilon_double_get()]*n
for i in range(n):
for j in range(n):
if (i != j): r(i,j) == 0
else: r(i,j) == r(0,0)
# random semi-positive diagonal matrices
for n in range(1,n_max+1):
for i_trial in range(n_trials_per_n):
a = matrix.diag(flex.random_double(size=n))
p, e, r = core(a)
assert approx_equal(e, [0]*n)
for i in range(n):
for j in range(n):
if (i != j): approx_equal(r(i,j), 0)
# random diagonal matrices
for n in range(1,n_max+1):
for i_trial in range(n_trials_per_n):
a = matrix.diag(flex.random_double(size=n, factor=2)-1)
p, e, r = core(a)
for i in range(n):
for j in range(n):
if (i != j): approx_equal(r(i,j), 0)
# random semi-positive definite matrices
for n in range(1,n_max+1):
for i_trial in range(n_trials_per_n):
m = matrix.sqr(flex.random_double(size=n*n, factor=2)-1)
a = m.transpose_multiply()
p, e, r = core(a)
assert approx_equal(e, [0]*n)
# random matrices
for n in range(1,n_max+1):
size = n*(n+1)//2
for i_trial in range(n_trials_per_n):
a = (flex.random_double(size=size, factor=2)-1) \
.matrix_packed_u_as_symmetric()
core(matrix.sqr(a))
a.matrix_diagonal_set_in_place(0)
core(matrix.sqr(a))
# <NAME> and <NAME>:
# Numerical Optimization.
# Springer, New York, 1999, pp. 145-150.
for i in range(3):
for j in range(3):
a = flex.double([[4,2,1],[2,6,3],[1,3,-0.004]])
a.matrix_swap_rows_in_place(i=i, j=j)
a.matrix_swap_columns_in_place(i=i, j=j)
p, e, r = core(matrix.sqr(a))
if (i == 0 and j == 0):
assert list(p) == [1,1,2] # swap row 0 and 1 and nothing else
assert approx_equal(e, [0.0, 0.0, 3.008])
assert approx_equal(r,
[2.4494897427831779, 0.81649658092772592, 1.2247448713915889,
0.0, 1.8257418583505538, 0.0,
0.0, 0.0, 1.2263767773404712])
def run():
exercise_cholesky_decomposition()
exercise_gill_murray_wright_cholesky_decomposition()
print('OK')
if __name__ == '__main__':
run()
|
tests/property_test.py | jimustafa/gdstk | 135 | 11144781 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME>.
# This file is part of gdstk, distributed under the terms of the
# Boost Software License - Version 1.0. See the accompanying
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt>
import gdstk
def test_gds_properties():
for obj in [
gdstk.Polygon([-1 + 0j, -2j, 3 + 0j, 4j]),
gdstk.FlexPath((0j, 1j), 0.1),
gdstk.RobustPath(0j, 0.1),
gdstk.Label("Label", 0j),
gdstk.Reference("EMPTY"),
]:
assert obj.get_gds_property(12) is None
assert obj.delete_gds_property(12) is obj
obj.set_gds_property(13, "Property text")
assert obj.get_gds_property(12) is None
assert obj.get_gds_property(13) == "Property text"
obj.delete_gds_property(13)
assert obj.get_gds_property(13) is None
obj.set_gds_property(13, "Second text")
obj.set_gds_property(13, "Third text")
obj.set_gds_property(14, "Fourth text")
assert obj.get_gds_property(13) == "Third text"
assert obj.properties == [
["S_GDS_PROPERTY", 14, b"Fourth text\x00"],
["S_GDS_PROPERTY", 13, b"Third text\x00"],
]
def test_properties():
for obj in [
gdstk.Polygon([-1 + 0j, -2j, 3 + 0j, 4j]),
gdstk.FlexPath((0j, 1j), 0.1),
gdstk.RobustPath(0j, 0.1),
gdstk.Label("Label", 0j),
gdstk.Reference("EMPTY"),
gdstk.Cell("CELL"),
gdstk.Library("Name"),
]:
assert len(obj.properties) == 0
assert obj.get_property("None") is None
obj.set_property("FIRST", 1)
obj.set_property("SECOND", 2.0)
obj.set_property("THIRD", -3)
obj.set_property("FOURTH", [1, 2.0, -3, "FO", b"UR\x00TH\x00"])
obj.set_property("FIRST", -1)
assert obj.get_property("FIRST") == [-1]
obj.delete_property("THIRD")
assert obj.properties == [
["FIRST", -1],
["FOURTH", 1, 2.0, -3, b"FO", b"UR\x00TH\x00"],
["SECOND", 2.0],
["FIRST", 1],
]
obj.properties = (
("ONE", -1),
("TWO", -2.3e-4, "two"),
("Three", b"\xFF\xEE", 0),
)
assert obj.properties == [
["ONE", -1],
["TWO", -2.3e-4, b"two"],
["Three", b"\xFF\xEE", 0],
]
|
vmtkScripts/vmtkbifurcationreferencesystems.py | michelebucelli/vmtk | 217 | 11144782 | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkbifurcationreferencesystems.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:48:31 $
## Version: $Revision: 1.8 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
from vmtk import vtkvmtk
import sys
from vmtk import pypes
class vmtkBifurcationReferenceSystems(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Centerlines = None
self.ReferenceSystems = None
self.RadiusArrayName = 'MaximumInscribedSphereRadius'
self.BlankingArrayName = 'Blanking'
self.GroupIdsArrayName = 'GroupIds'
self.ReferenceSystemsNormalArrayName = 'Normal'
self.ReferenceSystemsUpNormalArrayName = 'UpNormal'
self.SetScriptName('vmtkbifurcationreferencesystems')
        self.SetScriptDoc('compute reference systems for each bifurcation of a tree. The script takes as input the centerlines already split into branches.')
self.SetInputMembers([
['Centerlines','i','vtkPolyData',1,'','the input split centerlines','vmtksurfacereader'],
['RadiusArrayName','radiusarray','str',1,'','the name of the array where centerline radius values are stored'],
['BlankingArrayName','blankingarray','str',1,'','the name of the array where centerline blanking information about branches is stored'],
['GroupIdsArrayName','groupidsarray','str',1,'','the name of the array where centerline group ids are stored'],
['ReferenceSystemsNormalArrayName','normalarray','str',1,'','the name of the array where reference system plane normals have to be stored'],
['ReferenceSystemsUpNormalArrayName','upnormalarray','str',1,'','the name of the array where reference system upnormals have to be stored']
])
self.SetOutputMembers([
['ReferenceSystems','o','vtkPolyData',1,'','the output reference systems, given as points coinciding with the origins','vmtksurfacewriter'],
['ReferenceSystemsNormalArrayName','normalarray','str',1,'','the name of the array where reference system plane normals are stored'],
['ReferenceSystemsUpNormalArrayName','upnormalarray','str',1,'','the name of the array where reference system upnormals are stored']
])
def Execute(self):
if self.Centerlines == None:
self.PrintError('Error: No input centerlines.')
bifurcationReferenceSystems = vtkvmtk.vtkvmtkCenterlineBifurcationReferenceSystems()
bifurcationReferenceSystems.SetInputData(self.Centerlines)
bifurcationReferenceSystems.SetRadiusArrayName(self.RadiusArrayName)
bifurcationReferenceSystems.SetBlankingArrayName(self.BlankingArrayName)
bifurcationReferenceSystems.SetGroupIdsArrayName(self.GroupIdsArrayName)
bifurcationReferenceSystems.SetNormalArrayName(self.ReferenceSystemsNormalArrayName)
bifurcationReferenceSystems.SetUpNormalArrayName(self.ReferenceSystemsUpNormalArrayName)
## bifurcationReferenceSystems.SetReferenceGroupId(self.ReferenceGroupId)
bifurcationReferenceSystems.Update()
self.ReferenceSystems = bifurcationReferenceSystems.GetOutput()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
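# Editor note (hypothetical invocation): the script expects centerlines already split into
# branches, e.g. something along the lines of
#   vmtk vmtkcenterlines -ifile surface.vtp --pipe vmtkbranchextractor \
#        --pipe vmtkbifurcationreferencesystems -ofile refsys.vtp
# exact option names depend on the installed vmtk version.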
|
perma_web/perma/migrations/0014_auto_20160916_1953.py | rachelaus/perma | 317 | 11144803 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-16 19:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('perma', '0013_auto_20160829_2012'),
]
operations = [
migrations.AddField(
model_name='capturejob',
name='order',
field=models.FloatField(db_index=True, default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='capturejob',
name='status',
field=models.CharField(choices=[(b'pending', b'pending'), (b'in_progress', b'in_progress'), (b'completed', b'completed'), (b'deleted', b'deleted'), (b'failed', b'failed')], db_index=True, default=b'pending', max_length=15),
),
]
|
tests/hare/assert_hare.py | josebalius/go-spacemesh | 586 | 11144847 |
from pytest_testconfig import config as testconfig
from tests.delayed_assert.delayed_assert import expect, assert_expectations
from tests.queries import query_hare_output_set, query_round_1, query_round_2, query_round_3, query_pre_round, \
query_no_svp, query_empty_set, query_new_iteration, query_mem_usage
class Set:
def __init__(self, values):
self.values = {}
for v in values:
self.values[v] = v
@classmethod
def from_str(cls, s):
values = [x.strip() for x in s.split(',')]
return cls(values)
def contains(self, val):
return val in self.values
def equals(self, other):
for v in self.values:
if v not in other.values:
return False
return True
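# Editor note: equals() is a one-way containment check (every value of self appears in
# other), e.g. Set.from_str("a, b").equals(Set.from_str("b, a, c")) is True; consistency()
# below applies it to every ordered pair, which makes the overall comparison symmetric.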
def consistency(outputs):
for s in outputs:
for g in outputs:
if not g.equals(s):
return False
return True
def v1(outputs, intersection):
for v in intersection:
if not outputs.contains(v):
return False
return True
def validate(outputs):
sets = [Set.from_str(o) for o in outputs]
if not consistency(sets):
print("consistency failed")
return False
return True
def assert_all(curr_idx, ns, layer):
total = testconfig['bootstrap']['replicas'] + testconfig['client']['replicas']
# assert no node ended up with an empty set at the end of the pre-round
lst = query_pre_round(curr_idx, ns, layer)
assert 0 == len(lst)
# assert all nodes had SVP ready at the end of round 1
lst = query_round_1(curr_idx, ns, layer)
assert total == len(lst)
# assert all nodes had a (non-nil) proposal at the end of round 2
lst = query_round_2(curr_idx, ns, layer)
assert total == len(lst)
# assert at least f+1 has committed at the end of round 3
lst = query_round_3(curr_idx, ns, layer)
f = int(testconfig['client']['args']['hare-max-adversaries'])
assert len(lst) >= f + 1
# assert termination output set
lst = query_hare_output_set(curr_idx, ns, layer)
assert total == len(lst)
assert validate(lst)
def total_eligibilities(hits):
return sum(hit.eligibility_count for hit in hits)
def expect_consensus_process(curr_idx, ns, layer, total, f):
msg = 'layer=%s expected=%s actual=%s'
# assert no node ended up with an empty set at the end of the pre-round
lst = query_pre_round(curr_idx, ns, layer)
expect(0 == len(lst), msg % (layer, 0, len(lst)))
# assert all nodes had SVP ready at the end of round 1
lst = query_round_1(curr_idx, ns, layer)
expect(total == len(lst), msg % (layer, total, len(lst)))
# assert all nodes had a (non-nil) proposal at the end of round 2
lst = query_round_2(curr_idx, ns, layer)
expect(total == len(lst), msg % (layer, total, len(lst)))
# assert at least f+1 has committed at the end of round 3
lst = query_round_3(curr_idx, ns, layer)
expect(total_eligibilities(lst) >= f + 1,
'layer=%s total_eligibilities=%d f=%d' % (layer, total_eligibilities(lst), f))
# assert termination output set
lst = query_hare_output_set(curr_idx, ns, layer)
expect(total == len(lst), msg % (layer, total, len(lst)))
expect(validate(lst), msg % (layer, 'true', 'false'))
def expect_hare(curr_idx, ns, min_l, max_l, total, f):
layer = min_l
while layer <= max_l:
expect_consensus_process(curr_idx, ns, layer, total, f)
layer = layer + 1
assert_expectations()
def validate_hare(indx, ns):
lst = query_empty_set(indx, ns)
expect(0 == len(lst), 'query no empty set')
lst = query_no_svp(indx, ns)
expect(0 == len(lst), 'query no svp')
lst = query_new_iteration(indx, ns)
expect(0 == len(lst), 'query no new iteration')
assert_expectations()
def get_max_mem_usage(i, n):
x = query_mem_usage(i, n)
max_mem = 0
for y in x:
try:
z = int(y.Alloc_max)
if z > max_mem:
max_mem = z
except:
continue
return max_mem
|
code/scripts/plan2scene/metric_impl/substance_classifier/prepare_texture_crops.py | madhawav/plan2scene | 305 | 11144852 |
#!/bin/python3
import sys
import os
import os.path as osp
from PIL import Image
import argparse
import logging
from plan2scene.config_manager import ConfigManager
from plan2scene.texture_gen.custom_transforms.random_crop import RandomResizedCropAndDropAlpha
if __name__ == "__main__":
"""
    This script prepares texture crops (from a texture dataset) that are used to train the substance classifier.
"""
parser = argparse.ArgumentParser(description="Extract rectified surface crops from the opensurfaces dataset, to train the substance classifier.")
parser.add_argument("output_path", type=str, help="Output directory to save texture crops.")
parser.add_argument("input_path", type=str, help="Directory containing textures.")
parser.add_argument("--crops-per-image", type=int, default=20)
parser.add_argument("--crop-size", type=int, default=256)
parser.add_argument("--output-size", type=int, default=128)
parser.add_argument("--attempt-count", type=int, default=100)
conf = ConfigManager()
conf.add_args(parser)
args = parser.parse_args()
conf.process_args(args)
output_path = args.output_path
input_path = args.input_path
# Configuration used
crop_count = args.crops_per_image
crop_size = (args.crop_size, args.crop_size)
output_size = (args.output_size, args.output_size)
attempt_count = args.attempt_count
if osp.exists(output_path):
logging.error("Output directory already exist")
sys.exit(1)
if not osp.exists(output_path):
os.makedirs(output_path)
image_file_paths = [osp.join(input_path, a) for a in os.listdir(input_path)]
image_file_paths = [a for a in image_file_paths if osp.splitext(a)[1] in [".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG"]]
logging.info("Found {count} files.".format(count=len(image_file_paths)))
with open(osp.join(output_path, "index.html"), "w") as f:
for image_file_path in image_file_paths:
img_name = image_file_path.split("/")[-1]
img = Image.open(image_file_path)
index = 0
for i in range(crop_count):
crop = RandomResizedCropAndDropAlpha(crop_size, attempt_count, ratio=(1.0, 1.0))(img)
if crop is not None:
crop = crop.resize(output_size)
crop.save(osp.join(output_path, img_name.split(".")[0] + "_crop%d.png" % index))
logging.info("Saved {file}.".format(file=osp.join(output_path, img_name.split(".")[0] + "_crop%d.png" % index)))
f.write("<div style='float:left; margin:5px;'><img src='%s'/><br><small>%s</small></div>" % (
img_name.split(".")[0] + "_crop%d.png" % index, img_name))
index += 1
f.flush()
|
mahotas/tests/test_center_of_mass.py | langner/mahotas | 541 | 11144867 | import numpy as np
from scipy import ndimage
import mahotas.center_of_mass
np.random.seed(2321)
def _mean_out(img, axis):
if len(img.shape) == 2: return img.mean(1-axis)
if axis == 0:
return _mean_out(img.mean(1), 0)
return _mean_out(img.mean(0), axis - 1)
def slow_center_of_mass(img):
'''
Returns the center of mass of img.
'''
xs = []
for axis,si in enumerate(img.shape):
xs.append(np.mean(_mean_out(img, axis) * np.arange(si)))
xs = np.array(xs)
xs /= img.mean()
return xs
def test_cmp_ndimage():
R = (255*np.random.rand(128,256)).astype(np.uint16)
R += np.arange(256, dtype=np.uint16)
m0,m1 = mahotas.center_of_mass(R)
n0,n1 = ndimage.center_of_mass(R)
assert np.abs(n0 - m0) < 1.
assert np.abs(n1 - m1) < 1.
def test_cmp_ndimage3():
R = (255*np.random.rand(32,128,8,16)).astype(np.uint16)
R += np.arange(16, dtype=np.uint16)
m = mahotas.center_of_mass(R)
n = ndimage.center_of_mass(R)
p = slow_center_of_mass(R)
assert np.abs(n - m).max() < 1.
assert np.abs(p - m).max() < 1.
def test_simple():
R = (255*np.random.rand(128,256)).astype(np.uint16)
R += np.arange(256, dtype=np.uint16)
m0,m1 = mahotas.center_of_mass(R)
assert 0 < m0 < 128
assert 0 < m1 < 256
def test_labels():
R = (255*np.random.rand(128,256)).astype(np.uint16)
labels = np.zeros(R.shape, np.intc)
labels[100:,:] += 1
labels[100:,100:] += 1
centres = mahotas.center_of_mass(R, labels)
for label,cm in enumerate(centres):
assert np.all(cm == mahotas.center_of_mass(R * (labels == label)))
def test_labels_not_intc():
img = np.arange(256).reshape((16,16))
labels = img.copy()
labels %= 3
cm = mahotas.center_of_mass(img, labels)
assert cm.shape == (3,2)
labels = labels.T.copy()
cm = mahotas.center_of_mass(img, labels.T)
assert cm.shape == (3,2)
labels = labels.T.copy()
labels = labels.astype(np.uint16)
cm = mahotas.center_of_mass(img, labels)
assert cm.shape == (3,2)
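# Editor sketch: a labelled variant mirroring test_labels, using the modulo labels from
# test_labels_not_intc; each per-label centre should match the centre of the masked image.
def test_labels_modulo_sketch():
    img = np.arange(256).reshape((16, 16)).astype(float)
    labels = (img % 3).astype(np.intc)
    centres = mahotas.center_of_mass(img, labels)
    for label, cm in enumerate(centres):
        assert np.allclose(cm, mahotas.center_of_mass(img * (labels == label)))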
|
lib/tool_shed/util/hg_util.py | rhpvorderman/galaxy | 1,085 | 11144873 |
import logging
import os
import subprocess
import tempfile
from datetime import datetime
from time import gmtime
from galaxy.tool_shed.util import basic_util
from galaxy.tool_shed.util.hg_util import (
clone_repository,
copy_file_from_manifest,
get_changectx_for_changeset,
get_config_from_disk,
get_ctx_file_path_from_manifest,
get_file_context_from_ctx,
pull_repository,
reversed_lower_upper_bounded_changelog,
reversed_upper_bounded_changelog,
update_repository,
)
from galaxy.util import unicodify
log = logging.getLogger(__name__)
INITIAL_CHANGELOG_HASH = '000000000000'
def add_changeset(repo_path, path_to_filename_in_archive):
try:
subprocess.check_output(['hg', 'add', path_to_filename_in_archive], stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f"Error adding '{path_to_filename_in_archive}' to repository: {unicodify(e)}"
if isinstance(e, subprocess.CalledProcessError):
error_message += f"\nOutput was:\n{unicodify(e.output)}"
raise Exception(error_message)
def archive_repository_revision(app, repository, archive_dir, changeset_revision):
'''Create an un-versioned archive of a repository.'''
repo_path = repository.repo_path(app)
try:
subprocess.check_output(['hg', 'archive', '-r', changeset_revision, archive_dir], stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f"Error attempting to archive revision '{changeset_revision}' of repository '{repository.name}': {unicodify(e)}"
if isinstance(e, subprocess.CalledProcessError):
error_message += f"\nOutput was:\n{unicodify(e.output)}"
log.exception(error_message)
raise Exception(error_message)
def commit_changeset(repo_path, full_path_to_changeset, username, message):
try:
subprocess.check_output(['hg', 'commit', '-u', username, '-m', message, full_path_to_changeset], stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f"Error committing '{full_path_to_changeset}' to repository: {unicodify(e)}"
if isinstance(e, subprocess.CalledProcessError):
if e.returncode == 1 and 'nothing changed' in unicodify(e.output):
return
error_message += f"\nOutput was:\n{unicodify(e.output)}"
raise Exception(error_message)
def get_hgrc_path(repo_path):
return os.path.join(repo_path, '.hg', 'hgrc')
def create_hgrc_file(app, repository):
# Since we support both http and https, we set `push_ssl` to False to
# override the default (which is True) in the Mercurial API.
# The hg purge extension purges all files and directories not being tracked
# by Mercurial in the current repository. It will remove unknown files and
# empty directories. This is not currently used because it is not supported
# in the Mercurial API.
repo_path = repository.repo_path(app)
hgrc_path = get_hgrc_path(repo_path)
with open(hgrc_path, 'w') as fp:
fp.write('[paths]\n')
fp.write('default = .\n')
fp.write('default-push = .\n')
fp.write('[web]\n')
fp.write(f'allow_push = {repository.user.username}\n')
fp.write(f'name = {repository.name}\n')
fp.write('push_ssl = false\n')
fp.write('[extensions]\n')
fp.write('hgext.purge=')
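# Editor note: for a repository owned by user "jane" and named "tool_x" (hypothetical
# values), the writes above produce an hgrc of the form:
#
#     [paths]
#     default = .
#     default-push = .
#     [web]
#     allow_push = jane
#     name = tool_x
#     push_ssl = false
#     [extensions]
#     hgext.purge=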
def get_named_tmpfile_from_ctx(ctx, filename, dir):
"""
Return a named temporary file created from a specified file with a given name included in a repository
changeset revision.
"""
filename = basic_util.strip_path(filename)
for ctx_file in ctx.files():
ctx_file_name = basic_util.strip_path(unicodify(ctx_file))
if filename == ctx_file_name:
try:
# If the file was moved, its destination file contents will be returned here.
fctx = ctx[ctx_file]
except LookupError:
# Continue looking in case the file was moved.
fctx = None
continue
if fctx:
fh = tempfile.NamedTemporaryFile('wb', prefix="tmp-toolshed-gntfc", dir=dir)
tmp_filename = fh.name
fh.close()
fh = open(tmp_filename, 'wb')
fh.write(fctx.data())
fh.close()
return tmp_filename
return None
def get_readable_ctx_date(ctx):
"""Convert the date of the changeset (the received ctx) to a human-readable date."""
t, tz = ctx.date()
date = datetime(*gmtime(float(t) - tz)[:6])
ctx_date = date.strftime("%Y-%m-%d")
return ctx_date
def get_repository_heads(repo):
"""Return current repository heads, which are changesets with no child changesets."""
heads = [repo[h] for h in repo.heads(None)]
return heads
def get_reversed_changelog_changesets(repo):
"""Return a list of changesets in reverse order from that provided by the repository manifest."""
reversed_changelog = []
for changeset in repo.changelog:
reversed_changelog.insert(0, changeset)
return reversed_changelog
def get_revision_label(app, repository, changeset_revision, include_date=True, include_hash=True):
"""
    Return a string consisting of the human-readable changeset rev and the changeset revision string,
    which includes the revision date if the received include_date is True.
"""
repo = repository.hg_repo
ctx = get_changectx_for_changeset(repo, changeset_revision)
if ctx:
return get_revision_label_from_ctx(ctx, include_date=include_date, include_hash=include_hash)
else:
if include_hash:
return f"-1:{changeset_revision}"
else:
return "-1"
def get_rev_label_changeset_revision_from_repository_metadata(app, repository_metadata, repository=None,
include_date=True, include_hash=True):
if repository is None:
repository = repository_metadata.repository
repo = repository.hg_repo
changeset_revision = repository_metadata.changeset_revision
ctx = get_changectx_for_changeset(repo, changeset_revision)
if ctx:
rev = '%04d' % ctx.rev()
if include_date:
changeset_revision_date = get_readable_ctx_date(ctx)
if include_hash:
label = f"{str(ctx.rev())}:{changeset_revision} ({changeset_revision_date})"
else:
label = f"{str(ctx.rev())} ({changeset_revision_date})"
else:
if include_hash:
label = f"{str(ctx.rev())}:{changeset_revision}"
else:
label = f"{str(ctx.rev())}"
else:
rev = '-1'
if include_hash:
label = f"-1:{changeset_revision}"
else:
label = "-1"
return rev, label, changeset_revision
def get_revision_label_from_ctx(ctx, include_date=True, include_hash=True):
if include_date:
if include_hash:
return '%s:%s <i><font color="#666666">(%s)</font></i>' % \
(str(ctx.rev()), str(ctx), str(get_readable_ctx_date(ctx)))
else:
return '%s <i><font color="#666666">(%s)</font></i>' % \
(str(ctx.rev()), str(get_readable_ctx_date(ctx)))
else:
if include_hash:
return f'{str(ctx.rev())}:{str(ctx)}'
else:
return str(ctx.rev())
def get_rev_label_from_changeset_revision(repo, changeset_revision, include_date=True, include_hash=True):
"""
Given a changeset revision hash, return two strings, the changeset rev and the changeset revision hash
which includes the revision date if the receive include_date is True.
"""
ctx = get_changectx_for_changeset(repo, changeset_revision)
if ctx:
rev = '%04d' % ctx.rev()
label = get_revision_label_from_ctx(ctx, include_date=include_date)
else:
rev = '-1'
label = f"-1:{changeset_revision}"
return rev, label
def remove_file(repo_path, selected_file, force=True):
cmd = ['hg', 'remove']
if force:
cmd.append('--force')
cmd.append(selected_file)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f"Error removing file '{selected_file}': {unicodify(e)}"
if isinstance(e, subprocess.CalledProcessError):
error_message += f"\nOutput was:\n{unicodify(e.output)}"
raise Exception(error_message)
def init_repository(repo_path):
"""
Create a new Mercurial repository in the given directory.
"""
try:
subprocess.check_output(['hg', 'init'], stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f'Error initializing repository: {unicodify(e)}'
if isinstance(e, subprocess.CalledProcessError):
error_message += f"\nOutput was:\n{unicodify(e.output)}"
raise Exception(error_message)
def changeset2rev(repo_path, changeset_revision):
"""
Return the revision number (as an int) corresponding to a specified changeset revision.
"""
try:
rev = subprocess.check_output(['hg', 'id', '-r', changeset_revision, '-n'], stderr=subprocess.STDOUT, cwd=repo_path)
except Exception as e:
error_message = f"Error looking for changeset '{changeset_revision}': {unicodify(e)}"
if isinstance(e, subprocess.CalledProcessError):
error_message += f"\nOutput was:\n{unicodify(e.output)}"
raise Exception(error_message)
return int(rev.strip())
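# A minimal usage sketch (not part of the original module): it assumes the `hg`
# binary is on PATH and simply chains the subprocess helpers defined above. The
# function name and its arguments are illustrative only.
def _example_hg_workflow(repo_path, relative_filename, username, message):  # pragma: no cover
    init_repository(repo_path)
    add_changeset(repo_path, relative_filename)
    commit_changeset(repo_path, os.path.join(repo_path, relative_filename), username, message)
    # Resolve the numeric revision of the tip changeset we just created.
    return changeset2rev(repo_path, 'tip')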
__all__ = (
'add_changeset',
'archive_repository_revision',
'clone_repository',
'commit_changeset',
'copy_file_from_manifest',
'create_hgrc_file',
'get_changectx_for_changeset',
'get_config_from_disk',
'get_ctx_file_path_from_manifest',
'get_file_context_from_ctx',
'get_named_tmpfile_from_ctx',
'get_readable_ctx_date',
'get_repository_heads',
'get_reversed_changelog_changesets',
'get_revision_label',
'get_rev_label_changeset_revision_from_repository_metadata',
'get_revision_label_from_ctx',
'get_rev_label_from_changeset_revision',
'pull_repository',
'remove_file',
'reversed_lower_upper_bounded_changelog',
'reversed_upper_bounded_changelog',
'update_repository',
'init_repository',
'changeset2rev',
)
|
djstripe/management/commands/djstripe_clear_expired_idempotency_keys.py | iqbalabd/dj-stripe | 937 | 11144893 |
from django.core.management.base import BaseCommand
from ...utils import clear_expired_idempotency_keys
class Command(BaseCommand):
help = "Deleted expired Stripe idempotency keys."
def handle(self, *args, **options):
clear_expired_idempotency_keys()
|
compiler-rt/test/memprof/lit.cfg.py | acidburn0zzz/llvm-project | 2,338 | 11144909 |
# -*- Python -*-
import os
import platform
import re
import lit.formats
# Get shlex.quote if available (added in 3.3), and fall back to pipes.quote if
# it's not available.
try:
import shlex
sh_quote = shlex.quote
except:
import pipes
sh_quote = pipes.quote
def get_required_attr(config, attr_name):
attr_value = getattr(config, attr_name, None)
if attr_value == None:
lit_config.fatal(
"No attribute %r in test configuration! You may need to run "
"tests from your build directory or add this attribute "
"to lit.site.cfg.py " % attr_name)
return attr_value
# Setup config name.
config.name = 'MemProfiler' + config.name_suffix
# Platform-specific default MEMPROF_OPTIONS for lit tests.
default_memprof_opts = list(config.default_sanitizer_opts)
default_memprof_opts_str = ':'.join(default_memprof_opts)
if default_memprof_opts_str:
config.environment['MEMPROF_OPTIONS'] = default_memprof_opts_str
config.substitutions.append(('%env_memprof_opts=',
'env MEMPROF_OPTIONS=' + default_memprof_opts_str))
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
libdl_flag = '-ldl'
# Setup default compiler flags used with -fmemory-profile option.
# FIXME: Review the set of required flags and check if it can be reduced.
target_cflags = [get_required_attr(config, 'target_cflags')]
target_cxxflags = config.cxx_mode_flags + target_cflags
clang_memprof_static_cflags = (['-fmemory-profile',
'-mno-omit-leaf-frame-pointer',
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls'] +
config.debug_info_flags + target_cflags)
clang_memprof_static_cxxflags = config.cxx_mode_flags + clang_memprof_static_cflags
memprof_dynamic_flags = []
if config.memprof_dynamic:
memprof_dynamic_flags = ['-shared-libsan']
config.available_features.add('memprof-dynamic-runtime')
else:
config.available_features.add('memprof-static-runtime')
clang_memprof_cflags = clang_memprof_static_cflags + memprof_dynamic_flags
clang_memprof_cxxflags = clang_memprof_static_cxxflags + memprof_dynamic_flags
def build_invocation(compile_flags):
return ' ' + ' '.join([config.clang] + compile_flags) + ' '
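# For example, build_invocation(['-O1', 'foo.c']) expands to ' <path-to-clang> -O1 foo.c ',
# which is what the %clang-style substitutions registered below are built from.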
config.substitutions.append( ("%clang ", build_invocation(target_cflags)) )
config.substitutions.append( ("%clangxx ", build_invocation(target_cxxflags)) )
config.substitutions.append( ("%clang_memprof ", build_invocation(clang_memprof_cflags)) )
config.substitutions.append( ("%clangxx_memprof ", build_invocation(clang_memprof_cxxflags)) )
if config.memprof_dynamic:
shared_libmemprof_path = os.path.join(config.compiler_rt_libdir, 'libclang_rt.memprof{}.so'.format(config.target_suffix))
config.substitutions.append( ("%shared_libmemprof", shared_libmemprof_path) )
config.substitutions.append( ("%clang_memprof_static ", build_invocation(clang_memprof_static_cflags)) )
config.substitutions.append( ("%clangxx_memprof_static ", build_invocation(clang_memprof_static_cxxflags)) )
# Some tests use C++11 features such as lambdas and need to pass -std=c++11.
config.substitutions.append(("%stdcxx11 ", '-std=c++11 '))
config.substitutions.append( ("%libdl", libdl_flag) )
config.available_features.add('memprof-' + config.bits + '-bits')
config.available_features.add('fast-unwinder-works')
# Set LD_LIBRARY_PATH to pick dynamic runtime up properly.
new_ld_library_path = os.path.pathsep.join(
(config.compiler_rt_libdir, config.environment.get('LD_LIBRARY_PATH', '')))
config.environment['LD_LIBRARY_PATH'] = new_ld_library_path
# Default test suffixes.
config.suffixes = ['.c', '.cpp']
config.substitutions.append(('%fPIC', '-fPIC'))
config.substitutions.append(('%fPIE', '-fPIE'))
config.substitutions.append(('%pie', '-pie'))
# Only run the tests on supported OSs.
if config.host_os not in ['Linux']:
config.unsupported = True
if not config.parallelism_group:
config.parallelism_group = 'shadow-memory'
|
Pytorch/ActorCritic/agent_and_model.py | FitMachineLearning/FitML | 171 | 11144912 |
## DQN Tutorial
## Implementation from https://github.com/FitMachineLearning
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dataclasses import dataclass
from typing import Any
from random import random
@dataclass
class sars:
state: Any
action: Any
reward: float
next_state: Any
done: bool
qval: float
advantage: float = 0.0
class DQNAgent:
def __init__(self,actor_model,critic_model):
self.actor_model = actor_model
self.critic_model = critic_model
def get_actions(self, observations):
# import ipdb; ipdb.set_trace()
guessed_actions = self.actor_model(torch.Tensor(observations).to(self.actor_model.device))
return guessed_actions
    def get_predicted_Q_values(self,observation_and_action):
        # The critic outputs a single Q value for each (observation, action) pair.
        guessed_Qs = self.critic_model(torch.Tensor(observation_and_action).to(self.critic_model.device))
        return guessed_Qs
    def update_target_model(self):
        # Note: this expects self.targetModel / self.model attributes, which come from
        # the DQN variant of this agent rather than the actor/critic fields set above.
        self.targetModel.load_state_dict(self.model.state_dict())
class ActorModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(ActorModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
# import ipdb; ipdb.set_trace()
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0],512),
torch.nn.ReLU(),
# torch.nn.Linear(1024,256),
# torch.nn.ReLU(),
torch.nn.Linear(512,action_shape[0])
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, x):
return self.net(x)
class CriticModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(CriticModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0]+action_shape[0],512),
torch.nn.ReLU(),
# torch.nn.Linear(2048,512),
# torch.nn.ReLU(),
torch.nn.Linear(512,1) # one out put because we are predicting Q values
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, x):
return self.net(x)
class ReplayBuffer:
def __init__(self, buffer_size = 1000):
# self.buffer_size = buffer_size
self.buffer_size = buffer_size
self.buffer = np.empty((buffer_size),dtype=object)
# self.buffer = []
self.index = 0
def insert(self, sars):
# self.buffer.append(sars)
# print("inserting index ", self.index, "@",self.index%self.buffer_size)
if(self.index == 10):
print("first 10 ",self.buffer[0:10])
# import ipdb; ipdb.set_trace()
# if(self.index > self.buffer_size and self.index%self.buffer_size==0):
# print("first 10 ",self.buffer[0:10])
# print("last 10 ",self.buffer[-10:])
# print("")
# import ipdb; ipdb.set_trace()
self.buffer[self.index%self.buffer_size] = sars
self.index+=1
# self.buffer.append(sars)
# if(len(self.buffer)>self.buffer_size):
# self.buffer = self.buffer[1:]
# # print("Clipping Buffer at size", len(self.buffer))
def sample(self, num_samples,current_episode_steps):
# assert num_samples < min(len(self.buffer),self.index)
# if num_samples>self.index:
# print("sampling n ",min(num_samples,self.index))
a = self.buffer[0:min(self.index,self.buffer_size)]
        # The backing array is pre-allocated, so check how many entries have actually been filled.
        if self.index > 0:
            return np.random.choice(a, min(num_samples, self.index))
        else:
            return []
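# Minimal wiring sketch (not part of the original tutorial): shows one way the pieces
# above could fit together for a gym environment with a flat observation space.
# The environment name, learning rates and buffer size are illustrative guesses.
if __name__ == '__main__':
    env = gym.make('Pendulum-v0')
    actor = ActorModel(env.observation_space.shape, env.action_space.shape, lr=1e-4)
    critic = CriticModel(env.observation_space.shape, env.action_space.shape, lr=1e-3)
    agent = DQNAgent(actor, critic)
    replay_buffer = ReplayBuffer(buffer_size=100000)
    obs = env.reset()
    # One interaction step: act, step the environment, store the transition.
    action = agent.get_actions(obs).detach().cpu().numpy()
    next_obs, reward, done, _ = env.step(action)
    replay_buffer.insert(sars(obs, action, reward, next_obs, done, qval=0.0))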
|
tools/mo/openvino/tools/mo/front/mxnet/eye_ext.py | opencv/dldt | 1,127 | 11144913 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.eye import MXEye
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class EyeExtractor(FrontExtractorOp):
op = '_eye'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
num_rows = attrs.int("N")
num_columns = attrs.int("M", num_rows)
if num_columns is None or num_columns == 0:
num_columns = num_rows
diagonal_index = attrs.int("k", 0)
out_type = attrs.dtype("dtype", np.float32)
new_attrs = {'num_rows': num_rows, 'num_columns': num_columns, 'diagonal_index': diagonal_index, 'output_type': out_type}
MXEye.update_node_stat(node, new_attrs)
return cls.enabled
|
pytorch-pretrained-bert/src/gen_pt_squad.py | lianapanatau/BERT-for-RRC-ABSA | 425 | 11144917 |
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
import squad_data_utils as data_utils
import modelconfig
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def gen(args):
tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model] )
train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "train.json"), is_training=True)
train_features = data_utils.convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
input_ids_np = np.array([f.input_ids for f in train_features], dtype=np.int16)
segment_ids_np = np.array([f.segment_ids for f in train_features], dtype=np.int16)
input_mask_np = np.array([f.input_mask for f in train_features], dtype=np.int16)
start_positions_np = np.array([f.start_position for f in train_features], dtype=np.int16)
end_positions_np = np.array([f.end_position for f in train_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "data.npz"),
input_ids=input_ids_np,
segment_ids = segment_ids_np,
input_mask = input_mask_np,
start_positions = start_positions_np,
end_positions = end_positions_np)
#>>>>> validation
valid_examples=data_utils.read_squad_examples(os.path.join(args.input_dir,"dev.json"), is_training=True)
valid_features = data_utils.convert_examples_to_features(
valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
logger.info(" Num orig examples = %d", len(valid_examples))
logger.info(" Num split examples = %d", len(valid_features))
valid_input_ids_np = np.array([f.input_ids for f in valid_features], dtype=np.int16)
valid_segment_ids_np = np.array([f.segment_ids for f in valid_features], dtype=np.int16)
valid_input_mask_np = np.array([f.input_mask for f in valid_features], dtype=np.int16)
valid_start_positions_np = np.array([f.start_position for f in valid_features], dtype=np.int16)
valid_end_positions_np = np.array([f.end_position for f in valid_features], dtype=np.int16)
np.savez_compressed(os.path.join(args.output_dir, "dev.npz"),
input_ids=valid_input_ids_np,
segment_ids = valid_segment_ids_np,
input_mask = valid_input_mask_np,
start_positions = valid_start_positions_np,
end_positions = valid_end_positions_np)
#<<<<< end of validation declaration
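# Sketch (not part of the original script): the .npz files written above can be loaded
# back into a TensorDataset for training roughly as follows; `load_features` is a
# hypothetical helper, not defined elsewhere in this project.
def load_features(npz_path):
    data = np.load(npz_path)
    return TensorDataset(
        torch.from_numpy(data['input_ids']).long(),
        torch.from_numpy(data['segment_ids']).long(),
        torch.from_numpy(data['input_mask']).long(),
        torch.from_numpy(data['start_positions']).long(),
        torch.from_numpy(data['end_positions']).long())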
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert-model", default='bert-base', type=str)
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=320,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument('--seed',
type=int,
default=0,
help="random seed for initialization")
parser.add_argument('--doc_stride',
type=int,
default=128)
parser.add_argument('--max_query_length',
type=int,
default=30)
parser.add_argument('--max_answer_length',
type=int,
default=30)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
gen(args)
if __name__=="__main__":
main() |
src/nlpia/features.py | byukan/nlpia | 532 | 11144924 | import pandas as pd
import numpy as np
def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]):
""" Plot the correlation coefficient for various exponential scalings of input features
>>> np.random.seed(314159)
>>> df = pd.DataFrame()
>>> df['output'] = np.random.randn(1000)
>>> df['x10'] = df.output * 10
>>> df['sq'] = df.output ** 2
>>> df['sqrt'] = df.output ** .5
>>> optimize_feature_power(df, output_column_name='output').round(2)
x10 sq sqrt
power
2.00 -0.08 1.00 0.83
1.00 1.00 -0.08 0.97
0.80 1.00 0.90 0.99
0.50 0.97 0.83 1.00
0.25 0.93 0.76 0.99
0.10 0.89 0.71 0.97
0.01 0.86 0.67 0.95
Returns:
DataFrame:
columns are the input_columns from the source dataframe (df)
rows are correlation with output for each attempted exponent used to scale the input features
"""
output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name
input_column_names = [colname for colname in df.columns if output_column_name != colname]
results = np.zeros((len(exponents), len(input_column_names)))
for rownum, exponent in enumerate(exponents):
for colnum, column_name in enumerate(input_column_names):
results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])
results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))
# results.plot(logx=True)
return results
|
scripts/readme.py | abdullahzamanbabar/syntribos | 277 | 11144925 | #!/usr/bin/env python
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
repository_tags = """
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/syntribos.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. image:: https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
:target: https://docs.openstack.org/syntribos/latest/
.. image:: https://img.shields.io/pypi/v/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/pyversions/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/pypi/wheel/syntribos.svg
:target: https://pypi.python.org/pypi/syntribos/
.. image:: https://img.shields.io/irc/%23openstack-security.png
:target: https://webchat.freenode.net/?channels=openstack-security
"""
def find_docs():
"""Yields files as per the whitelist."""
loc = "../doc/source/{}.rst"
whitelist = [
"about", "installation",
"configuration", "commands",
"running", "logging",
"test-anatomy", "unittests",
"contributing"]
for fname in whitelist:
fpath = loc.format(fname)
if os.path.isfile(fpath):
yield fpath
def concat_docs():
"""Concatinates files yielded by the generator `find_docs`."""
file_path = os.path.dirname(os.path.realpath(__file__))
head, tail = os.path.split(file_path)
outfile = head + "/README.rst"
if not os.path.isfile(outfile):
print("../README.rst not found, exiting!")
exit(1)
with open(outfile, 'w') as readme_handle:
readme_handle.write(repository_tags)
for doc in find_docs():
with open(doc, 'r') as doc_handle:
for line in doc_handle:
readme_handle.write(line)
readme_handle.write("\n")
if __name__ == '__main__':
"""Generate README.rst from docs."""
concat_docs()
print("\nREADME.rst created!\n")
|
src/gamesbyexample/ulamspiral.py | spp2/PythonStdioGames | 736 | 11144960 | """Ulam Spiral, by <NAME> <EMAIL>
Draws the Ulam spiral, a mysterious mathematical pattern formed by prime numbers,
using turtle graphics.
More info at https://en.wikipedia.org/wiki/Ulam_spiral"""
__version__ = 0
import turtle
import math
turtle.tracer(1000, 0) # Make the turtle draw faster.
SPACING = 3
DOT_SIZE = 4
def main():
turtle.bgcolor('#353337') # Use a dark background color.
turtle.pencolor('#CCCCCC') # The spiral is a light gray color.
# (!) Comment this next line to draw the spiral.
turtle.penup()
turtle.forward(SPACING) # 1 is not prime, so skip
turtle.left(90)
turtle.dot(DOT_SIZE) # 2 is prime, so make a dot
turtle.forward(SPACING)
turtle.left(90)
currentNumber = 3 # This is the number we test for primality.
spiralSideLength = 3
while currentNumber < 40000:
# We draw two sides before increasing the spiral side length:
for i in range(2):
for j in range(spiralSideLength):
divs = amountOfDivisors(currentNumber)
currentNumber += 1
if divs == 0:
# Mark the prime number
turtle.dot(DOT_SIZE, '#76b7eb')
turtle.forward(SPACING)
turtle.left(90)
spiralSideLength += 1
turtle.update() # Finish drawing the screen.
turtle.exitonclick() # When user clicks on the window, close it.
def amountOfDivisors(number):
# Return the number of divisors for `number`.
total = 0
for i in range(2, int(math.sqrt(number)) + 1):
# If i evenly divides number with no remainder, increase total.
if number % i == 0:
total += 1
return total
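# For example, amountOfDivisors(7) == 0, so 7 gets a prime dot in the spiral, while
# amountOfDivisors(12) == 2 because 2 and 3 (the divisors up to sqrt(12)) both divide it.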
try:
main()
except turtle.Terminator:
pass # Do nothing when the turtle window is closed.
|
contrib/performance/sqlusage/requests/propfind_invite.py | backwardn/ccs-calendarserver | 462 | 11144997 | ##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.data.string import ResponseDataString
from caldavclientlibrary.protocol.webdav.definitions import statuscodes, \
headers
from caldavclientlibrary.protocol.webdav.propfind import PropFind
from contrib.performance.sqlusage.requests.httpTests import HTTPTestBase
from caldavclientlibrary.protocol.caldav.definitions import csxml
class PropfindInviteTest(HTTPTestBase):
"""
A propfind operation
"""
def __init__(self, label, sessions, logFilePath, logFilePrefix, depth=1):
super(PropfindInviteTest, self).__init__(label, sessions, logFilePath, logFilePrefix)
self.depth = headers.Depth1 if depth == 1 else headers.Depth0
def doRequest(self):
"""
Execute the actual HTTP request.
"""
props = (
csxml.invite,
)
# Create WebDAV propfind
request = PropFind(self.sessions[0], self.sessions[0].calendarHref, self.depth, props)
result = ResponseDataString()
request.setOutput(result)
# Process it
self.sessions[0].runSession(request)
# If its a 207 we want to parse the XML
if request.getStatusCode() == statuscodes.MultiStatus:
pass
else:
raise RuntimeError("Propfind request failed: %s" % (request.getStatusCode(),))
|
torchnlp/encoders/encoder.py | MPetrochuk/PyTorch-NLP | 2,125 | 11145071 |
class Encoder(object):
"""
    Base class for an encoder employing an identity function.
Args:
enforce_reversible (bool, optional): Check for reversibility on ``Encoder.encode`` and
``Encoder.decode``. Formally, reversible means:
``Encoder.decode(Encoder.encode(object_)) == object_``.
"""
def __init__(self, enforce_reversible=False):
self.enforce_reversible = enforce_reversible
def encode(self, object_):
""" Encodes an object.
Args:
object_ (object): Object to encode.
Returns:
object: Encoding of the object.
"""
if self.enforce_reversible:
self.enforce_reversible = False
encoded_decoded = self.decode(self.encode(object_))
self.enforce_reversible = True
if encoded_decoded != object_:
raise ValueError('Encoding is not reversible for "%s"' % object_)
return object_
def batch_encode(self, iterator, *args, **kwargs):
"""
Args:
            iterator (list): Batch of objects to encode.
*args: Arguments passed to ``encode``.
**kwargs: Keyword arguments passed to ``encode``.
Returns:
list: Batch of encoded objects.
"""
return [self.encode(object_, *args, **kwargs) for object_ in iterator]
def decode(self, encoded):
""" Decodes an object.
Args:
            encoded (object): Encoded object.
        Returns:
            object: Decoded object.
"""
if self.enforce_reversible:
self.enforce_reversible = False
decoded_encoded = self.encode(self.decode(encoded))
self.enforce_reversible = True
if decoded_encoded != encoded:
raise ValueError('Decoding is not reversible for "%s"' % encoded)
return encoded
def batch_decode(self, iterator, *args, **kwargs):
"""
Args:
iterator (list): Batch of encoded objects.
*args: Arguments passed to ``decode``.
**kwargs: Keyword arguments passed to ``decode``.
Returns:
list: Batch of decoded objects.
"""
return [self.decode(encoded, *args, **kwargs) for encoded in iterator]
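# Minimal usage sketch (not part of the library): with the identity behaviour above,
# encode/decode round-trip objects unchanged and the batch_* methods map over iterables.
if __name__ == '__main__':
    encoder = Encoder(enforce_reversible=True)
    assert encoder.encode('token') == 'token'
    assert encoder.batch_decode(encoder.batch_encode(['a', 'b'])) == ['a', 'b']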
|
google_play_scraper/exceptions.py | shikher-chhawchharia/google-play-scraper | 325 | 11145073 |
class GooglePlayScraperException(Exception):
pass
class NotFoundError(GooglePlayScraperException):
pass
class ExtraHTTPError(GooglePlayScraperException):
pass
|
modules/nltk_contrib/coref/tag.py | h4ck3rm1k3/NLP-project | 123 | 11145128 |
import os
import re
import subprocess
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from nltk.util import LazyMap, LazyConcatenation
from nltk.internals import find_binary, java
from nltk.tag import TaggerI
from nltk_contrib.coref import CorpusReaderDecorator
class TaggerCorpusReader(CorpusReaderDecorator):
"""
A C{CorpusReaderDecorator} that adds tagger functionality to an arbitrary
C{CorpusReader}.
"""
def __init__(self, reader, **kwargs):
"""
@return: a corpus reader
@rtype: C{TaggerCorpusReader}
@param reader: the corpus reader to decorate
@type reader: C{CorpusReader}
@kwparam tagger: a tagger object to defer tagging to
@type tagger: C{TaggerI}
"""
self._tagger = kwargs.get('tagger')
CorpusReaderDecorator.__init__(self, reader, **kwargs)
def tagged_sents(self):
return LazyMap(self._tagger.tag, self.sents())
def tagged_words(self):
return LazyConcatenation(LazyMap(self._tagger.tag, self.sents()))
def tagger(self):
return self._tagger
class MXPostTaggerCorpusReader(TaggerCorpusReader):
def __init__(self, reader, **kwargs):
kwargs['tagger'] = MXPostTagger()
TaggerCorpusReader.__init__(self, reader, **kwargs)
def tagged_sents(self):
sents = self.sents()
batch_indices = range(len(sents) / 1024 + 1)
return LazyConcatenation(LazyMap(lambda i:
self._tagger.batch_tag(sents[i * 1024: i * 1024 + 1024]),
batch_indices))
class MXPostTagger(TaggerI):
def tag(self, tokens):
return self.batch_tag([tokens])[0]
def batch_tag(self, sents):
return mxpost_tag(sents)
_mxpost_home = None
_mxpost_classpath = None
def config_mxpost(mxpost_home=None):
global _mxpost_classpath, _mxpost_home
classpath = os.environ.get('CLASSPATH', '').split(':')
mxpost_jar = filter(lambda c: c.endswith('mxpost.jar'), classpath)
if mxpost_jar:
_mxpost_home = os.path.dirname(mxpost_jar[0])
_mxpost_classpath = mxpost_jar[0]
elif os.environ.get('MXPOST'):
_mxpost_home = os.environ.get('MXPOST')
_mxpost_classpath = '%s/mxpost.jar' % os.environ.get('MXPOST')
elif os.environ.get('MXPOST_HOME'):
_mxpost_home = os.environ.get('MXPOST_HOME')
_mxpost_classpath = '%s/mxpost.jar' % os.environ.get('MXPOST_HOME')
elif os.path.exists('/usr/local/mxpost/mxpost.jar'):
_mxpost_home = '/usr/local/mxpost'
_mxpost_classpath = '/usr/local/mxpost/mxpost.jar'
else:
_mxpost_home = None
_mxpost_classpath = None
raise Exception, "can't find mxpost.jar"
def call_mxpost(classpath=None, stdin=None, stdout=None, stderr=None,
blocking=False):
if not classpath:
config_mxpost()
if not classpath:
classpath = _mxpost_classpath
elif 'mxpost.jar' not in classpath:
classpath += ':%s' % _mxpost_classpath
cmd = ['tagger.TestTagger', '%s/%s' % (_mxpost_home, 'wsj-02-21.mxpost')]
return java(cmd, classpath, stdin, stdout, stderr, blocking)
_MXPOST_OUTPUT_RE = \
re.compile(r'^\s*(?P<word>\S+)\_(?P<tag>\S+)\s*$')
def mxpost_parse_output(mxpost_output):
result = []
mxpost_output = mxpost_output.strip()
for sent in filter(None, mxpost_output.split('\n')):
tokens = filter(None, re.split(r'\s+', sent))
if tokens:
result.append([])
for token in tokens:
m = _MXPOST_OUTPUT_RE.match(token)
if not m:
raise Exception, "invalid mxpost tag pattern: %s, %s" % (token, tokens)
word = m.group('word')
tag = m.group('tag')
result[-1].append((word, tag))
return result
def mxpost_tag(sents, **kwargs):
p = call_mxpost(stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = \
p.communicate('\n'.join([' '.join(sent) for sent in sents]))
rc = p.returncode
if rc != 0:
raise Exception, 'exited with non-zero status %s' % rc
if kwargs.get('verbose'):
print 'warning: %s' % stderr
return mxpost_parse_output(stdout) |
tcc/tcc/alignment.py | deepneuralmachine/google-research | 23,901 | 11145179 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variants of the cycle-consistency loss described in TCC paper.
The Temporal Cycle-Consistency (TCC) Learning paper
(https://arxiv.org/pdf/1904.07846.pdf) describes a loss that enables learning
of self-supervised representations from sequences of embeddings that are good
at temporally fine-grained tasks like phase classification, video alignment etc.
These losses impose cycle-consistency constraints between sequences of
embeddings. Another interpretation of the cycle-consistency constraints is
that of mutual nearest-nieghbors. This means if state A in sequence 1 is the
nearest neighbor of state B in sequence 2 then it must also follow that B is the
nearest neighbor of A. We found that imposing this constraint on a dataset of
related sequences (like videos of people pitching a baseball) allows us to learn
generally useful visual representations.
This code allows the user to apply the loss while giving them the freedom to
choose the right encoder for their dataset/task. One advice for choosing an
encoder is to ensure that the encoder does not solve the mutual neighbor finding
task in a trivial fashion. For example, if one uses an LSTM or Transformer with
positional encodings, the matching between sequences may be done trivially by
counting the frame index with the encoder rather than learning good features.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tcc.tcc.deterministic_alignment import compute_deterministic_alignment_loss
from tcc.tcc.stochastic_alignment import compute_stochastic_alignment_loss
def compute_alignment_loss(embs,
batch_size,
steps=None,
seq_lens=None,
stochastic_matching=False,
normalize_embeddings=False,
loss_type='classification',
similarity_type='l2',
num_cycles=20,
cycle_length=2,
temperature=0.1,
label_smoothing=0.1,
variance_lambda=0.001,
huber_delta=0.1,
normalize_indices=True):
"""Computes alignment loss between sequences of embeddings.
This function is a wrapper around different variants of the alignment loss
  described in the deterministic_alignment.py and stochastic_alignment.py files. The
structure of the library is as follows:
i) loss_fns.py - Defines the different loss functions.
ii) deterministic_alignment.py - Performs the alignment between sequences by
deterministically sampling all steps of the sequences.
iii) stochastic_alignment.py - Performs the alignment between sequences by
  stochastically sub-sampling a fixed number of steps from the sequences.
There are four major hparams that need to be tuned while applying the loss:
i) Should the loss be applied with L2 normalization on the embeddings or
without it?
ii) Should we perform stochastic alignment of sequences? This means should we
use all the steps of the embedding or only choose a random subset for
alignment?
iii) Should we apply cycle-consistency constraints using a classification loss
or a regression loss? (Section 3 in paper)
iv) Should the similarity metric be based on an L2 distance or cosine
similarity?
Other hparams that can be used to control how hard/soft we want the alignment
between different sequences to be:
i) temperature (all losses)
ii) label_smoothing (classification)
iii) variance_lambda (regression_mse_var)
iv) huber_delta (regression_huber)
  Each of these params is used in its respective loss type (in brackets) and
  allows the application of the cycle-consistency constraints in a controllable
manner but they do so in very different ways. Please refer to paper for more
details.
The default hparams work well for frame embeddings of videos of humans
performing actions. Other datasets might need different values of hparams.
Args:
embs: Tensor, sequential embeddings of the shape [N, T, D] where N is the
batch size, T is the number of timesteps in the sequence, D is the size of
the embeddings.
batch_size: Integer, Size of the batch.
steps: Tensor, step indices/frame indices of the embeddings of the shape
[N, T] where N is the batch size, T is the number of the timesteps.
If this is set to None, then we assume that the sampling was done in a
uniform way and use tf.range(num_steps) as the steps.
seq_lens: Tensor, Lengths of the sequences from which the sampling was done.
This can provide additional information to the alignment loss. This is
different from num_steps which is just the number of steps that have been
sampled from the entire sequence.
stochastic_matching: Boolean, Should the used for matching be sampled
stochastically or deterministically? Deterministic is better for TPU.
Stochastic is better for adding more randomness to the training process
and handling long sequences.
normalize_embeddings: Boolean, Should the embeddings be normalized or not?
Default is to use raw embeddings. Be careful if you are normalizing the
embeddings before calling this function.
loss_type: String, This specifies the kind of loss function to use.
Currently supported loss functions: classification, regression_mse,
regression_mse_var, regression_huber.
similarity_type: String, Currently supported similarity metrics: l2, cosine.
num_cycles: Integer, number of cycles to match while aligning
stochastically. Only used in the stochastic version.
    cycle_length: Integer, Length of the cycles to use for matching. Only used
in the stochastic version. By default, this is set to 2.
temperature: Float, temperature scaling used to scale the similarity
distributions calculated using the softmax function.
label_smoothing: Float, Label smoothing argument used in
tf.keras.losses.categorical_crossentropy function and described in this
paper https://arxiv.org/pdf/1701.06548.pdf.
variance_lambda: Float, Weight of the variance of the similarity
predictions while cycling back. If this is high then the low variance
similarities are preferred by the loss while making this term low results
in high variance of the similarities (more uniform/random matching).
huber_delta: float, Huber delta described in tf.keras.losses.huber_loss.
normalize_indices: Boolean, If True, normalizes indices by sequence lengths.
Useful for ensuring numerical instabilities doesn't arise as sequence
indices can be large numbers.
Returns:
loss: Tensor, Scalar loss tensor that imposes the chosen variant of the
cycle-consistency loss.
"""
##############################################################################
# Checking inputs and setting defaults.
##############################################################################
  # Get the number of timesteps in the sequence embeddings.
num_steps = tf.shape(embs)[1]
# If steps has not been provided assume sampling has been done uniformly.
if steps is None:
steps = tf.tile(tf.expand_dims(tf.range(num_steps), axis=0),
[batch_size, 1])
  # If seq_lens has not been provided, assume it is equal to the size of the
  # time axis in the embeddings.
if seq_lens is None:
seq_lens = tf.tile(tf.expand_dims(num_steps, 0), [batch_size])
if not tf.executing_eagerly():
# Check if batch size embs is consistent with provided batch size.
with tf.control_dependencies([tf.assert_equal(batch_size,
tf.shape(embs)[0])]):
embs = tf.identity(embs)
# Check if number of timesteps in embs is consistent with provided steps.
with tf.control_dependencies([tf.assert_equal(num_steps,
tf.shape(steps)[1]),
tf.assert_equal(batch_size,
tf.shape(steps)[0])]):
steps = tf.identity(steps)
else:
tf.assert_equal(batch_size, tf.shape(steps)[0])
tf.assert_equal(num_steps, tf.shape(steps)[1])
tf.assert_equal(batch_size, tf.shape(embs)[0])
##############################################################################
# Perform alignment and return loss.
##############################################################################
if normalize_embeddings:
embs = tf.nn.l2_normalize(embs, axis=-1)
if stochastic_matching:
loss = compute_stochastic_alignment_loss(
embs=embs,
steps=steps,
seq_lens=seq_lens,
num_steps=num_steps,
batch_size=batch_size,
loss_type=loss_type,
similarity_type=similarity_type,
num_cycles=num_cycles,
cycle_length=cycle_length,
temperature=temperature,
label_smoothing=label_smoothing,
variance_lambda=variance_lambda,
huber_delta=huber_delta,
normalize_indices=normalize_indices)
else:
loss = compute_deterministic_alignment_loss(
embs=embs,
steps=steps,
seq_lens=seq_lens,
num_steps=num_steps,
batch_size=batch_size,
loss_type=loss_type,
similarity_type=similarity_type,
temperature=temperature,
label_smoothing=label_smoothing,
variance_lambda=variance_lambda,
huber_delta=huber_delta,
normalize_indices=normalize_indices)
return loss
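# Minimal usage sketch (illustrative shapes only, assuming TF2 eager execution):
# 4 sequences of 20 steps with 128-d embeddings, using the default deterministic
# classification variant of the loss.
if __name__ == '__main__':
  example_embs = tf.random.normal([4, 20, 128])
  example_loss = compute_alignment_loss(example_embs, batch_size=4)
  print(example_loss.numpy())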
|
src/pretalx/common/urls.py | hrchu/pretalx | 418 | 11145194 |
from contextlib import suppress
from urllib.parse import urljoin, urlparse
from django.conf import settings
from django.urls import resolve, reverse
from urlman import Urls
def get_base_url(event=None, url=None):
if url and url.startswith("/orga"):
return settings.SITE_URL
if event:
if event.settings.html_export_url and url:
with suppress(Exception):
resolved = resolve(url)
if "agenda" in resolved.namespaces:
return event.settings.html_export_url
if event.settings.custom_domain:
return event.settings.custom_domain
return settings.SITE_URL
def build_absolute_uri(urlname, event=None, args=None, kwargs=None):
url = get_base_url(event)
return urljoin(url, reverse(urlname, args=args, kwargs=kwargs))
class EventUrls(Urls):
def get_hostname(self, url):
url = get_base_url(self.instance.event, url)
return urlparse(url).netloc
def get_scheme(self, url):
url = get_base_url(self.instance.event, url)
return urlparse(url).scheme
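# Minimal sketch (hypothetical URL class, following the urlman conventions used in
# pretalx): subclasses declare URLs as format strings, and EventUrls resolves the
# scheme and hostname per event via the hooks above, e.g. `instance.urls.base.full()`.
class ExampleSubmissionUrls(EventUrls):
    base = "/{self.event.slug}/talk/{self.code}/"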
|
lisrd/models/lisrd_sift.py | liuyuzhenn/LISRD | 225 | 11145202 | """ Module to train and run LISRD-SIFT. """
import warnings
warnings.filterwarnings(action='once')
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from .base_model import BaseModel, Mode
from .backbones.net_vlad import NetVLAD
from ..datasets.utils.homographies import warp_points
from ..utils.geometry_utils import keep_true_keypoints
class LisrdSiftModule(nn.Module):
def __init__(self, config, device):
super().__init__()
self._config = config
self._device = device
self._variances = ['sift', 'upright_sift']
self.vlad_sift = NetVLAD(
num_clusters=self._config['n_clusters'],
dim=self._config['meta_desc_dim'])
self.vlad_upright_sift = NetVLAD(
num_clusters=self._config['n_clusters'],
dim=self._config['meta_desc_dim'])
self.vlad_layers = {'sift': self.vlad_sift,
'upright_sift': self.vlad_upright_sift}
def forward(self, inputs, mode):
outputs = self._get_sift_desc(inputs)
self._compute_meta_descriptors(outputs)
return outputs
def _get_sift_desc(self, inputs):
images = np.uint8(inputs.cpu().numpy().transpose(0, 2, 3, 1) * 255)
descs = {v: [] for v in self._variances}
keypoints = []
assignments = []
tile = self._config['tile']
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_size = np.array(img.shape[:2])
tile_size = img_size / tile
sift = cv2.xfeatures2d.SIFT_create(nfeatures=self._config['n_kp'],
contrastThreshold=0.01)
points = sift.detect(img, None)
if len(points) == 0: # No point detected
keypoints.append(np.zeros((1, 2))) # Dummy kp
assignments.append(np.zeros(1, dtype=int))
for v in self._variances:
descs[v].append(np.ones((1, self._config['desc_size'])))
continue
for v in self._variances:
kp = points.copy()
if v == 'upright_sift':
for k in kp:
k.angle = 0. # Set all orientations to 0
_, desc = sift.compute(img, kp)
descs[v].append(desc)
points = [[k.pt[1], k.pt[0]] for k in points]
keypoints.append(np.array(points))
# For each keypoint, compute in which tile it lands up
ass = np.clip(points // tile_size, 0, tile - 1)
ass = ass[:, 1] + tile * ass[:, 0]
assignments.append(ass.astype(int))
outputs = {'keypoints': keypoints, 'assignments': assignments}
for v in self._variances:
outputs[v + '_desc'] = descs[v]
return outputs
def _compute_meta_descriptor(self, assignments, descs, netvlad):
b_size = len(assignments)
n_tiles = self._config['tile'] * self._config['tile']
meta_descs = []
for i in range(b_size):
meta_desc = []
for j in range(n_tiles):
if np.sum(assignments[i] == j) == 0: # no points in this tile
meta_desc.append( # Dummy meta desc
torch.ones(self._config['meta_desc_dim']
* self._config['n_clusters'],
dtype=torch.float, device=self._device))
continue
desc = descs[i][assignments[i] == j]
desc = desc.reshape(1, self._config['desc_size'], -1, 1)
desc = torch.tensor(desc, dtype=torch.float,
device=self._device)
meta_desc.append(netvlad.forward(desc).flatten())
meta_desc = torch.stack(meta_desc, dim=0)
meta_descs.append(meta_desc)
return torch.stack(meta_descs, dim=0)
def _compute_meta_descriptors(self, outputs):
"""
For each kind of descriptor, compute a meta descriptor encoding
a sub area of the total image.
"""
for v in self._variances:
outputs[v + '_meta_desc'] = self._compute_meta_descriptor(
outputs['assignments'], outputs[v + '_desc'],
self.vlad_layers[v])
class LisrdSift(BaseModel):
required_config_keys = []
def __init__(self, dataset, config, device):
self._device = device
super().__init__(dataset, config, device)
self._variances = ['sift', 'upright_sift']
def _model(self, config):
return LisrdSiftModule(config, self._device)
def _forward(self, inputs, mode, config):
outputs = {}
if mode == Mode.EXPORT:
outputs['descriptors'] = {}
outputs['meta_descriptors'] = {}
with torch.no_grad():
output = self._net.forward(inputs['image0'], mode)
outputs['keypoints'] = output['keypoints']
outputs['assignments'] = output['assignments']
for v in self._variances:
outputs['descriptors'][v] = output[v + '_desc']
outputs['meta_descriptors'][v] = output[v + '_meta_desc']
else:
num_img = 3 if 'image2' in inputs else 2
for i in range(num_img):
n = str(i)
output = self._net.forward(inputs['image' + n], mode)
outputs['keypoints' + n] = output['keypoints']
outputs['assignments' + n] = output['assignments']
for v in self._variances:
outputs[v + '_desc' + n] = output[v + '_desc']
outputs[v + '_meta_desc' + n] = output[v + '_meta_desc']
return outputs
def _loss(self, outputs, inputs, config):
# Loss for the meta descriptors only
meta_desc_loss = self._meta_descriptors_loss(outputs, inputs, config)
return meta_desc_loss
def _meta_descriptors_loss(self, outputs, inputs, config):
# Filter out the points not in common between the two images
H = inputs['homography'].detach().cpu().numpy()
img_size = np.array(inputs['image0'].size()[2:4])
b_size = len(H)
losses = []
for i in range(b_size):
kp0, idx0 = keep_true_keypoints(
outputs['keypoints0'][i], H[i], img_size)
kp1, idx1 = keep_true_keypoints(
outputs['keypoints1'][i], np.linalg.inv(H[i]), img_size)
if (np.sum(idx0) == 0) or (np.sum(idx1) == 0): # No common points
return torch.tensor(0, dtype=torch.float, device=self._device,
requires_grad=True)
assignments0 = outputs['assignments0'][i][idx0]
assignments1 = outputs['assignments1'][i][idx1]
# Compute the distance between all descriptors
desc_dists = []
for v in self._variances:
desc0 = torch.tensor(outputs[v + '_desc0'][i][idx0],
dtype=torch.float, device=self._device)
desc1 = torch.tensor(outputs[v + '_desc1'][i][idx1],
dtype=torch.float, device=self._device)
desc_dist = torch.norm(desc0.unsqueeze(1) - desc1.unsqueeze(0),
dim=2)
desc_dists.append(desc_dist)
desc_dists = torch.stack(desc_dists, dim=2)
# Compute the similarity for each meta descriptor
meta_desc_sims = []
for v in self._variances:
meta_desc0 = outputs[v + '_meta_desc0'][i][assignments0]
meta_desc0 = func.normalize(meta_desc0, dim=1)
meta_desc1 = outputs[v + '_meta_desc1'][i][assignments1]
meta_desc1 = func.normalize(meta_desc1, dim=1)
meta_desc_sims.append(meta_desc0 @ meta_desc1.t())
meta_desc_sims = torch.stack(meta_desc_sims, dim=2)
# Weight the descriptor distances
meta_desc_sims = func.softmax(meta_desc_sims, dim=2)
desc_dist = torch.sum(desc_dists * meta_desc_sims, dim=2)
# Compute correct matches
warped_kp0 = warp_points(kp0, H[i])
points_dist = torch.tensor(np.linalg.norm(
warped_kp0[:, None, :] - kp1[None, :, :], axis=2))
wrong_matches = points_dist > self._config['correct_thresh']
dist_mask = points_dist <= self._config['dist_thresh']
# Positive loss
pos_desc_dist = desc_dist.clone()
pos_desc_dist[wrong_matches] = 0.
pos_dist = torch.max(pos_desc_dist, dim=1)[0]
# Negative loss
desc_dist[dist_mask] = torch.tensor(np.inf)
neg_dist = torch.min(desc_dist, dim=1)[0]
losses.append(func.relu(config['margin']
+ pos_dist - neg_dist).mean())
# Total loss
loss = torch.stack(losses, dim=0).mean()
return loss
def _matching_score(self, outputs, inputs, config):
# Filter out the points not in common between the two images
H = inputs['homography'].detach().cpu().numpy()
img_size = np.array(inputs['image0'].size()[2:4])
b_size = len(H)
matching_scores = []
for i in range(b_size):
kp0, idx0 = keep_true_keypoints(
outputs['keypoints0'][i], H[i], img_size)
kp1, idx1 = keep_true_keypoints(
outputs['keypoints1'][i], np.linalg.inv(H[i]), img_size)
if (np.sum(idx0) == 0) or (np.sum(idx1) == 0): # No common points
return 0.
assignments0 = outputs['assignments0'][i][idx0]
assignments1 = outputs['assignments1'][i][idx1]
# Compute the distance between all descriptors
desc_dists = []
for v in self._variances:
desc0 = torch.tensor(outputs[v + '_desc0'][i][idx0],
dtype=torch.float, device=self._device)
desc1 = torch.tensor(outputs[v + '_desc1'][i][idx1],
dtype=torch.float, device=self._device)
desc_dist = torch.norm(desc0.unsqueeze(1) - desc1.unsqueeze(0),
dim=2)
desc_dists.append(desc_dist)
desc_dists = torch.stack(desc_dists, dim=2)
# Compute the similarity for each meta descriptor
meta_desc_sims = []
for v in self._variances:
meta_desc0 = outputs[v + '_meta_desc0'][i][assignments0]
meta_desc0 = func.normalize(meta_desc0, dim=1)
meta_desc1 = outputs[v + '_meta_desc1'][i][assignments1]
meta_desc1 = func.normalize(meta_desc1, dim=1)
meta_desc_sims.append(meta_desc0 @ meta_desc1.t())
meta_desc_sims = torch.stack(meta_desc_sims, dim=2)
# Weight the descriptor distances
meta_desc_sims = func.softmax(meta_desc_sims, dim=2)
desc_dist = torch.sum(desc_dists * meta_desc_sims, dim=2)
desc_dist = desc_dist.detach().cpu().numpy()
# Compute correct matches
warped_kp0 = warp_points(kp0, H[i])
points_dist = np.linalg.norm(
warped_kp0[:, None, :] - kp1[None, :, :], axis=2)
best_matches = np.argmin(points_dist, axis=1)
min_dist = points_dist[np.arange(len(points_dist)), best_matches]
true_matches = min_dist < self._config['correct_thresh']
# Compute percentage of correct matches
closest = np.argmin(desc_dist, axis=1)
m_score = (0. if np.sum(true_matches) == 0
else (closest == best_matches)[true_matches].mean())
matching_scores.append(m_score)
return np.stack(matching_scores, axis=0).mean()
def _metrics(self, outputs, inputs, config):
m_score = self._matching_score(outputs, inputs, config)
return {'matching_score': m_score}
def initialize_weights(self):
def init_weights(m):
if type(m) == nn.Conv2d:
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
m.bias.data.fill_(0.01)
self._net.apply(init_weights)
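# Illustrative instantiation sketch (the config values below are guesses; the keys are
# the ones read by the code above, and meta_desc_dim must match the 128-d SIFT descriptors).
if __name__ == '__main__':
    _example_cfg = {'n_clusters': 8, 'meta_desc_dim': 128, 'desc_size': 128,
                    'tile': 3, 'n_kp': 1000}
    _module = LisrdSiftModule(_example_cfg, torch.device('cpu'))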
|
sdk/core/azure-common/tests/test_profile.py | rsdoherty/azure-sdk-for-python | 2,728 | 11145233 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from azure.profiles import ProfileDefinition, KnownProfiles
from azure.profiles.multiapiclient import MultiApiClientMixin
import pytest
def test_profile_from_string():
profile_from_string = KnownProfiles.from_name("2017-03-09-profile")
assert profile_from_string is KnownProfiles.v2017_03_09_profile
with pytest.raises(ValueError):
KnownProfiles.from_name("blablabla")
def test_default_profile():
with pytest.raises(ValueError):
KnownProfiles.default.use("This is not a profile")
def test_multiapi_client():
class SDKClient(object):
# Mock msrest.SDKClient to not import it
def __init__(self, creds, config):
assert creds == "creds"
assert config == "config"
class TestClient(MultiApiClientMixin, SDKClient):
DEFAULT_API_VERSION = "2216-08-09"
_PROFILE_TAG = "azure.mgmt.compute.ComputeManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION
}},
_PROFILE_TAG + " latest"
)
def __init__(self, creds="creds", config="config", api_version=None, profile=KnownProfiles.default):
super(TestClient, self).__init__(
creds,
config,
api_version=api_version,
profile=profile
)
def operations(self):
return self._get_api_version("operations")
# By default, use latest
client = TestClient()
assert client.operations() == TestClient.DEFAULT_API_VERSION
# Dynamically change to a new profile
KnownProfiles.default.use(KnownProfiles.v2017_03_09_profile)
assert client.operations() == "2016-03-30"
    # I explicitly ask for latest, even though the default is not latest
client = TestClient(profile=KnownProfiles.latest)
assert client.operations() == TestClient.DEFAULT_API_VERSION
# Bring back default to latest for next tests
KnownProfiles.default.use(KnownProfiles.latest)
    # I asked explicitly for a specific profile, so it must not be latest
client = TestClient(profile=KnownProfiles.v2017_03_09_profile)
assert client.operations() == "2016-03-30"
# I refuse api_version and profile at the same time
# https://github.com/Azure/azure-sdk-for-python/issues/1864
with pytest.raises(ValueError):
TestClient(api_version="something", profile=KnownProfiles.latest)
# If I provide only api_version, this creates a profile with just that
client = TestClient(api_version="2666-05-15")
assert client.operations() == "2666-05-15"
# I can specify old profile syntax with dict
client = TestClient(profile={
"operations": "1789-07-14"
})
assert client.operations() == "1789-07-14"
# If I give a profile definition with no default api-version
    # and I call a method not defined in the profile, this fails
client = TestClient(profile={
"operations2": "1789-07-14"
})
with pytest.raises(ValueError):
client.operations() == "1789-07-14"
def test_multiapi_client_legacy():
"""The messed-up way old Profile was.
    Note that this was only released in RC packages, so as soon as the
    CLI stops using RC packages, this legacy path can safely be removed.
"""
class TestClient(MultiApiClientMixin):
DEFAULT_API_VERSION = "2216-08-09"
_PROFILE_TAG = "azure.mgmt.compute.ComputeManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION
}},
_PROFILE_TAG + " latest"
)
def __init__(self, creds="creds", config="config", api_version=None, profile=KnownProfiles.default):
super(TestClient, self).__init__(
credentials="credentials",
subscription_id="subscription_id",
api_version=api_version,
base_url="base_url",
profile=profile
)
def operations(self):
return self._get_api_version("operations")
# Creating a client that does not raise with:
# TypeError: object.__init__() takes no parameters
    # is enough to show that the legacy path works
TestClient()
|
notebooks/utils.py | gongda0e/AVT | 102 | 11145238 | # Copyright (c) Facebook, Inc. and its affiliates.
"""Utils for notebook."""
import sys
import os
import os.path as osp
import glob
from collections import OrderedDict
from collections.abc import Iterable
import json
import subprocess
import pickle as pkl
import logging
import h5py
import math
import operator
import pathlib
import pandas as pd
import moviepy.editor as mpy
from tqdm import tqdm
import proglog
import numpy as np
from scipy.special import softmax
import torch
# from omegaconf import OmegaConf
import hydra
from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose
import matplotlib
from matplotlib import pylab
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
# from tqdm import tqdm
from tqdm.notebook import tqdm
sys.path.append('..')
from external.rulstm.RULSTM.utils import topk_recall
from launch import subselect_dict_keys_diff
from datasets import epic_kitchens
CODE_DIR = str(pathlib.Path(__file__).parent.resolve() / '../')
OUTPUT_DIR = f'{CODE_DIR}/OUTPUTS/'
RESULTS_SAVE_DIR_PREFIX = 'results' # This is the prefix, can have multiple, if >1 eval datasets
DATASET_EVAL_CFG_KEY = 'dataset_eval'
DATASET_EVAL_CFG_KEY_SUFFIX = ''
proglog.notebook() # so moviepy uses notebook tqdm
SQRT2 = math.sqrt(2)
sns.set_style("whitegrid")
rcParams['mathtext.fontset'] = 'custom'
rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
matplotlib.rc('axes', edgecolor='k')
matplotlib.rc('font', size=30)
def save_graph(fig, outfpath, root_dir='./', **kwargs):
# Any postprocessing of the graphs
sns.despine(top=True, right=True, left=False, bottom=False)
# Save code
final_oufpath = os.path.join(root_dir, outfpath)
os.makedirs(osp.dirname(final_oufpath), exist_ok=True)
fig.savefig(final_oufpath,
bbox_inches='tight',
transparent=True,
pad_inches=0,
**kwargs)
def allkeys(obj, keys=[]):
"""Recursively find all leaf keys in h5. """
keys = []
for key in obj.keys():
if isinstance(obj[key], h5py.Group):
keys += [f'{key}/{el}' for el in allkeys(obj[key])]
else:
keys.append(key)
return keys
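# Illustrative sketch (not from the original repo): for an HDF5 file laid out
# as {'logits': {'action': ..., 'verb': ...}, 'idx': ...}, allkeys(fin) would
# return the flattened leaf paths, e.g. 'logits/action', 'logits/verb', 'idx'.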
class EmptyResdirError(ValueError):
pass
def gen_load_resfiles(resdir):
resfiles = glob.glob(osp.join(resdir, '*.pth'))
if len(resfiles) == 0:
resfiles = glob.glob(osp.join(resdir, '*.h5'))
if len(resfiles) == 0:
raise EmptyResdirError(f'Didnt find any resfiles in {resdir}')
for resfile in resfiles:
if resfile.endswith('.pth'):
output_dict = {
key: val.numpy() if torch.torch.is_tensor(val) else val
for key, val in torch.load(resfile).items()
}
else:
output_dict = {}
with h5py.File(resfile, 'r') as fin:
for key in allkeys(fin):
try:
output_dict[key] = fin[key][()]
except AttributeError as err:
# Happens for the string keys... need to figure what
# to do here
logging.warning('Unable to load %s (%s)', key, err)
yield output_dict
def read_results(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
# TODO allow to read only certain keys, eg some times we only need logits
# which would be faster to read
res_per_layer = {
key: OrderedDict()
for key in data if key not in ['epoch']
}
if len(res_per_layer) == 0:
raise ValueError('No logits found in the output. Note that code was '
'changed Aug 26 2020 that renames "output" to '
'"logits" etc. So might need to rerun testing.')
logging.info('Reading from resfiles')
for data in gen_load_resfiles(resdir):
for i, idx in enumerate(data['idx']):
idx = int(idx)
for key in res_per_layer:
if idx not in res_per_layer[key]:
res_per_layer[key][idx] = []
res_per_layer[key][idx].append(data[key][i])
# Mean over all the multiple predictions per key
final_res = {}
for key in res_per_layer:
if len(res_per_layer[key]) == 0:
continue
max_idx = max(res_per_layer[key].keys())
key_output = np.zeros([
max_idx + 1,
] + list(res_per_layer[key][0][0].shape))
for idx in res_per_layer[key]:
key_output[idx] = np.mean(np.stack(res_per_layer[key][idx]),
axis=0)
final_res[key] = key_output
return final_res
def get_epoch_from_resdir(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
if 'epoch' not in data:
return None
return np.min(data['epoch'])
def read_all_results(conf_path, run_id=0):
resdirs = glob.glob(
osp.join(OUTPUT_DIR, conf_path, str(run_id),
RESULTS_SAVE_DIR_PREFIX + '*'))
all_res = {}
for resdir in resdirs:
resdir_bname = osp.basename(resdir)
all_res[resdir_bname] = read_results(conf_path,
run_id,
results_dir=resdir_bname)
return all_res
def read_file_into_list(fpath):
"""Read cli from file into a string."""
# TODO: Ideally reuse this from the launch script
args_lst = []
with open(fpath, 'r') as fin:
for line in fin:
args = line.split('#')[0].strip()
if not args: # Empty
continue
args_lst.append(args)
# Importing this on the global scope does not work .. gives the
# super(cls, self).. error
# https://thomas-cokelaer.info/blog/2011/09/382/
# Probably some issue with auto package reload in notebooks for py2.7
# packages..
from hydra._internal.core_plugins.basic_sweeper import BasicSweeper
from hydra.core.override_parser.overrides_parser import OverridesParser
sweeper = BasicSweeper(max_batch_size=None)
parser = OverridesParser.create()
overrides = parser.parse_overrides(args_lst)
run_args = sweeper.split_arguments(overrides, max_batch_size=None)[0]
return run_args
def get_config(cfg_fpath, run_id=0):
# outdir = osp.join(OUTPUT_DIR, cfg_fpath, str(run_id))
overrides_all = read_file_into_list('../' + cfg_fpath)
# https://github.com/facebookresearch/hydra/issues/716 should fix the issue
# with interpolation not working in notebook etc.
# However it can't handle ":" style custom interpolation, so need to
# override those.
cfg_all = []
for overrides in overrides_all:
overrides.append('cwd="../"')
with hydra_initialize(config_path='../conf'):
cfg = hydra_compose(config_name='config.yaml',
return_hydra_config=True,
overrides=overrides)
cfg_all.append(cfg)
if run_id is None:
return cfg_all
else:
return cfg_all[run_id]
def get_dataset(cfg_fpath,
run_id=0,
dataset_cfg_key=DATASET_EVAL_CFG_KEY,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
cfg = get_config(cfg_fpath, run_id)
sys.path.append('../')
dataset = hydra.utils.instantiate(getattr(
cfg, dataset_cfg_key + dataset_key_suffix),
frames_per_clip=1,
_recursive_=False)
return dataset
def overlay_text(clip, texts):
"""
Args:
clip: Moviepy clip
texts: List of 2 strings (corr to GT and pred) to overlay onto the clip
"""
bg_color = 'white' if texts[0] == texts[1] else 'pink'
texts[0] = 'GT: ' + texts[0]
texts[1] = 'Pred: ' + texts[1]
textclip = (mpy.TextClip(str(texts), bg_color=bg_color).set_duration(
clip.duration).set_pos(("right", "top")))
return mpy.CompositeVideoClip([clip, textclip])
def compute_topk(predictions, labels, k, classes=None):
"""
Args:
predictions (N, K)
labels (N,)
classes: (C', ): Set of classes to compute over. By default, uses
all classes
"""
if classes is None:
classes = np.unique(labels)
# Subselect items that belong to the classes
    # Converting to list since classes are at times dict_values and that
# doesn't directly convert to np.array
reqd_elts = np.isin(labels, list(classes))
predictions = predictions[reqd_elts]
labels = labels[reqd_elts]
top_predictions = np.argpartition(predictions, -k, axis=-1)[:, -k:]
ratio_solved = np.mean(
np.any(labels[:, np.newaxis] == top_predictions, axis=-1))
return ratio_solved * 100.0
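# Quick sanity-check sketch (hypothetical values, not from the original code):
#   preds = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
#   compute_topk(preds, np.array([1, 2]), k=1)  # -> 50.0 (only row 0 is correct)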
def combine_verb_noun_preds(res_verb, res_noun):
"""
Args:
res_verb (matrix with NxC1 dims)
res_noun (matrix with NxC2 dims)
Returns:
res_action (matrix with Nx(C1 * C2) dims)
"""
num_elts = res_verb.shape[0]
# normalize the predictions using softmax
res_verb = softmax(res_verb, axis=-1)
res_noun = softmax(res_noun, axis=-1)
# Cross product to get the combined score
return np.einsum('ij,ik->ijk', res_verb, res_noun).reshape((num_elts, -1))
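# Illustrative note (assumption based on the code above): the flattened action
# index for (verb_id, noun_id) is verb_id * C2 + noun_id, which matches how
# true_action is built later in this file. E.g. with C1=2 verbs, C2=3 nouns:
#   res_verb = np.ones((1, 2)); res_noun = np.ones((1, 3))
#   combine_verb_noun_preds(res_verb, res_noun).shape  # -> (1, 6)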
def compute_conf_mat(predictions, target):
def to_onehot(indices, num_classes):
onehot = torch.zeros(indices.shape[0],
num_classes,
*indices.shape[1:],
device=indices.device)
# rgirdhar: When test on test set, there will be some data points where
# we don't have the labels
return onehot.scatter_(1, indices[indices >= 0].unsqueeze(1), 1)
num_classes = predictions.shape[1]
assert predictions.shape[0] == target.shape[0]
with torch.no_grad():
target_1hot = to_onehot(target, num_classes)
target_1hot_t = target_1hot.transpose(0, 1).float()
pred_idx = torch.argmax(predictions, dim=1)
pred_1hot = to_onehot(pred_idx.reshape(-1), num_classes)
pred_1hot = pred_1hot.float()
confusion_matrix = torch.matmul(target_1hot_t, pred_1hot)
return confusion_matrix
def mean_class_accuracy(conf_mat):
# Increase floating point precision similar to forecasting HOI
conf_mat = conf_mat.type(torch.float64)
cls_cnt = conf_mat.sum(dim=1) + 1e-15
cls_hit = conf_mat.diag()
cls_acc = (cls_hit / cls_cnt).mean().item()
return cls_acc
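# Worked example (hypothetical 2-class confusion matrix, rows = true class):
#   conf_mat = torch.tensor([[8., 2.], [5., 5.]])
#   mean_class_accuracy(conf_mat)  # -> (0.8 + 0.5) / 2 = 0.65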
def compute_accuracy(predictions, labels, classes=None):
"""
Args:
predictions: (B, C) logits
labels: (B, )
classes: OrderedDict[name (str), cls_id (int)]
"""
# This can happen when computing tail class accuracies and it's not
# specified for the test set
if predictions.size == 0:
return [float('nan')] * 5
labels = labels.astype(np.int64)
if classes is not None:
classes_to_keep = list(classes.values())
else:
classes_to_keep = range(max(labels) + 1)
top_1 = compute_topk(predictions, labels, 1, classes=classes_to_keep)
top_5 = compute_topk(predictions, labels, 5, classes=classes_to_keep)
try:
ar_outputs = topk_recall(predictions,
labels,
k=5,
classes=classes_to_keep)
if isinstance(ar_outputs, tuple):
# This happens if RULSTM code is modified to return per-class AR
# values
ar5, ar5_per_cls = ar_outputs
ar5_per_cls = {k: v * 100.0 for k, v in ar5_per_cls.items()}
else:
ar5 = ar_outputs
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
except ZeroDivisionError:
# This happens when it can't find any true classes, the code
# can't deal with that
ar5 = float('nan')
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
# Compute a mean class accuracy (used in EGTEA) -- accuracy per class and
# then mean over the classes
conf_mat = compute_conf_mat(torch.from_numpy(predictions),
torch.from_numpy(labels))
# Make sure conf mat makes sense
top_1_confmat = 100.0 * (conf_mat.diag()[classes_to_keep].sum() /
conf_mat[classes_to_keep].sum())
if (not np.isnan(top_1) and not np.isnan(top_1_confmat)
and not np.isclose(top_1, top_1_confmat, atol=1.0)):
# Using a large atol margin cos conf_mat comp happens on GPUs and can
# be non deterministic, so might not match sometimes..
# Save the outputs for analysis
with open('debug_acc.pkl', 'wb') as fout:
pkl.dump(predictions, fout)
pkl.dump(labels, fout)
pkl.dump(conf_mat, fout)
raise ValueError(f'top1 ({top_1}) doesnt match what I get from '
f'conf_mat ({top_1_confmat}). This could happen '
f'if the model predicts all 0s for some data points '
f'and hence argmax is not defined and behaves '
f'differently in numpy and torch '
f'(https://github.com/pytorch/pytorch/issues/14147)')
top1_meancls = 100.0 * mean_class_accuracy(conf_mat)
return top_1, top_5, ar5 * 100, top1_meancls, ar5_per_cls
def print_accuracies_epic(metrics: dict, prefix: str = ''):
print(f"[{prefix}] Accuracies verb/noun/action: "
f"{metrics['vtop1']:.1f} {metrics['vtop5']:.1f} "
f"{metrics['ntop1']:.1f} {metrics['ntop5']:.1f} "
f"{metrics['atop1']:.1f} {metrics['atop5']:.1f} ")
print(f"[{prefix}] Mean class top-1 accuracies verb/noun/action: "
f"{metrics['vtop1_meancls']:.1f} "
f"{metrics['ntop1_meancls']:.1f} "
f"{metrics['atop1_meancls']:.1f} ")
print(f"[{prefix}] Recall@5 verb/noun/action: "
f"{metrics['vrec5']:.1f} {metrics['nrec5']:.1f} "
f"{metrics['arec5']:.1f} ")
print(f"[{prefix}] Recall@5 many shot verb/noun/action: "
f"{metrics['vrec5_ms']:.1f} {metrics['nrec5_ms']:.1f} "
f"{metrics['arec5_ms']:.1f} ")
if 'vrec5_tail' in metrics:
# assuming the others for tail/unseen will be in there too, since
# they are all computed at one place for ek100
print(f"[{prefix}] Recall@5 tail verb/noun/action: "
f"{metrics['vrec5_tail']:.1f} {metrics['nrec5_tail']:.1f} "
f"{metrics['arec5_tail']:.1f} ")
print(f"[{prefix}] Recall@5 unseen verb/noun/action: "
f"{metrics['vrec5_unseen']:.1f} {metrics['nrec5_unseen']:.1f} "
f"{metrics['arec5_unseen']:.1f} ")
def get_logits_from_results(results):
if 'logits' in results:
return results['logits']
# Newer version, as of Nov 3 2020
logits_keys = [key for key in results.keys() if key.startswith('logits/')]
if len(logits_keys) == 1:
return results[logits_keys[0]]
# Else, return all of them in a dict
return {key: results[key] for key in logits_keys}
def get_epic_action_accuracy(run_info_verb, run_info_noun):
# Compute action accuracies implicitly from verb and noun
# TODO also compute with many-shot classes for EPIC 55
res_verb = get_logits_from_results(read_results(*run_info_verb))
res_noun = get_logits_from_results(read_results(*run_info_noun))
dataset_verb = get_dataset(*run_info_verb)
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
res_verb, dataset_verb.df['verb_class'].values)
dataset_noun = get_dataset(*run_info_noun)
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
res_noun, dataset_noun.df['noun_class'].values)
assert (len(dataset_verb.df) == len(res_verb) == len(dataset_noun.df) ==
len(res_noun))
res_action = combine_verb_noun_preds(res_verb, res_noun)
true_action = (
dataset_verb.df['verb_class'].values * len(dataset_noun.classes) +
dataset_noun.df['noun_class'].values)
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
res_action, true_action)
print_accuracies_epic({
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': float('nan'), # TODO
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': float('nan'), # TODO
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': float('nan'), # TODO
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
})
def epic100_unseen_tail_eval(probs, dataset):
"""
probs: contains 3 elements: predictions for verb, noun and action
"""
# based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L379
unseen_participants_ids = pd.read_csv(osp.join(
dataset.rulstm_annotation_dir,
'validation_unseen_participants_ids.csv'),
names=['id'],
squeeze=True)
tail_verbs_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_verbs_ids.csv'),
names=['id'],
squeeze=True)
tail_nouns_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_nouns_ids.csv'),
names=['id'],
squeeze=True)
tail_actions_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_actions_ids.csv'),
names=['id'],
squeeze=True)
# Now based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L495
unseen_bool_idx = dataset.df.narration_id.isin(
unseen_participants_ids).values
tail_verbs_bool_idx = dataset.df.narration_id.isin(tail_verbs_ids).values
tail_nouns_bool_idx = dataset.df.narration_id.isin(tail_nouns_ids).values
tail_actions_bool_idx = dataset.df.narration_id.isin(
tail_actions_ids).values
# For tail
_, _, vrec5_tail, _, _ = compute_accuracy(
probs[0][tail_verbs_bool_idx],
dataset.df.verb_class.values[tail_verbs_bool_idx])
_, _, nrec5_tail, _, _ = compute_accuracy(
probs[1][tail_nouns_bool_idx],
dataset.df.noun_class.values[tail_nouns_bool_idx])
_, _, arec5_tail, _, _ = compute_accuracy(
probs[2][tail_actions_bool_idx],
dataset.df.action_class.values[tail_actions_bool_idx])
# for unseen
_, _, vrec5_unseen, _, _ = compute_accuracy(
probs[0][unseen_bool_idx],
dataset.df.verb_class.values[unseen_bool_idx])
_, _, nrec5_unseen, _, _ = compute_accuracy(
probs[1][unseen_bool_idx],
dataset.df.noun_class.values[unseen_bool_idx])
_, _, arec5_unseen, _, _ = compute_accuracy(
probs[2][unseen_bool_idx],
dataset.df.action_class.values[unseen_bool_idx])
return dict(
vrec5_tail=vrec5_tail,
nrec5_tail=nrec5_tail,
arec5_tail=arec5_tail,
vrec5_unseen=vrec5_unseen,
nrec5_unseen=nrec5_unseen,
arec5_unseen=arec5_unseen,
)
def compute_accuracies_epic(probs, dataset):
manyshot_classes = dataset.classes_manyshot
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
probs[0], dataset.df.verb_class.values)
vrec5_ms, nrec5_ms, arec5_ms = float('nan'), float('nan'), float('nan')
if 'verb' in manyshot_classes:
_, _, vrec5_ms, _, _ = compute_accuracy(
probs[0],
dataset.df.verb_class.values,
classes=manyshot_classes['verb'])
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
probs[1], dataset.df.noun_class.values)
if 'noun' in manyshot_classes:
_, _, nrec5_ms, _, _ = compute_accuracy(
probs[1],
dataset.df.noun_class.values,
classes=manyshot_classes['noun'])
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
probs[2], dataset.df.action_class.values)
if 'action' in manyshot_classes:
_, _, arec5_ms, _, _ = compute_accuracy(
probs[2],
dataset.df.action_class.values,
classes=manyshot_classes['action'])
res = {
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': vrec5_ms,
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': nrec5_ms,
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': arec5_ms,
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
}
if dataset.version == epic_kitchens.EPIC100_VERSION:
res.update(epic100_unseen_tail_eval(probs, dataset))
return res
def get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
res_action = get_logits_from_results(
read_results(*run_info, results_dir=f'results{dataset_key_suffix}'))
dataset = get_dataset(*run_info, dataset_key_suffix=dataset_key_suffix)
if isinstance(res_action, dict):
print(f'Found logits outputs for verb noun as well [{run_info}]')
# It has multiple heads for verb/noun as well
res_verb = res_action['logits/verb']
res_noun = res_action['logits/noun']
res_action = res_action['logits/action']
else:
res_action_probs = softmax(res_action, axis=-1)
# Marginalize the other dimension, using the mapping matrices I store
# in the dataset obj
res_verb = np.matmul(
res_action_probs,
dataset.class_mappings[('verb', 'action')].numpy())
res_noun = np.matmul(
res_action_probs,
dataset.class_mappings[('noun', 'action')].numpy())
accuracies = compute_accuracies_epic([res_verb, res_noun, res_action],
dataset)
# Returning the actual scores for actions instead of the probs. Found
# better results with this, and Sener et al. ECCV'20 does the same.
scores = [res_verb, res_noun, res_action]
return accuracies, scores, dataset
def read_scores_from_pkl(pkl_fpath):
"""
    Read scores in the format dumped by the ActionBanks code.
"""
with open(pkl_fpath, 'rb') as fin:
scores = pkl.load(fin)
return [
scores['verb_scores'], scores['noun_scores'], scores['action_scores']
]
def load_json(fpath, verb_noun_to_action, nclasses):
"""
Args:
fpath: Path to the json
verb_noun_to_action: Dict from (verb_id, noun_id) to action_id
nclasses: A list of 3 elements, with the label space for verb/noun/act
Returns: a dict with
{uid1: score1, uid2: score2 ...}
"""
assert len(nclasses) == 3, 'One for verb/noun/action'
with open(fpath, 'r') as fin:
preds = json.load(fin)
# Res for verb/noun/action
all_res = []
for j, space in enumerate(['verb', 'noun', 'action']):
# Convert to a {uid: <scores>} format
res = {}
for key, val in preds['results'].items():
# Will be using 0 for all the scores not defined. Should be fine given
# top 100 should be enough for late fusion etc, metrics are like top-5
# anyway.
scores = np.zeros((nclasses[j], ))
for i, score in val[space].items():
if space == 'action':
# Since for actions the "key" is (verb, noun) tuple,
# need to convert it to an action index by
# verb_id * noun_count + noun_id
idx = tuple(int(el) for el in i.split(','))
idx = verb_noun_to_action[idx]
else:
idx = int(i)
scores[idx] = score
res[key] = scores
all_res.append(res)
return all_res
def _concat_with_uids(scores, dataset, uid_key):
# Make a dict with the IDs from the dataset
# There will be 3 elements in scores -- verb, noun, action
return [
dict(
zip([str(el)
for el in dataset.df[uid_key].values], scores_per_space))
for scores_per_space in scores
]
def _normalize_scores(scores, p):
"""This brings the scores between 0 to 1, and normalizes by """
res = []
for scores_per_space in scores:
res.append({
uid: val / (np.linalg.norm(val, ord=p, axis=-1) + 0.000001)
for uid, val in scores_per_space.items()
})
return res
def _get_avg_norm_scores(scores, p):
"""Remove the UID keys etc, and then compute."""
scores = np.array([val for _, val in scores.items()])
return np.mean(np.linalg.norm(scores, ord=p, axis=-1), axis=0)
def get_epic_marginalize_late_fuse(
run_infos,
weights=1.0,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
"""
Args:
        eventual_fname: Used to read prepackaged outputs from result
            files; the filename determines which file to look for
            when a directory is passed in as run_info.
normalize_before_combine: Set to non-None to normalize the features
by that p-norm, and then combine. So the weights would have to be
defined w.r.t normalized features.
"""
all_scores = []
all_datasets = []
for run_info_id, run_info in enumerate(run_infos):
if isinstance(run_info[0], dict):
# This is likely a pre-computed scores (so eg a nested
# get_epic_marginalize.. function). So I just use the scores as is.
scores = run_info
elif os.path.isdir(run_info[0]):
assert len(all_datasets) > 0, (
'Need at least 1 datasets to be read before reading from json '
'to figure the verb/noun -> action_id and '
'to figure the total number of classes to gen feat vectors')
scores = load_json(
os.path.join(run_info[0], eventual_fname),
all_datasets[-1].verb_noun_to_action,
[list(el.values())[0].shape[-1] for el in all_scores[-1]])
elif run_info[0].endswith('.pkl'):
# This is the input used to read predictions from the action_banks
# codebase, where I dump output into pkl and read here for late
# fusion.
scores = read_scores_from_pkl(run_info[0])
assert len(
all_datasets) > 0, 'At least one run_info must be passed in'
scores = _concat_with_uids(scores, all_datasets[-1], uid_key)
else:
accuracies, scores, dataset = get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=dataset_key_suffix)
scores = _concat_with_uids(scores, dataset, uid_key)
print_accuracies_epic(accuracies, prefix=run_info)
all_datasets.append(dataset)
if normalize_before_combine is not None:
scores = _normalize_scores(scores, p=normalize_before_combine)
logging.warning(
'Adding scores from run_info %d with avg action L1 norm of %f',
run_info_id, _get_avg_norm_scores(scores[-1], p=1))
all_scores.append(scores)
# Late fuse
if isinstance(weights, float):
weights = [weights] * len(run_infos)
else:
assert len(weights) == len(run_infos)
# broadcastable_weights = np.array(weights)[:, np.newaxis, np.newaxis]
# Combined scores by combining the corresponding score for each uid.
combined = []
for space_id in range(3): # verb/noun/action
scores_for_space = [scores[space_id] for scores in all_scores]
# Take the union of all the UIDs we have score for
total_uids = set.union(*[set(el.keys()) for el in scores_for_space])
logging.warning('Combined UIDs: %d. UIDs in the runs %s',
len(total_uids),
[len(el.keys()) for el in scores_for_space])
combined_for_space = {}
for uid in total_uids:
combined_for_space[uid] = []
for run_id, scores_for_space_per_run in enumerate(
scores_for_space):
if uid in scores_for_space_per_run:
combined_for_space[uid].append(
scores_for_space_per_run[uid] * weights[run_id])
combined_for_space[uid] = np.sum(np.stack(combined_for_space[uid]),
axis=0)
combined.append(combined_for_space)
# Now to compute accuracies, need to convert back to np arrays from dict.
# Would only work for parts that are in the dataset
combined_np = []
for combined_for_space in combined:
combined_np.append(
np.array([
combined_for_space[str(uid)]
for uid in all_datasets[-1].df[uid_key].values
]))
accuracies = compute_accuracies_epic(combined_np, all_datasets[-1])
return accuracies, combined, all_datasets[-1]
def summarize_results(cfg_name, metric='arec5'):
"""
    Read all runs corresponding to cfg_name, and show the results in a human
    readable form together with the (unique) config overrides that were
    active. Results are averaged over runs.
"""
run_cfgs = read_file_into_list('../' + cfg_name)
run_cfgs_hydra = get_config(cfg_name, run_id=None)
# Convert to dicts
run_cfgs = [(i, dict([el.split('=') for el in conf]))
for i, conf in enumerate(run_cfgs)]
# Keep only the stuff that changes across them
run_cfgs = subselect_dict_keys_diff(run_cfgs)
all_res = {}
for (run_id, params), cfg_hydra in tqdm(zip(run_cfgs, run_cfgs_hydra),
total=len(run_cfgs),
desc='Loading results'):
try:
accuracies, _, _ = get_epic_marginalize_verb_noun(
(cfg_name, run_id))
epoch = get_epoch_from_resdir(cfg_name, run_id)
except (EmptyResdirError, OSError): # H5 didn't let it read
continue
if epoch != cfg_hydra.train.num_epochs:
# This training has not finished
continue
run_id = 0
if 'run_id' in params:
run_id = int(params['run_id'])
del params['run_id']
params_hash = tuple(sorted(list(params.items())))
if params_hash not in all_res:
all_res[params_hash] = {}
all_res[params_hash][run_id] = accuracies[metric]
for params_hash in all_res:
run_ids, values = zip(*all_res[params_hash].items())
print(f'{params_hash} [{run_ids}]: [{values}] '
f'mean: {np.mean(values)}, std: {np.std(values)}')
def plot_per_cls_perf(run_infos_all: list,
names: list,
metrics: list = ['vrec5_per_cls', 'nrec5_per_cls'],
cls_types: list = ['verb', 'noun'],
show_topn: int = 10,
xticks_rotation: float = 0,
show_subset: callable = None,
outfpath: str = 'figs/improved/'):
"""
Args:
run_infos_all: [[(cfg, sweep_id), (cfg, sweep_id)...],
[(cfg, sweep_id), (cfg, sweep_id)...], ...]
names: The name for each run_info group
metrics: There will be 1 graph for each
"""
assert len(run_infos_all) == len(names)
assert len(metrics) == len(cls_types)
final_accs = {cls_type: [] for cls_type in cls_types}
for i, run_infos in enumerate(tqdm(run_infos_all, desc='Reading acc')):
for run_id, run_info in enumerate(run_infos):
cfg_fpath, sweep_id = run_info
all_accuracies, _, dataset = get_epic_marginalize_verb_noun(
(cfg_fpath, sweep_id))
for metric, cls_type in zip(metrics, cls_types):
accuracies = all_accuracies[metric]
assert isinstance(accuracies,
dict), 'Supports per-class for now'
classes = operator.attrgetter(f'{cls_type}_classes')(dataset)
cls_id_to_name = {v: k for k, v in classes.items()}
for cls_id, score in accuracies.items():
final_accs[cls_type].append({
'method':
names[i],
'run_id':
run_id,
'cls_name':
cls_id_to_name[cls_id],
'accuracy':
score,
})
for cls_type in final_accs:
accs = pd.DataFrame(final_accs[cls_type])
# Print logs
for method in names:
for run_id in accs.run_id.unique():
this_acc = (accs[accs.method == method][
accs.run_id == run_id].accuracy.mean())
print(f'Check {method} {run_id}: {this_acc}')
mean_acc_by_cls = accs.groupby(['method',
'cls_name']).mean().reset_index()
first_col = mean_acc_by_cls[mean_acc_by_cls.method == names[0]]
last_col = mean_acc_by_cls[mean_acc_by_cls.method == names[-1]]
merged = first_col[['cls_name', 'accuracy'
]].merge(last_col[['cls_name', 'accuracy']],
on='cls_name',
how='outer',
suffixes=['_first', '_last'])
# get the largest gains
gains = (merged['accuracy_last'] -
merged['accuracy_first']).sort_values()
gained_labels = merged.loc[gains.index].cls_name.tolist()
if show_subset is not None:
gained_labels = [el for el in gained_labels if show_subset(el)]
gained_labels = gained_labels[-show_topn:]
accs_largegains = accs[accs.cls_name.isin(gained_labels)]
fig = plt.figure(num=None,
figsize=(2 * len(gained_labels), 4),
dpi=300)
ax = sns.barplot(x='cls_name',
y='accuracy',
hue='method',
data=accs_largegains,
order=gained_labels,
errwidth=1.0)
ax.set_xlabel('Classes')
ax.set_ylabel('Recall @ 5')
ax.set_xticklabels(ax.get_xticklabels(),
rotation=xticks_rotation,
ha='center')
plt.show()
save_graph(fig, os.path.join(outfpath, cls_type + '.pdf'))
def get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
_, combined, dataset = get_epic_marginalize_late_fuse(
run_infos,
weights,
dataset_key_suffix=dataset_key_suffix,
uid_key=uid_key,
eventual_fname=eventual_fname,
normalize_before_combine=normalize_before_combine)
results = {}
# Now the following may not be true since if the run_info contains an
# actual json, it might have more rows etc.
# assert len(combined[0]) == len(dataset)
action_to_verb_noun = {
val: key
for key, val in dataset.verb_noun_to_action.items()
}
for uid in tqdm(combined[0].keys(), desc='Computing res'):
verb_res = {f'{j}': val for j, val in enumerate(combined[0][uid])}
noun_res = {f'{j}': val for j, val in enumerate(combined[1][uid])}
top_100_actions = sorted(np.argpartition(combined[2][uid],
-100)[-100:],
key=lambda x: -combined[2][uid][x])
action_res = {
','.join((str(el)
for el in action_to_verb_noun[j])): combined[2][uid][j]
for j in top_100_actions
}
results[f'{uid}'] = {
'verb': verb_res,
'noun': noun_res,
'action': action_res,
}
# Add in all the discarded dfs with uniform distribution
if dataset.discarded_df is not None:
for _, row in dataset.discarded_df.iterrows():
if str(row[uid_key]) in results:
continue
results[f'{row[uid_key]}'] = {
'verb':
{f'{j}': 0.0
for j in range(len(dataset.verb_classes))},
'noun':
{f'{j}': 0.0
for j in range(len(dataset.noun_classes))},
'action': {f'0,{j}': 0.0
for j in range(100)},
}
output_dict = {
'version': f'{dataset.version}',
'challenge': dataset.challenge_type,
'results': results
}
return output_dict
def package_results_for_submission(run_infos,
weights,
normalize_before_combine=None):
res_s1 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='',
eventual_fname='seen.json',
normalize_before_combine=normalize_before_combine)
res_s2 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='_s2',
eventual_fname='unseen.json',
normalize_before_combine=normalize_before_combine)
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'seen.json'), 'w') as fout:
json.dump(res_s1, fout, indent=4)
with open(osp.join(output_dir, 'unseen.json'), 'w') as fout:
json.dump(res_s2, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/seen.json '
f'{output_dir}/unseen.json ',
shell=True)
def package_results_for_submission_ek100(run_infos, weights, sls=[1, 4, 4]):
res = get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix='',
uid_key='narration_id',
eventual_fname='test.json')
res['sls_pt'] = sls[0]
res['sls_tl'] = sls[1]
res['sls_td'] = sls[2]
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'test.json'), 'w') as fout:
json.dump(res, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/test.json ',
shell=True)
|
autoPyTorch/utils/config_space_hyperparameter.py | mens-artis/Auto-PyTorch | 1,657 | 11145246 | import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
def get_hyperparameter(hyper_type, name, value_range, log = False):
if isinstance(value_range, tuple) and len(value_range) == 2 and isinstance(value_range[1], bool) and \
isinstance(value_range[0], (tuple, list)):
value_range, log = value_range
if len(value_range) == 0:
raise ValueError(name + ': The range has to contain at least one element')
if len(value_range) == 1:
return CSH.Constant(name, int(value_range[0]) if isinstance(value_range[0], bool) else value_range[0])
if len(value_range) == 2 and value_range[0] == value_range[1]:
return CSH.Constant(name, int(value_range[0]) if isinstance(value_range[0], bool) else value_range[0])
if hyper_type == CSH.CategoricalHyperparameter:
return CSH.CategoricalHyperparameter(name, value_range)
if hyper_type == CSH.UniformFloatHyperparameter:
assert len(value_range) == 2, "Float HP range update for %s is specified by the two upper and lower values. %s given." %(name, len(value_range))
return CSH.UniformFloatHyperparameter(name, lower=value_range[0], upper=value_range[1], log=log)
if hyper_type == CSH.UniformIntegerHyperparameter:
assert len(value_range) == 2, "Int HP range update for %s is specified by the two upper and lower values. %s given." %(name, len(value_range))
return CSH.UniformIntegerHyperparameter(name, lower=value_range[0], upper=value_range[1], log=log)
raise ValueError('Unknown type: %s for hp %s' % (hyper_type, name) )
def add_hyperparameter(cs, hyper_type, name, value_range, log=False):
return cs.add_hyperparameter(get_hyperparameter(hyper_type, name, value_range, log))
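# Minimal usage sketch (not part of the original module; the hyperparameter
# names and ranges below are illustrative only):
if __name__ == "__main__":
    cs = CS.ConfigurationSpace()
    add_hyperparameter(cs, CSH.UniformFloatHyperparameter, "learning_rate", [1e-4, 1e-1], log=True)
    add_hyperparameter(cs, CSH.CategoricalHyperparameter, "optimizer", ["sgd", "adam"])
    print(cs)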
|
cirq-core/cirq/contrib/qcircuit/qcircuit_diagram_info_test.py | Saibaba-Alapati/Cirq | 3,326 | 11145254 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
import cirq.contrib.qcircuit as ccq
def test_get_qcircuit_diagram_info():
qubits = cirq.NamedQubit('x'), cirq.NamedQubit('y')
gate = cirq.SwapPowGate(exponent=0.5)
op = gate(*qubits)
qubit_map = {q: i for i, q in enumerate(qubits)}
args = cirq.CircuitDiagramInfoArgs(
known_qubits=qubits,
known_qubit_count=None,
use_unicode_characters=True,
precision=3,
qubit_map=qubit_map,
)
actual_info = ccq.get_qcircuit_diagram_info(op, args)
name = r'{\text{SWAP}^{0.5}}'
expected_info = cirq.CircuitDiagramInfo(
(r'\multigate{1}' + name, r'\ghost' + name), exponent=0.5, connected=False
)
assert actual_info == expected_info
gate = cirq.SWAP
op = gate(*qubits)
qubit_map = {q: i for q, i in zip(qubits, (4, 3))}
args = cirq.CircuitDiagramInfoArgs(
known_qubits=qubits,
known_qubit_count=None,
use_unicode_characters=True,
precision=3,
qubit_map=qubit_map,
)
actual_info = ccq.get_qcircuit_diagram_info(op, args)
expected_info = cirq.CircuitDiagramInfo(
(r'\ghost{\text{SWAP}}', r'\multigate{1}{\text{SWAP}}'), connected=False
)
assert actual_info == expected_info
qubit_map = {q: i for q, i in zip(qubits, (2, 5))}
args = cirq.CircuitDiagramInfoArgs(
known_qubits=qubits,
known_qubit_count=None,
use_unicode_characters=True,
precision=3,
qubit_map=qubit_map,
)
actual_info = ccq.get_qcircuit_diagram_info(op, args)
expected_info = cirq.CircuitDiagramInfo((r'\gate{\text{Swap}}',) * 2)
assert actual_info == expected_info
actual_info = ccq.get_qcircuit_diagram_info(op, cirq.CircuitDiagramInfoArgs.UNINFORMED_DEFAULT)
assert actual_info == expected_info
|
alembic/versions/2020081018_added_group_uuid_to_worker_9d3ab0b9d304.py | kl-chou/codalab-worksheets | 236 | 11145262 |
"""Added group_uuid to worker
Revision ID: 9d3ab0b9d304
Revises: <PASSWORD>
Create Date: 2020-08-10 18:07:31.646054
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d3ab0b9d304'
down_revision = '<PASSWORD>'
def upgrade():
op.add_column('worker', sa.Column('group_uuid', sa.String(length=63), nullable=True))
op.create_foreign_key(None, 'worker', 'group', ['group_uuid'], ['uuid'])
def downgrade():
op.drop_constraint(None, 'worker', type_='foreignkey')
op.drop_column('worker', 'group_uuid')
|
simplemonitor/Loggers/seq.py | cgroschupp/simplemonitor | 373 | 11145276 |
"""
Simplemonitor logger for seq
Inspiration from
https://raw.githubusercontent.com/eifinger/appdaemon-scripts/master/seqSink/seqSink.py
"""
import datetime
import json
from typing import cast
import requests
from ..Monitors.monitor import Monitor
from .logger import Logger, register
@register
class SeqLogger(Logger):
"""Logging to seq"""
logger_type = "seq"
only_failures = False
buffered = False
dateformat = None
def __init__(self, config_options: dict = None) -> None:
if config_options is None:
config_options = {}
super().__init__(config_options)
# i.e. http://192.168.0.5:5341
self.endpoint = cast(
str, self.get_config_option("endpoint", required=True, allow_empty=False)
)
# Potentially, would need to add a header for ApiKey
# Send message to indicate we have started logging
self.log_to_seq(
self.endpoint,
"SeqLogger",
"simpleMonitor",
"__init__",
None,
"logging enabled for simpleMonitor",
False,
)
def save_result2(self, name: str, monitor: Monitor) -> None:
try:
is_fail = monitor.test_success() is False
self.log_to_seq(
self.endpoint,
name,
monitor.name,
monitor.monitor_type,
str(monitor.get_params()),
monitor.describe(),
is_fail,
)
except Exception:
self.logger_logger.exception("Error sending to seq in %s", monitor.name)
def describe(self) -> str:
return "Sends simple log to seq using raw endpoint"
def log_to_seq(
self, endpoint, name, app_name, monitor_type, params, description, is_fail
):
"""Send an event to seq"""
event_data = {
"Timestamp": str(datetime.datetime.now()),
"Level": "Error" if is_fail is True else "Information",
"MessageTemplate": str(description),
"Properties": {
"Type": "simpleMonitor",
"Name": name,
"Monitor": str(app_name),
"MonitorType": monitor_type,
# "Params": params
},
}
if params is not None:
event_data["Properties"]["Params"] = params
request_body = {"Events": [event_data]}
try:
_ = json.dumps(request_body) # This just checks it is valid...
except TypeError:
self.logger_logger.error("Could not serialise %s", request_body)
return
try:
response = requests.post(self.endpoint, json=request_body)
if not response.status_code == 200 and not response.status_code == 201:
self.logger_logger.error(
"POST to seq failed with status code: %s", response
)
except requests.RequestException:
self.logger_logger.exception("Failed to log to seq")
|
validation_tests/analytical_exact/trapezoidal_channel/plot_results.py | samcom12/anuga_core | 136 | 11145293 | from anuga.utilities import plot_utils as util
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as pyplot
import numpy
from project import *
filename = 'channel_floodplain.sww'
# Time-index to plot outputs from
p2 = util.get_output(filename)
p=util.get_centroids(p2, velocity_extrapolation=True)
v = (p.x>6.0)*(p.x<8.0)
print numpy.any(v)
# Numerical results along a central channel 'slice'
index= -1
V1 = p.stage[index,v] - p.elev[v]
V2 = p.yvel[index,v]
V3 = p.xvel[index,v]
##########################################################################
# Analytical solution of steady uniform 2D flow in a trapezoidal channel.
##########################################################################
Qin=4.6932 # Inflow discharge
slp=1./300. # Floodplain slope (= water slope for steady uniform flow)
man_n=0.03 # Manning's n
Bcentral=6.0 #Flat bed width of the trapezoidal channel
alpha=0.5 # Side slope of the trapezoidal banks
k = (slp*(1./man_n)**2)**0.5 # At any point, the analytical solution says U = k*d^(2/3)
# Function to calculate the discharge, given the channel centre depth dc, assuming
# steady uniform flow
def discharge_su(dc):
if(alpha>0.):
out = 2*k*( 3./(8.*alpha)*(dc)**(8./3.)) +Bcentral*k*(dc)**(5./3.)
else:
out = Bcentral*k*(dc)**(5./3.)
return out
# Function that will be minimized to find the depth associated with discharge Qin
def minme(dc):
q1 = discharge_su(dc)
return (q1-Qin)**2.
# Minimise the function mimne, to find the centre depth.
import scipy.optimize
dc_analytical = scipy.optimize.fmin(minme, x0=1.0)[0]
print 'dc_analytic ',dc_analytical
##################################
# Plots
##################################
# Analytical solution has U*abs(U)*n^2 / D^(4./3.) = Sf = bed slope
# Hence, the following two variables should be equal -- I have checked that x velocities are fairly small
pyplot.clf()
pyplot.figure(figsize=(12.,8.))
pyplot.plot(p.y[v], V2*0+k*dc_analytical**(2./3.), 'o', label='analytical velocity')
pyplot.plot(p.y[v], (V2**2)**0.5,'o', label='numerical velocity')
pyplot.plot(p.y[v], V1, 'o',label='numerical depth')
pyplot.plot(p.y[v], V1*0. + dc_analytical, 'o', label='analytical depth')
pyplot.title('Mid channel numerical velocities and depths, vs analytical velocities and depths')
pyplot.legend(loc='best')
pyplot.xlabel('Down-channel distance (m)')
pyplot.ylabel('Generic scale (m or m/s)')
pyplot.savefig('fig1mid_channel.png')
# Plot velocity over the cross-section
pyplot.clf()
v1 = (p.y<105.0)&(p.y>95.0)
analytical_stage = min(p.elev[v1]) + dc_analytical
analytic_vel = ( (1./300.)*numpy.maximum(analytical_stage-p.elev[v1],0.0)**(4./3.)*(1./0.03)**2.)**0.5
analytic_vel = analytic_vel*(analytical_stage>p.elev[v1])
temp0 = p.stage[index,v1]*0. + analytical_stage
temp1 = (temp0) * (temp0 > p.elev[v1])
temp2 = (p.elev[v1]) * (temp0 < p.elev[v1])
Analytic_Stage = temp1 + temp2
pyplot.figure(figsize=(12.,8.))
pyplot.plot(p.x[v1], analytic_vel,'o', label='analytical velocity')
pyplot.plot(p.x[v1], p.yvel[index,v1],'o', label='numerical velocity')
#pyplot.plot(p.x[v1],p.stage[index,v1]-p.elev[v1],'ko', label='numerical height')
pyplot.plot(p.x[v1],p.stage[index,v1],'o', label='numerical stage')
pyplot.plot(p.x[v1],Analytic_Stage,'o', label='analytical stage')
pyplot.plot(p.x[v1],p.elev[v1],'o', label='bed elevation')
pyplot.ylim([-4,2])
pyplot.legend(loc=8)
pyplot.title('Velocity (analytical and numerical) and Stage:' + '\n' +'Upstream channel regions (95 to 105m)' +'\n')
pyplot.xlabel('Cross-channel distance (m)')
pyplot.ylabel('Generic scale (m or m/s)')
pyplot.savefig('fig2upstream_channel.png')
# Plot velocity over the cross-section
pyplot.clf()
v1 = (p.y<505.0)&(p.y>495.0)
analytical_stage = min(p.elev[v1]) + dc_analytical
analytic_vel = ( (1./300.)*numpy.maximum(analytical_stage-p.elev[v1],0.0)**(4./3.)*(1./0.03)**2.)**0.5
analytic_vel = analytic_vel*(analytical_stage>p.elev[v1])
temp0 = p.stage[index,v1]*0. + analytical_stage
temp1 = (temp0) * (temp0 > p.elev[v1])
temp2 = (p.elev[v1]) * (temp0 < p.elev[v1])
Analytic_Stage = temp1 + temp2
pyplot.figure(figsize=(12.,8.))
pyplot.plot(p.x[v1], analytic_vel,'o', label='analytical velocity')
pyplot.plot(p.x[v1], p.yvel[index,v1],'o', label='numerical velocity')
#pyplot.plot(p.x[v1],p.stage[index,v1]-p.elev[v1],'ko', label='numerical height')
pyplot.plot(p.x[v1],p.stage[index,v1],'o', label='numerical stage')
pyplot.plot(p.x[v1],Analytic_Stage,'o', label='analytical stage')
pyplot.plot(p.x[v1],p.elev[v1],'o', label='bed elevation')
pyplot.ylim([-4,2])
pyplot.legend(loc=10)
pyplot.title('Velocity (analytical and numerical) and Stage:' + '\n' +'Central channel regions (495 to 505m)' +'\n')
pyplot.xlabel('Cross-channel distance (m)')
pyplot.ylabel('Generic scale (m or m/s)')
pyplot.savefig('fig3central_channel.png')
# Plot velocity over the cross-section
pyplot.clf()
v1 = (p.y<705.0)&(p.y>695.0)
analytical_stage = min(p.elev[v1]) + dc_analytical
analytic_vel = ( (1./300.)*numpy.maximum(analytical_stage-p.elev[v1],0.0)**(4./3.)*(1./0.03)**2.)**0.5
analytic_vel = analytic_vel*(analytical_stage>p.elev[v1])
temp0 = p.stage[index,v1]*0. + analytical_stage
temp1 = (temp0) * (temp0 > p.elev[v1])
temp2 = (p.elev[v1]) * (temp0 < p.elev[v1])
Analytic_Stage = temp1 + temp2
pyplot.figure(figsize=(12.,8.))
pyplot.plot(p.x[v1], analytic_vel,'o', label='analytical velocity')
pyplot.plot(p.x[v1], p.yvel[index,v1],'o', label='numerical velocity')
#pyplot.plot(p.x[v1],p.stage[index,v1]-p.elev[v1],'ko', label='numerical height')
pyplot.plot(p.x[v1],p.stage[index,v1],'o', label='numerical stage')
pyplot.plot(p.x[v1],Analytic_Stage,'o', label='analytical stage')
pyplot.plot(p.x[v1],p.elev[v1],'o', label='bed elevation')
pyplot.ylim([-4,2])
pyplot.legend(loc=10)
pyplot.title('Velocity (analytical and numerical) and Stage:' + '\n' +'Downstream channel regions (695 to 705m)' +'\n')
pyplot.xlabel('Cross-channel distance (m)')
pyplot.ylabel('Generic scale (m or m/s)')
pyplot.savefig('fig4downstream_channel.png')
print '#======================================================================'
print '# Extract some cross section info'
print '#======================================================================'
from anuga.shallow_water.sww_interrogate import get_flow_through_multiple_cross_sections
polyline0 = [ [floodplain_width, 10.0], [0., 10.0]]
polyline1 = [[floodplain_width, floodplain_length-300.0], [0., floodplain_length-300.0]]
polyline2 = [[floodplain_width, floodplain_length-1.0], [0., floodplain_length-1.0]]
polylines= [polyline0, polyline1, polyline2]
time, [Q0,Q1,Q2] = get_flow_through_multiple_cross_sections(filename, polylines, verbose=True)
pyplot.figure(figsize=(12.,8.))
pyplot.plot(time, Q0, label='10m')
pyplot.plot(time, Q1, label='500m')
pyplot.plot(time, Q2, label='799m')
pyplot.plot([0,time[-1]], [Qin,Qin], label='Input Q')
pyplot.ylim([0,7])
pyplot.legend(loc=10)
pyplot.title(' (Approximate) Cross sectional flow across transect at 10m, 500m and 799m')
pyplot.xlabel('Time (sec)')
pyplot.ylabel('Discharge (m^3/sec)')
pyplot.savefig('cross_section_10_500_790.png')
|
modoboa/maillog/lib.py | HarshCasper/modoboa | 1,602 | 11145302 |
# coding: utf-8
import sys
import time
def date_to_timestamp(timetuple):
"""Date conversion.
    Convert a date/time tuple to the number of seconds since the epoch.
    :param list timetuple: list containing the date components
:return: an integer
"""
date = " ".join(
[("%d" % elem) if isinstance(elem, int) else elem
for elem in timetuple]
)
fmt = "%Y %m %d %H %M %S" \
if timetuple[1].isdigit() else "%Y %b %d %H %M %S"
try:
local = time.strptime(date, fmt)
except ValueError:
print >> sys.stderr, "Error: failed to convert date and time"
return 0
return int(time.mktime(local))
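# Hedged usage sketch (illustrative only, not part of the original module):
if __name__ == "__main__":
    # Syslog-style components: year, abbreviated month, day, hour, minute, second.
    print(date_to_timestamp(["2017", "Mar", "14", "12", "30", "00"]))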
|
netrd/dynamics/SIS.py | sdmccabe/netrd | 116 | 11145314 |
"""
SIS.py
------
Implementation of Susceptible-Infected-Susceptible (SIS) model dynamics on a
network.
author: <NAME>
Submitted as part of the 2019 NetSI Collabathon.
"""
from netrd.dynamics import BaseDynamics
import numpy as np
import networkx as nx
class SISModel(BaseDynamics):
"""Susceptible-Infected-Susceptible dynamical process."""
def simulate(self, G, L, num_seeds=1, beta=None, mu=None):
r"""Simulate SIS model dynamics on a network.
The results dictionary also stores the ground truth network as
`'ground_truth'`.
Parameters
----------
G (nx.Graph)
the input (ground-truth) graph with :math:`N` nodes.
L (int)
the length of the desired time series.
num_seeds (int)
the number of initially infected nodes.
beta (float)
the infection rate for the SIS process.
mu (float)
the recovery rate for the SIS process.
Returns
-------
TS (np.ndarray)
an :math:`N \times L` array of synthetic time series data.
"""
H = G.copy()
N = H.number_of_nodes()
TS = np.zeros((N, L))
index_to_node = dict(zip(range(G.order()), list(G.nodes())))
# sensible defaults for beta and mu
if not beta:
avg_k = np.mean(list(dict(H.degree()).values()))
beta = 1 / avg_k
if not mu:
mu = 1 / H.number_of_nodes()
seeds = np.random.permutation(
np.concatenate([np.repeat(1, num_seeds), np.repeat(0, N - num_seeds)])
)
TS[:, 0] = seeds
infected_attr = {index_to_node[i]: s for i, s in enumerate(seeds)}
nx.set_node_attributes(H, infected_attr, 'infected')
nx.set_node_attributes(H, 0, 'next_infected')
# SIS dynamics
for t in range(1, L):
nodes = np.random.permutation(H.nodes)
for i in nodes:
if H.nodes[i]['infected']:
neigh = H.neighbors(i)
for j in neigh:
if np.random.random() < beta:
H.nodes[j]['next_infected'] = 1
if np.random.random() < mu:
H.nodes[i]['infected'] = 0
infections = nx.get_node_attributes(H, 'infected')
next_infections = nx.get_node_attributes(H, 'next_infected')
# store SIS dynamics for time t
TS[:, t] = np.array(list(infections.values()))
nx.set_node_attributes(H, next_infections, 'infected')
nx.set_node_attributes(H, 0, 'next_infected')
# if the epidemic dies off, stop
if TS[:, t].sum() < 1:
break
# if the epidemic died off, pad the time series to the right shape
if TS.shape[1] < L:
TS = np.hstack([TS, np.zeros((N, L - TS.shape[1]))])
self.results['ground_truth'] = H
self.results['TS'] = TS
self.results['index_to_node'] = index_to_node
return TS
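# Hedged usage sketch (illustrative only; assumes SISModel() can be constructed
# with no arguments, as is the netrd convention for dynamics classes):
if __name__ == "__main__":
    G = nx.erdos_renyi_graph(50, 0.1, seed=0)
    TS = SISModel().simulate(G, L=100, num_seeds=2, beta=0.2, mu=0.05)
    print(TS.shape)  # expected: (50, 100)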
|
stix/test/extensions/identity/ciq_identity_3_0_test.py | saegel/python-stix | 194 | 11145321 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import BytesIO, text_type
from stix.threat_actor import ThreatActor
import stix.extensions.identity.ciq_identity_3_0 as ciq
from stix.test import EntityTestCase
from stix.core import STIXPackage
class CIQIdentity3_0InstanceTests(EntityTestCase, unittest.TestCase):
klass = ciq.CIQIdentity3_0Instance
_full_dict = {
'id': 'example:ciqidentity-1',
'name': '<NAME>',
'roles': ['Programmer', 'Analyst'],
'specification': {
'party_name': {
'name_lines': [
{'value': 'Foo'},
{'value': 'Bar'}
],
'organisation_names': [
{
'name_elements': [
{
'element_type': 'FullName',
'value': 'Foo Inc.'
}
],
'subdivision_names': [
{
'type': 'Department',
'value': 'InfoSec'
}
]
}
],
'person_names': [
{
'type': 'LegalName',
'name_elements': [
{
'element_type': 'FirstName',
'value': 'John',
},
{
'element_type': 'LastName',
'value': 'Smith',
}
]
},
{
'name_elements': [
{'value': '<NAME>'}
]
}
]
},
'languages': [
{'value': 'test language'}
],
'addresses': [
{
'free_text_address': {
'address_lines': ['1234 Example Lane.']
},
'country': {
'name_elements': [
{
'value': 'name 1',
'name_code': 'US',
'name_code_type': 'ISO 3166-1 alpha-2'
},
{
'value': 'name 2',
'name_code': 'BZ',
'name_code_type': 'ISO 3166-1 alpha-2',
'name_type': 'ISO'
}
]
},
'administrative_area': {
'name_elements': [
{'value': 'admin area name 1'},
{'value': 'admin area name 2'}
]
}
}
],
'electronic_address_identifiers': [
{
'type': 'EMAIL',
'value': 'an eai v'
}
],
'free_text_lines': [
{
'type': 'ftl type',
'value': 'ftl value'
}
],
'contact_numbers': [
{
'communication_media_type': 'Fax',
'contact_number_elements': [
{
'value': 'a contact number',
'type': 'Pin'
}
]
}
],
'nationalities': [
{
'name_elements': [
{'value': 'name 1'},
{'value': 'name 2'}
]
}
],
'organisation_info': {
'industry_type': 'SECTOR 1 | SECTOR 2',
}
},
'xsi:type': 'stix-ciqidentity:CIQIdentity3.0InstanceType'
}
class IdentityInThreatActorTests(EntityTestCase, unittest.TestCase):
klass = ThreatActor
_full_dict = {
"id": "example:threatactor-c96266cf-ccb3-43f3-b44e-26dbd66273e5",
"identity": {
"specification": {
"addresses": [
{
"administrative_area": {
"name_elements": [{"value": "California"}]
},
"country": {
"name_elements": [
{
"name_code": "US",
"name_code_type": "ISO 3166-1 alpha-2"
}
]
}
}
],
"electronic_address_identifiers": [
{"value": "<EMAIL>"},
{"value": "facebook.com/thediscoteam"},
{"value": "twitter.com/realdiscoteam"}
],
"languages": [{"value": "Spanish"}],
"party_name": {
"organisation_names": [
{
"name_elements": [{"value": "<NAME>"}],
"type": "CommonUse"
},
{
"name_elements": [{"value": "Equipo del Discoteca"}],
"type": "UnofficialName"
}
]
}
},
"xsi:type": "stix-ciqidentity:CIQIdentity3.0InstanceType"
},
"timestamp": "2016-10-04T19:43:57.382126+00:00",
"title": "Disco Team Threat Actor Group"
}
def test_identity_from_xml(self):
obj = self.klass.from_dict(self._full_dict)
sp = STIXPackage()
sp.add(obj)
s = BytesIO(sp.to_xml())
pkg = STIXPackage.from_xml(s)
self.assertTrue("CIQIdentity3.0InstanceType" in text_type(pkg.to_xml()))
if __name__ == "__main__":
unittest.main()
|
tests/test_provider_gavinbunney_bitbucketserver.py | mjuenema/python-terrascript | 507 | 11145348 |
# tests/test_provider_gavinbunney_bitbucketserver.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:29 UTC)
def test_provider_import():
import terrascript.provider.gavinbunney.bitbucketserver
def test_resource_import():
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_banner
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_default_reviewers_condition,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_global_permissions_group,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_global_permissions_user,
)
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_group
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_license
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_mail_server,
)
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_plugin
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_plugin_config,
)
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_project
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_project_hook,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_project_permissions_group,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_project_permissions_user,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_repository,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_repository_hook,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_repository_permissions_group,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_repository_permissions_user,
)
from terrascript.resource.gavinbunney.bitbucketserver import bitbucketserver_user
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_user_access_token,
)
from terrascript.resource.gavinbunney.bitbucketserver import (
bitbucketserver_user_group,
)
def test_datasource_import():
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_application_properties,
)
from terrascript.data.gavinbunney.bitbucketserver import bitbucketserver_cluster
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_global_permissions_groups,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_global_permissions_users,
)
from terrascript.data.gavinbunney.bitbucketserver import bitbucketserver_group_users
from terrascript.data.gavinbunney.bitbucketserver import bitbucketserver_groups
from terrascript.data.gavinbunney.bitbucketserver import bitbucketserver_plugin
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_project_hooks,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_project_permissions_groups,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_project_permissions_users,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_repository_hooks,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_repository_permissions_groups,
)
from terrascript.data.gavinbunney.bitbucketserver import (
bitbucketserver_repository_permissions_users,
)
from terrascript.data.gavinbunney.bitbucketserver import bitbucketserver_user
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.gavinbunney.bitbucketserver
#
# t = terrascript.provider.gavinbunney.bitbucketserver.bitbucketserver()
# s = str(t)
#
# assert 'https://github.com/gavinbunney/terraform-provider-bitbucketserver' in s
# assert '1.5.0' in s
|
vh-gunicorn/gunicorn.py | rEinve/ajenti-v | 150 | 11145376 |
import os
import shutil
from ajenti.api import *
from ajenti.plugins.supervisor.client import SupervisorServiceManager
from ajenti.plugins.vh.api import ApplicationGatewayComponent, SanityCheck
from ajenti.plugins.vh.processes import SupervisorRestartable
from ajenti.util import platform_select
from reconfigure.configs import SupervisorConfig
from reconfigure.items.supervisor import ProgramData
TEMPLATE_PROCESS = """
import multiprocessing
bind = 'unix:/var/run/ajenti-v-gunicorn-%(id)s.sock'
user = '%(user)s'
chdir = '%(root)s'
workers = %(workers)s or (multiprocessing.cpu_count() * 2 + 1)
%(custom_conf)s
"""
class GUnicornServerTest (SanityCheck):
def __init__(self, backend):
SanityCheck.__init__(self)
self.backend = backend
self.type = _('GUnicorn service')
self.name = backend.id
def check(self):
s = SupervisorServiceManager.get().get_one(self.backend.id)
if s:
self.message = s.status
return s and s.running
@plugin
class Gunicorn (ApplicationGatewayComponent):
id = 'python-wsgi'
title = 'Python WSGI'
def init(self):
self.config_dir = '/etc/gunicorn.ajenti.d/'
self.checks = []
def __generate_website(self, website):
for location in website.locations:
if location.backend.type == 'python-wsgi':
location.backend.__config_name = location.backend.id.replace('-', '_') + '.py'
c = TEMPLATE_PROCESS % {
'id': location.backend.id,
'user': location.backend.params.get('username', None) or 'www-data',
'root': location.path or website.root,
'workers': location.backend.params.get('workers', None),
'custom_conf': location.backend.params.get('custom_conf') or '',
}
open(os.path.join(self.config_dir, location.backend.__config_name), 'w').write(c)
def create_configuration(self, config):
self.checks = []
if os.path.exists(self.config_dir):
shutil.rmtree(self.config_dir)
os.mkdir(self.config_dir)
for website in config.websites:
if website.enabled:
self.__generate_website(website)
sup = SupervisorConfig(path=platform_select(
debian='/etc/supervisor/supervisord.conf',
centos='/etc/supervisord.conf',
))
sup.load()
COMMENT = 'Generated by Ajenti-V'
for p in sup.tree.programs:
if p.comment == COMMENT:
sup.tree.programs.remove(p)
for website in config.websites:
if website.enabled:
for location in website.locations:
if location.backend.type == 'python-wsgi':
self.checks.append(GUnicornServerTest(location.backend))
self.__generate_website(website)
p = ProgramData()
p.name = location.backend.id
p.comment = COMMENT
p.command = 'gunicorn -c %s%s "%s"' % (self.config_dir, location.backend.__config_name, location.backend.params['module'])
p.directory = location.path or website.root
virtualenv = location.backend.params.get('venv', None)
if virtualenv:
p.environment = 'PATH="%s:%s"' % (os.path.join(virtualenv, 'bin'), os.environ['PATH'])
p.command = os.path.join(virtualenv, 'bin') + '/' + p.command
sup.tree.programs.append(p)
sup.save()
def apply_configuration(self):
SupervisorRestartable.get().schedule()
def get_checks(self):
return self.checks
|
samples/14-semparsing/ucca/scripts/annotate.py | tomshafer/nn4nlp | 1,037 | 11145384 | #!/usr/bin/env python3
import argparse
import glob
import sys
from ucca.ioutil import file2passage, passage2file
from ucca.textutil import annotate
desc = """Read UCCA standard format in XML or binary pickle, and write back with POS tags and dependency parse."""
def main():
argparser = argparse.ArgumentParser(description=desc)
argparser.add_argument("filenames", nargs="+", help="passage file names to annotate")
argparser.add_argument("-v", "--verbose", action="store_true", help="print tagged text for each passage")
args = argparser.parse_args()
for pattern in args.filenames:
filenames = glob.glob(pattern)
if not filenames:
raise IOError("Not found: " + pattern)
for filename in filenames:
passage = file2passage(filename)
annotate(passage, verbose=args.verbose, replace=True)
sys.stderr.write("Writing '%s'...\n" % filename)
passage2file(passage, filename, binary=not filename.endswith("xml"))
sys.exit(0)
if __name__ == '__main__':
main()
|
tests/math/test_geometry.py | AdvaitDhingra/scikit-hep | 150 | 11145413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the skhep.math.geometry module.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
from skhep.math.geometry import *
from skhep.utils.py23 import *
import pytest
from pytest import approx
from math import pi
# -----------------------------------------------------------------------------
# Actual tests
# -----------------------------------------------------------------------------
def test_geometry_constructors():
v1 = Vector3D()
p1 = Point3D()
assert str(p1) == "Point3D(x=0.0,y=0.0,z=0.0)"
assert repr(p1) == "<Point3D(x=0.0,y=0.0,z=0.0)>"
l1 = Line3D(p1, Vector3D(0, 0, 1))
assert str(l1) == "Line3D({0},{1})".format(p1, Vector3D(0, 0, 1))
assert repr(l1) == "<Line3D({0},{1})>".format(p1, Vector3D(0, 0, 1))
pl1 = Plane3D(p1, Vector3D(0, 0, 1))
assert str(pl1) == "Plane3D({0},{1})".format(p1, Vector3D(0, 0, 1))
assert repr(pl1) == "<Plane3D({0},{1})>".format(p1, Vector3D(0, 0, 1))
l2 = Line3D.from_points(p1, Point3D(1, 2, 3))
pl2 = Plane3D.from_points(p1, Point3D(1, 2, 3), Point3D(3, 2, 1))
p2 = Point3D.frompoint(1, 1, 1)
pl3 = Plane3D.from_line_and_point(l1, p2)
p3 = Point3D.fromiterable([1.0, 1.0, 1.0])
assert p3 == Point3D(1.0, 1.0, 1.0)
p4 = Point3D.fromcylindricalcoords(1.0, 0.0, 1.0)
assert p4 == Point3D(1.0, 0.0, 1.0)
assert p4.rho == 1.0
p5 = Point3D.fromsphericalcoords(1.0, 0.0, 0.0)
assert p5 == Point3D(0.0, 0.0, 1.0)
assert p5.theta == 0.0
assert p5.phi == 0.0
v2 = Vector3D(1, 1, 1)
with pytest.raises(NotImplementedError):
Line3D.__init__(Line3D(), p1, p1)
with pytest.raises(NotImplementedError):
Line3D.__init__(Line3D(), v2, v2)
with pytest.raises(ValueError):
Line3D.__init__(Line3D(), p1, v1)
with pytest.raises(NotImplementedError):
Plane3D.__init__(Plane3D(), p1, p1)
with pytest.raises(NotImplementedError):
Plane3D.__init__(Plane3D(), v2, v2)
with pytest.raises(ValueError):
Plane3D.__init__(Plane3D(), p1, v1)
def test_operators():
v1 = Vector3D(1, 2, 3)
v2 = Vector3D(3, 2, 1)
p1 = Point3D(0, 0, 1)
p2 = Point3D(0, 1, 0)
line1 = Line3D(p1, v1)
line2 = Line3D(p2, v2)
plane1 = Plane3D(p1, v1)
plane2 = Plane3D(p2, v2)
assert p1 + v1 == Point3D(1, 2, 4)
assert v1 + p1 == Point3D(1, 2, 4)
assert p1 - v1 == Point3D(-1, -2, -2)
assert p1 - p2 == Vector3D(0, -1, 1)
p1 -= v1
p1 += v1
assert p1 == p1
assert p1 != p2
assert line1 == line1
assert line1 != line2
assert plane1 == plane1
assert plane1 != line2
def test_contains():
line = Line3D(Point3D(), Vector3D(0, 0, 1))
plane = Plane3D(Point3D(), Vector3D(0, 0, 1))
assert Point3D(0, 0, -10) in line
assert Point3D(1, 0, -10) not in line
assert Point3D(1, 0, 0) in plane
assert Point3D(0, 1, 0) in plane
assert Point3D(0, 0, 2) not in plane
linex = Line3D(Point3D(), Vector3D(1, 0, 0))
liney = Line3D(Point3D(), Vector3D(0, 1, 0))
linez = Line3D(Point3D(), Vector3D(0, 0, 1))
assert linex in plane
assert liney in plane
assert linez not in plane
def test_distance():
with pytest.raises(NotImplementedError):
Point3D.distance(Point3D(), 2.0)
with pytest.raises(NotImplementedError):
Line3D.distance(Line3D(), 2.0)
with pytest.raises(NotImplementedError):
Plane3D.distance(Plane3D(), 2.0)
p0 = Point3D()
p1 = Point3D(1, 0, 0)
assert p0.distance(p0) == 0
assert p0.distance(p1) == 1
line = Line3D(Point3D(), Vector3D(0, 0, 1))
assert p0.distance(line) == 0
assert p1.distance(line) == 1
assert line.distance(p0) == 0
assert line.distance(p1) == 1
plane = Plane3D(Point3D(1, 1, 0), Vector3D(0, 0, 1))
assert plane.distance(plane) == 0
assert plane.distance(p0) == 0
line1 = Line3D(Point3D(1, 2, 3), Vector3D(4, 5, 6))
line2 = Line3D(Point3D(0, 0, 1), Vector3D(1, 1, 0))
assert plane.distance(line1) == 0
assert plane.distance(line2) == 1
assert line1.distance(plane) == 0
assert line2.distance(plane) == 1
line3 = Line3D(Point3D(0, 0, 2), Vector3D(1, 1, 0))
assert line3.distance(line2) == 1
def test_angle():
p0 = Point3D()
with pytest.raises(NotImplementedError):
Line3D.angle(Line3D(), p0)
with pytest.raises(NotImplementedError):
Plane3D.angle(Plane3D(), p0)
line0 = Line3D(Point3D(), Vector3D(0, 0, 1))
line1 = Line3D(Point3D(0, 0, 1), Vector3D(0, 1, 0))
plane0 = Plane3D(Point3D(), Vector3D(0, 0, 1))
plane1 = Plane3D(Point3D(0, 0, 1), Vector3D(0, 1, 0))
assert line0.angle(line1) == pi / 2.0
assert line1.angle(line0) == pi / 2.0
assert line1.angle(line1) == 0.0
assert line0.angle(plane0) == 0.0
assert line0.angle(plane1) == pi / 2.0
assert line1.angle(plane0) == pi / 2.0
assert line1.angle(plane1) == 0.0
assert plane0.angle(plane1) == pi / 2.0
assert plane1.angle(plane0) == pi / 2.0
assert plane1.angle(plane1) == 0.0
assert plane0.angle(line0) == 0.0
assert plane0.angle(line1) == pi / 2.0
assert plane1.angle(line0) == pi / 2.0
assert plane1.angle(line1) == 0.0
def test_intersect():
with pytest.raises(NotImplementedError):
Line3D.intersect(Line3D(), 1)
with pytest.raises(NotImplementedError):
Plane3D.intersect(Plane3D(), 1)
p0 = Point3D()
p1 = Point3D(0, 0, 1)
line0 = Line3D(Point3D(), Vector3D(0, 0, 1))
line1 = Line3D(Point3D(0, 0, 1), Vector3D(0, 1, 0))
plane0 = Plane3D(Point3D(), Vector3D(0, 0, 1))
plane1 = Plane3D(Point3D(0, 0, 1), Vector3D(0, 1, 0))
assert line0.intersect(p0) == p0
assert line0.intersect(p1) == p1
assert line1.intersect(p0) == None
assert line1.intersect(p1) == p1
assert line0.intersect(line1) == Point3D(0, 0, 1)
assert line1.intersect(line0) == Point3D(0, 0, 1)
assert line0.intersect(plane0) == Point3D()
assert line0.intersect(plane1) == line0
assert line1.intersect(plane0) == None
assert line1.intersect(plane1) == Point3D(0, 0, 1)
assert plane0.intersect(p0) == p0
assert plane0.intersect(p1) == None
assert plane1.intersect(p0) == p0
assert plane1.intersect(p1) == p1
assert plane0.intersect(line0) == Point3D()
assert plane0.intersect(line1) == None
assert plane1.intersect(line0) == line0
assert plane1.intersect(line1) == Point3D(0, 0, 1)
assert plane0.intersect(plane1) == Line3D(Point3D(), Vector3D(1, 0, 0))
assert plane1.intersect(plane0) == Line3D(Point3D(), Vector3D(1, 0, 0))
assert plane0.intersect(plane0) == None
line2 = Line3D(Point3D(0, 1, 0), Vector3D(0, 0, 1))
assert line2.intersect(line0) == None
|
NKUCodingCat/0021/encry.py | saurabh896/python-1 | 3,976 | 11145426 | #coding=utf-8
import os,time,random,hashlib,math
def md5(str):
m = hashlib.md5()
m.update(str)
return m.hexdigest()
def Salt(len=64):
return "%s"*len%tuple([chr(65+random.randint(0,25)) for i in range(len)])
def encry(Str):
Log = int(math.log(len(Str),2))+1
MaxLen = 2**Log
SAL = Salt(MaxLen-len(Str)+random.randint(8,16))
ENC = md5(Str+SAL)
return SAL,ENC
print encry("sudgds") |
service_catalog/admin.py | LaudateCorpus1/squest | 112 | 11145441 | from django.db import models
from django.contrib import admin
from martor.widgets import AdminMartorWidget
from service_catalog.models.documentation import Doc
class DocAdmin(admin.ModelAdmin):
formfield_overrides = {
models.TextField: {'widget': AdminMartorWidget},
}
admin.site.register(Doc, DocAdmin)
|
mmdet/models/detectors/dense_reppoints_detector.py | azuredsky/RepPointsV2 | 295 | 11145458 | import mmcv
import numpy as np
import scipy.interpolate
import torch
from mmdet.core import bbox2result
from .single_stage import SingleStageDetector
from ..builder import DETECTORS
@DETECTORS.register_module()
class DenseRepPointsDetector(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(DenseRepPointsDetector, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
@property
def with_mask(self):
return True
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
x = self.extract_feat(img)
outs = self.bbox_head(x, test=False)
loss_inputs = outs + (gt_bboxes, gt_masks, gt_labels, img_metas)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
def simple_test(self, img, img_meta, rescale=False):
x = self.extract_feat(img)
outs = self.bbox_head(x, test=True)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
det_bboxes, det_points, det_pts_scores, det_cls = bbox_list[0]
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
bbox_results = bbox2result(det_bboxes, det_cls, self.bbox_head.num_classes)
rle_results = self.get_seg_masks(det_pts_scores, det_points, det_bboxes, det_cls,
self.test_cfg, ori_shape, scale_factor, rescale)
        # For visualization (rescale=False), we also return pts_results to show the points
if not rescale:
det_points_reshape = det_points.reshape(det_points.shape[0], -1, 2)
det_pts_scores_reshape = det_pts_scores.reshape(det_pts_scores.shape[0], -1, 1)
det_pts_score_cat = torch.cat([det_points_reshape, det_pts_scores_reshape], dim=-1) \
.reshape(det_points.shape[0], -1)
det_pts_score_cls_cat = torch.cat([det_pts_score_cat, det_points[:, [-1]]], dim=-1)
pts_results = pts2result(det_pts_score_cls_cat, det_cls, self.bbox_head.num_classes)
return (bbox_results, rle_results), pts_results
else:
return bbox_results, rle_results
def get_seg_masks(self, pts_score, det_pts, det_bboxes, det_labels,
test_cfg, ori_shape, scale_factor, rescale=False):
"""
Get segmentation masks from points and scores
Args:
pts_score (Tensor or ndarray): shape (n, num_pts)
det_pts (Tensor): shape (n, num_pts*2)
det_bboxes (Tensor): shape (n, 4)
det_labels (Tensor): shape (n, 1)
test_cfg (dict): rcnn testing config
ori_shape: original image size
scale_factor: scale factor for image
rescale: whether rescale to original size
Returns:
list[list]: encoded masks
"""
cls_segms = [[] for _ in range(self.bbox_head.num_classes)]
bboxes = det_bboxes.cpu().numpy()[:, :4]
labels = det_labels.cpu().numpy()
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
for i in range(bboxes.shape[0]):
bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
label = labels[i]
w = max(bbox[2] - bbox[0], 1)
h = max(bbox[3] - bbox[1], 1)
im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
im_pts = det_pts[i].clone()
im_pts = im_pts.reshape(-1, 2)
im_pts_score = pts_score[i]
im_pts[:, 0] = (im_pts[:, 0] - bbox[0])
im_pts[:, 1] = (im_pts[:, 1] - bbox[1])
_h, _w = h, w
corner_pts = im_pts.new_tensor([[0, 0], [_h - 1, 0], [0, _w - 1], [_w - 1, _h - 1]])
corner_score = im_pts_score.new_tensor([0, 0, 0, 0])
im_pts = torch.cat([im_pts, corner_pts], dim=0).cpu().numpy()
im_pts_score = torch.cat([im_pts_score, corner_score], dim=0).cpu().numpy()
# im_pts = im_pts.cpu().numpy()
# im_pts_score = im_pts_score.cpu().numpy()
# im_pts_score = (im_pts_score > 0.5).astype(np.float32)
grids = tuple(np.mgrid[0:_w:1, 0:_h:1])
bbox_mask = scipy.interpolate.griddata(im_pts, im_pts_score, grids)
bbox_mask = bbox_mask.transpose(1, 0)
bbox_mask = mmcv.imresize(bbox_mask, (w, h))
bbox_mask = bbox_mask.astype(np.float32)
bbox_mask[np.isnan(bbox_mask)] = 0
bbox_mask = (bbox_mask > test_cfg.get('pts_score_thr', 0.5)).astype(np.uint8)
im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
cls_segms[label].append(im_mask)
return cls_segms
def pts2result(pts, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
        pts (Tensor): shape (n, pts_num)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if pts.shape[0] == 0:
return [np.zeros((0, pts.shape[1]), dtype=np.float32) for i in range(num_classes)]
else:
pts = pts.cpu().numpy()
labels = labels.cpu().numpy()
return [pts[labels == i, :] for i in range(num_classes)]
|
paypal/standard/ipn/migrations/0008_auto_20181128_1032.py | americanhandelsociety/django-paypal | 563 | 11145516 | # Generated by Django 2.1.3 on 2018-11-28 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ipn', '0007_auto_20160219_1135'),
]
operations = [
migrations.AlterField(
model_name='paypalipn',
name='flag',
field=models.BooleanField(blank=True, default=False),
),
migrations.AlterField(
model_name='paypalipn',
name='test_ipn',
field=models.BooleanField(blank=True, default=False),
),
]
|
models/department.py | noryb009/rmc | 164 | 11145548 | import mongoengine as me
class Department(me.Document):
# eg. earth
id = me.StringField(primary_key=True)
# eg. Earth Sciences
name = me.StringField(required=True)
# eg. sci
faculty_id = me.StringField(required=True)
# TODO(mack): url is broken, a lot of urls are for the grad rather
# than undergrad calendar
# eg. http://ugradcalendar.uwaterloo.ca/courses/EARTH
url = me.URLField(required=True)
|
tests/unit/wallet/test_transaction.py | nishp77/lbry-sdk | 4,996 | 11145583 | import os
import unittest
import tempfile
import shutil
from binascii import hexlify, unhexlify
from itertools import cycle
from lbry.testcase import AsyncioTestCase
from lbry.wallet.constants import CENT, COIN, NULL_HASH32
from lbry.wallet import Wallet, Account, Ledger, Database, Headers, Transaction, Output, Input
NULL_HASH = b'\x00'*32
FEE_PER_BYTE = 50
FEE_PER_CHAR = 200000
def get_output(amount=CENT, pubkey_hash=NULL_HASH32, height=-2):
return Transaction(height=height) \
.add_outputs([Output.pay_pubkey_hash(amount, pubkey_hash)]) \
.outputs[0]
def get_input(amount=CENT, pubkey_hash=NULL_HASH):
return Input.spend(get_output(amount, pubkey_hash))
def get_transaction(txo=None):
return Transaction() \
.add_inputs([get_input()]) \
.add_outputs([txo or Output.pay_pubkey_hash(CENT, NULL_HASH32)])
def get_claim_transaction(claim_name, claim=b''):
return get_transaction(
Output.pay_claim_name_pubkey_hash(CENT, claim_name, claim, NULL_HASH32)
)
class TestSizeAndFeeEstimation(AsyncioTestCase):
async def asyncSetUp(self):
self.ledger = Ledger({
'db': Database(':memory:'),
'headers': Headers(':memory:')
})
await self.ledger.db.open()
async def asyncTearDown(self):
await self.ledger.db.close()
def test_output_size_and_fee(self):
txo = get_output()
self.assertEqual(txo.size, 46)
self.assertEqual(txo.get_fee(self.ledger), 46 * FEE_PER_BYTE)
claim_name = 'verylongname'
tx = get_claim_transaction(claim_name, b'0'*4000)
base_size = tx.size - tx.inputs[0].size - tx.outputs[0].size
txo = tx.outputs[0]
self.assertEqual(tx.size, 4225)
self.assertEqual(tx.base_size, base_size)
self.assertEqual(txo.size, 4067)
self.assertEqual(txo.get_fee(self.ledger), len(claim_name) * FEE_PER_CHAR)
# fee based on total bytes is the larger fee
claim_name = 'a'
tx = get_claim_transaction(claim_name, b'0'*4000)
base_size = tx.size - tx.inputs[0].size - tx.outputs[0].size
txo = tx.outputs[0]
self.assertEqual(tx.size, 4214)
self.assertEqual(tx.base_size, base_size)
self.assertEqual(txo.size, 4056)
self.assertEqual(txo.get_fee(self.ledger), txo.size * FEE_PER_BYTE)
def test_input_size_and_fee(self):
txi = get_input()
self.assertEqual(txi.size, 148)
self.assertEqual(txi.get_fee(self.ledger), 148 * FEE_PER_BYTE)
def test_transaction_size_and_fee(self):
tx = get_transaction()
self.assertEqual(tx.size, 204)
self.assertEqual(tx.base_size, tx.size - tx.inputs[0].size - tx.outputs[0].size)
self.assertEqual(tx.get_base_fee(self.ledger), FEE_PER_BYTE * tx.base_size)
class TestAccountBalanceImpactFromTransaction(unittest.TestCase):
def test_is_my_output_not_set(self):
tx = get_transaction()
with self.assertRaisesRegex(ValueError, "Cannot access net_account_balance"):
_ = tx.net_account_balance
tx.inputs[0].txo_ref.txo.is_my_output = True
with self.assertRaisesRegex(ValueError, "Cannot access net_account_balance"):
_ = tx.net_account_balance
tx.outputs[0].is_my_output = True
# all inputs/outputs are set now so it should work
_ = tx.net_account_balance
def test_paying_from_my_account_to_other_account(self):
tx = Transaction() \
.add_inputs([get_input(300*CENT)]) \
.add_outputs([get_output(190*CENT, NULL_HASH),
get_output(100*CENT, NULL_HASH)])
tx.inputs[0].txo_ref.txo.is_my_output = True
tx.outputs[0].is_my_output = False
tx.outputs[1].is_my_output = True
self.assertEqual(tx.net_account_balance, -200*CENT)
def test_paying_from_other_account_to_my_account(self):
tx = Transaction() \
.add_inputs([get_input(300*CENT)]) \
.add_outputs([get_output(190*CENT, NULL_HASH),
get_output(100*CENT, NULL_HASH)])
tx.inputs[0].txo_ref.txo.is_my_output = False
tx.outputs[0].is_my_output = True
tx.outputs[1].is_my_output = False
self.assertEqual(tx.net_account_balance, 190*CENT)
def test_paying_from_my_account_to_my_account(self):
tx = Transaction() \
.add_inputs([get_input(300*CENT)]) \
.add_outputs([get_output(190*CENT, NULL_HASH),
get_output(100*CENT, NULL_HASH)])
tx.inputs[0].txo_ref.txo.is_my_output = True
tx.outputs[0].is_my_output = True
tx.outputs[1].is_my_output = True
self.assertEqual(tx.net_account_balance, -10*CENT) # lost to fee
class TestTransactionSerialization(unittest.TestCase):
def test_genesis_transaction(self):
raw = unhexlify(
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1f0"
"4ffff001d010417696e736572742074696d657374616d7020737472696e67ffffffff01000004bfc91b8e"
"001976a914345991dbf57bfb014b87006acdfafbfc5fe8292f88ac00000000"
)
tx = Transaction(raw)
self.assertEqual(tx.version, 1)
self.assertEqual(tx.locktime, 0)
self.assertEqual(len(tx.inputs), 1)
self.assertEqual(len(tx.outputs), 1)
coinbase = tx.inputs[0]
self.assertTrue(coinbase.txo_ref.is_null)
self.assertEqual(coinbase.txo_ref.position, 0xFFFFFFFF)
self.assertEqual(coinbase.sequence, 0xFFFFFFFF)
self.assertIsNotNone(coinbase.coinbase)
self.assertIsNone(coinbase.script)
self.assertEqual(
hexlify(coinbase.coinbase),
b'04ffff001d010417696e736572742074696d657374616d7020737472696e67'
)
out = tx.outputs[0]
self.assertEqual(out.amount, 40000000000000000)
self.assertEqual(out.position, 0)
self.assertTrue(out.script.is_pay_pubkey_hash)
self.assertFalse(out.script.is_pay_script_hash)
self.assertFalse(out.script.is_claim_involved)
tx._reset()
self.assertEqual(tx.raw, raw)
def test_coinbase_transaction(self):
raw = unhexlify(
"01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff200"
"34d520504f89ac55a086032d217bf0700000d2f6e6f64655374726174756d2f0000000001a03489850800"
"00001976a914cfab870d6deea54ca94a41912a75484649e52f2088ac00000000"
)
tx = Transaction(raw)
self.assertEqual(tx.version, 1)
self.assertEqual(tx.locktime, 0)
self.assertEqual(len(tx.inputs), 1)
self.assertEqual(len(tx.outputs), 1)
coinbase = tx.inputs[0]
self.assertTrue(coinbase.txo_ref.is_null)
self.assertEqual(coinbase.txo_ref.position, 0xFFFFFFFF)
self.assertEqual(coinbase.sequence, 0)
self.assertIsNotNone(coinbase.coinbase)
self.assertIsNone(coinbase.script)
self.assertEqual(
hexlify(coinbase.coinbase),
b'034d520504f89ac55a086032d217bf0700000d2f6e6f64655374726174756d2f'
)
out = tx.outputs[0]
self.assertEqual(out.amount, 36600100000)
self.assertEqual(out.position, 0)
self.assertTrue(out.script.is_pay_pubkey_hash)
self.assertFalse(out.script.is_pay_script_hash)
self.assertFalse(out.script.is_claim_involved)
tx._reset()
self.assertEqual(tx.raw, raw)
def test_claim_transaction(self):
raw = unhexlify(
"01000000012433e1b327603843b083344dbae5306ff7927f87ebbc5ae9eb50856c5b53fd1d000000006a4"
"7304402201a91e1023d11c383a11e26bf8f9034087b15d8ada78fa565e0610455ffc8505e0220038a63a6"
"ecb399723d4f1f78a20ddec0a78bf8fb6c75e63e166ef780f3944fbf0121021810150a2e4b088ec51b20c"
"be1b335962b634545860733367824d5dc3eda767dffffffff028096980000000000fdff00b50463617473"
"4cdc080110011a7808011230080410011a084d616361726f6e6922002a003214416c6c207269676874732"
"072657365727665642e38004a0052005a001a42080110011a30add80aaf02559ba09853636a0658c42b72"
"7cb5bb4ba8acedb4b7fe656065a47a31878dbf9912135ddb9e13806cc1479d220a696d6167652f6a70656"
"72a5c080110031a404180cc0fa4d3839ee29cca866baed25fafb43fca1eb3b608ee889d351d3573d042c7"
"b83e2e643db0d8e062a04e6e9ae6b90540a2f95fe28638d0f18af4361a1c2214f73de93f4299fb32c32f9"
"49e02198a8e91101abd6d7576a914be16e4b0f9bd8f6d47d02b3a887049c36d3b84cb88ac0cd2520b0000"
"00001976a914f521178feb733a719964e1da4a9efb09dcc39cfa88ac00000000"
)
tx = Transaction(raw)
self.assertEqual(tx.id, '666c3d15de1d6949a4fe717126c368e274b36957dce29fd401138c1e87e92a62')
self.assertEqual(tx.version, 1)
self.assertEqual(tx.locktime, 0)
self.assertEqual(len(tx.inputs), 1)
self.assertEqual(len(tx.outputs), 2)
txin = tx.inputs[0]
self.assertEqual(
txin.txo_ref.id,
'1dfd535b6c8550ebe95abceb877f92f76f30e5ba4d3483b043386027b3e13324:0'
)
self.assertEqual(txin.txo_ref.position, 0)
self.assertEqual(txin.sequence, 0xFFFFFFFF)
self.assertIsNone(txin.coinbase)
self.assertEqual(txin.script.template.name, 'pubkey_hash')
self.assertEqual(
hexlify(txin.script.values['pubkey']),
b'<KEY>'
)
self.assertEqual(
hexlify(txin.script.values['signature']),
b'304402201a91e1023d11c383a11e26bf8f9034087b15d8ada78fa565e0610455ffc8505e0220038a63a6'
b'ecb399723d4f1f78a20ddec0a78bf8fb6c75e63e166ef780f3944fbf01'
)
# Claim
out0 = tx.outputs[0]
self.assertEqual(out0.amount, 10000000)
self.assertEqual(out0.position, 0)
self.assertTrue(out0.script.is_pay_pubkey_hash)
self.assertTrue(out0.script.is_claim_name)
self.assertTrue(out0.script.is_claim_involved)
self.assertEqual(out0.script.values['claim_name'], b'cats')
self.assertEqual(
hexlify(out0.script.values['pubkey_hash']),
b'be16e4b0f9bd8f6d47d02b3a887049c36d3b84cb'
)
# Change
out1 = tx.outputs[1]
self.assertEqual(out1.amount, 189977100)
self.assertEqual(out1.position, 1)
self.assertTrue(out1.script.is_pay_pubkey_hash)
self.assertFalse(out1.script.is_claim_involved)
self.assertEqual(
hexlify(out1.script.values['pubkey_hash']),
b'f521178feb733a719964e1da4a9efb09dcc39cfa'
)
tx._reset()
self.assertEqual(tx.raw, raw)
class TestTransactionSigning(AsyncioTestCase):
async def asyncSetUp(self):
self.ledger = Ledger({
'db': Database(':memory:'),
'headers': Headers(':memory:')
})
await self.ledger.db.open()
async def asyncTearDown(self):
await self.ledger.db.close()
async def test_sign(self):
account = Account.from_dict(
self.ledger, Wallet(), {
"seed":
"carbon smart garage balance margin twelve chest sword toas"
"t envelope bottom stomach absent"
}
)
await account.ensure_address_gap()
address1, address2 = await account.receiving.get_addresses(limit=2)
pubkey_hash1 = self.ledger.address_to_hash160(address1)
pubkey_hash2 = self.ledger.address_to_hash160(address2)
tx = Transaction() \
.add_inputs([Input.spend(get_output(int(2*COIN), pubkey_hash1))]) \
.add_outputs([Output.pay_pubkey_hash(int(1.9*COIN), pubkey_hash2)])
await tx.sign([account])
self.assertEqual(
hexlify(tx.inputs[0].script.values['signature']),
b'304402200dafa26ad7cf38c5a971c8a25ce7d85a076235f146126762296b1223c42ae21e022020ef9eeb8'
b'398327891008c5c0be4357683f12cb22346691ff23914f457bf679601'
)
class TransactionIOBalancing(AsyncioTestCase):
async def asyncSetUp(self):
wallet_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, wallet_dir)
self.ledger = Ledger({
'db': Database(os.path.join(wallet_dir, 'blockchain.db')),
'headers': Headers(':memory:'),
})
await self.ledger.db.open()
self.account = Account.from_dict(
self.ledger, Wallet(), {
"seed": "carbon smart garage balance margin twelve chest sword "
"toast envelope bottom stomach absent"
}
)
addresses = await self.account.ensure_address_gap()
self.pubkey_hash = [self.ledger.address_to_hash160(a) for a in addresses]
self.hash_cycler = cycle(self.pubkey_hash)
async def asyncTearDown(self):
await self.ledger.db.close()
def txo(self, amount, address=None):
return get_output(int(amount*COIN), address or next(self.hash_cycler))
def txi(self, txo):
return Input.spend(txo)
def tx(self, inputs, outputs):
return Transaction.create(inputs, outputs, [self.account], self.account)
async def create_utxos(self, amounts):
utxos = [self.txo(amount) for amount in amounts]
self.funding_tx = Transaction(is_verified=True) \
.add_inputs([self.txi(self.txo(sum(amounts)+0.1))]) \
.add_outputs(utxos)
await self.ledger.db.insert_transaction(self.funding_tx)
for utxo in utxos:
await self.ledger.db.save_transaction_io(
self.funding_tx,
self.ledger.hash160_to_address(utxo.script.values['pubkey_hash']),
utxo.script.values['pubkey_hash'], ''
)
return utxos
@staticmethod
def inputs(tx):
return [round(i.amount/COIN, 2) for i in tx.inputs]
@staticmethod
def outputs(tx):
return [round(o.amount/COIN, 2) for o in tx.outputs]
async def test_basic_use_cases(self):
self.ledger.fee_per_byte = int(.01*CENT)
# available UTXOs for filling missing inputs
utxos = await self.create_utxos([
1, 1, 3, 5, 10
])
# pay 3 coins (3.02 w/ fees)
tx = await self.tx(
[], # inputs
[self.txo(3)] # outputs
)
# best UTXO match is 5 (as UTXO 3 will be short 0.02 to cover fees)
self.assertListEqual(self.inputs(tx), [5])
# a change of 1.98 is added to reach balance
self.assertListEqual(self.outputs(tx), [3, 1.98])
await self.ledger.release_outputs(utxos)
# pay 2.98 coins (3.00 w/ fees)
tx = await self.tx(
[], # inputs
[self.txo(2.98)] # outputs
)
# best UTXO match is 3 and no change is needed
self.assertListEqual(self.inputs(tx), [3])
self.assertListEqual(self.outputs(tx), [2.98])
await self.ledger.release_outputs(utxos)
# supplied input and output, but input is not enough to cover output
tx = await self.tx(
[self.txi(self.txo(10))], # inputs
[self.txo(11)] # outputs
)
# additional input is chosen (UTXO 3)
self.assertListEqual([10, 3], self.inputs(tx))
# change is now needed to consume extra input
self.assertListEqual([11, 1.96], self.outputs(tx))
await self.ledger.release_outputs(utxos)
# liquidating a UTXO
tx = await self.tx(
[self.txi(self.txo(10))], # inputs
[] # outputs
)
self.assertListEqual([10], self.inputs(tx))
# missing change added to consume the amount
self.assertListEqual([9.98], self.outputs(tx))
await self.ledger.release_outputs(utxos)
# liquidating at a loss, requires adding extra inputs
tx = await self.tx(
[self.txi(self.txo(0.01))], # inputs
[] # outputs
)
# UTXO 1 is added to cover some of the fee
self.assertListEqual([0.01, 1], self.inputs(tx))
# change is now needed to consume extra input
self.assertListEqual([0.97], self.outputs(tx))
async def test_basic_use_cases_sqlite(self):
self.ledger.coin_selection_strategy = 'sqlite'
self.ledger.fee_per_byte = int(0.01*CENT)
# available UTXOs for filling missing inputs
utxos = await self.create_utxos([
1, 1, 3, 5, 10
])
self.assertEqual(5, len(await self.ledger.get_utxos()))
# pay 3 coins (3.07 w/ fees)
tx = await self.tx(
[], # inputs
[self.txo(3)] # outputs
)
await self.ledger.db.db.run(self.ledger.db._transaction_io, tx, tx.outputs[0].get_address(self.ledger), tx.id)
self.assertListEqual(self.inputs(tx), [1.0, 1.0, 3.0])
# a change of 1.95 is added to reach balance
self.assertListEqual(self.outputs(tx), [3, 1.95])
# utxos: 1.95, 3, 5, 10
self.assertEqual(2, len(await self.ledger.get_utxos()))
# pay 4.946 coins (5.00 w/ fees)
tx = await self.tx(
[], # inputs
[self.txo(4.946)] # outputs
)
self.assertEqual(1, len(await self.ledger.get_utxos()))
self.assertListEqual(self.inputs(tx), [5.0])
self.assertEqual(2, len(tx.outputs))
self.assertEqual(494600000, tx.outputs[0].amount)
# utxos: 3, 1.95, 4.946, 10
await self.ledger.release_outputs(utxos)
# supplied input and output, but input is not enough to cover output
tx = await self.tx(
[self.txi(self.txo(10))], # inputs
[self.txo(11)] # outputs
)
# additional input is chosen (UTXO 1)
self.assertListEqual([10, 1.0, 1.0], self.inputs(tx))
# change is now needed to consume extra input
self.assertListEqual([11, 0.95], self.outputs(tx))
await self.ledger.release_outputs(utxos)
# liquidating a UTXO
tx = await self.tx(
[self.txi(self.txo(10))], # inputs
[] # outputs
)
self.assertListEqual([10], self.inputs(tx))
# missing change added to consume the amount
self.assertListEqual([9.98], self.outputs(tx))
await self.ledger.release_outputs(utxos)
# liquidating at a loss, requires adding extra inputs
tx = await self.tx(
[self.txi(self.txo(0.01))], # inputs
[] # outputs
)
# UTXO 1 is added to cover some of the fee
self.assertListEqual([0.01, 1], self.inputs(tx))
# change is now needed to consume extra input
self.assertListEqual([0.97], self.outputs(tx))
|
models/mnist_models.py | SanghyukChun/rebias | 129 | 11145593 | """ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Implementation for simple stacked convolutional networks.
"""
import torch
import torch.nn as nn
class SimpleConvNet(nn.Module):
def __init__(self, num_classes=None, kernel_size=7, feature_pos='post'):
super(SimpleConvNet, self).__init__()
padding = kernel_size // 2
layers = [
nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
]
self.extracter = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if feature_pos not in ['pre', 'post', 'logits']:
raise ValueError(feature_pos)
self.feature_pos = feature_pos
def forward(self, x, logits_only=False):
pre_gap_feats = self.extracter(x)
post_gap_feats = self.avgpool(pre_gap_feats)
post_gap_feats = torch.flatten(post_gap_feats, 1)
logits = self.fc(post_gap_feats)
if logits_only:
return logits
elif self.feature_pos == 'pre':
feats = pre_gap_feats
elif self.feature_pos == 'post':
feats = post_gap_feats
else:
feats = logits
return logits, feats
|
src/reader/__main__.py | mirekdlugosz/reader | 205 | 11145602 | import sys
CANNOT_IMPORT = """\
Error: cannot import reader._cli
This might be due to missing dependencies. The command-line interface is
optional, use the 'cli' extra to install its dependencies:
pip install reader[cli]
"""
try:
from reader._cli import cli
cli(prog_name='python -m reader')
except ImportError:
print(CANNOT_IMPORT, file=sys.stderr)
raise
|
profiles/migrations/0003_role_team_teamrolebinding_userrolebinding.py | slarimore02/squest | 112 | 11145605 | # Generated by Django 3.2.9 on 2021-12-01 21:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from profiles.migrations import _rbac as rbac
class Migration(migrations.Migration):
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('profiles', '0002_auto_20211105_0946'),
]
operations = [
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.CharField(blank=True, max_length=500)),
('content_type', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('permissions', models.ManyToManyField(blank=True, help_text='Permissions linked to this role.', related_name='roles', related_query_name='roles', to='auth.Permission')),
],
options={
'unique_together': {('name', 'content_type')},
},
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='UserRoleBinding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles.role')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'content_type', 'object_id', 'role')},
},
),
migrations.CreateModel(
name='TeamRoleBinding',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles.role')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='profiles.team')),
],
options={
'unique_together': {('team', 'content_type', 'object_id', 'role')},
},
),
migrations.RunPython(rbac.create_roles),
migrations.RunPython(rbac.init_spoc_role),
]
|
features/steps/line.py | kckaiwei/python-pptx | 1,429 | 11145617 | # encoding: utf-8
"""Step implementations for LineFormat-related features."""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from pptx import Presentation
from pptx.enum.dml import MSO_LINE
from pptx.util import Length, Pt
from helpers import test_pptx
# given ===================================================
@given("a LineFormat object as line")
def given_a_LineFormat_object_as_line(context):
line = Presentation(test_pptx("dml-line")).slides[0].shapes[0].line
context.line = line
@given("a LineFormat object as line having {current} dash style")
def given_a_LineFormat_object_as_line_having_dash_style(context, current):
shape_idx = {"no explicit": 0, "solid": 1, "dashed": 2, "dash-dot": 3}[current]
shape = Presentation(test_pptx("dml-line")).slides[3].shapes[shape_idx]
context.line = shape.line
@given("a LineFormat object as line having {line_width} width")
def given_a_LineFormat_object_as_line_having_width(context, line_width):
shape_idx = {"no explicit": 0, "1 pt": 1}[line_width]
prs = Presentation(test_pptx("dml-line"))
shape = prs.slides[2].shapes[shape_idx]
context.line = shape.line
# when ====================================================
@when("I assign {value_key} to line.dash_style")
def when_I_assign_value_to_line_dash_style(context, value_key):
value = {
"None": None,
"MSO_LINE.DASH": MSO_LINE.DASH,
"MSO_LINE.DASH_DOT": MSO_LINE.DASH_DOT,
"MSO_LINE.SOLID": MSO_LINE.SOLID,
}[value_key]
context.line.dash_style = value
@when("I assign {line_width} to line.width")
def when_I_assign_value_to_line_width(context, line_width):
value = {"None": None, "1 pt": Pt(1), "2.34 pt": Pt(2.34)}[line_width]
context.line.width = value
# then ====================================================
@then("line.color is a ColorFormat object")
def then_line_color_is_a_ColorFormat_object(context):
class_name = context.line.color.__class__.__name__
expected_value = "ColorFormat"
assert class_name == expected_value, "expected '%s', got '%s'" % (
expected_value,
class_name,
)
@then("line.dash_style is {dash_style}")
def then_line_dash_style_is_value(context, dash_style):
expected_value = {
"None": None,
"MSO_LINE.DASH": MSO_LINE.DASH,
"MSO_LINE.DASH_DOT": MSO_LINE.DASH_DOT,
"MSO_LINE.SOLID": MSO_LINE.SOLID,
}[dash_style]
actual_value = context.line.dash_style
assert actual_value == expected_value, "expected %s, got %s" % (
expected_value,
actual_value,
)
@then("line.fill is a FillFormat object")
def then_line_fill_is_a_FillFormat_object(context):
class_name = context.line.fill.__class__.__name__
expected_value = "FillFormat"
assert class_name == expected_value, "expected '%s', got '%s'" % (
expected_value,
class_name,
)
@then("line.width is {line_width}")
def then_line_width_is_value(context, line_width):
expected_value = {"0": 0, "1 pt": Pt(1), "2.34 pt": Pt(2.34)}[line_width]
line_width = context.line.width
assert line_width == expected_value
assert isinstance(line_width, Length)
|
tapiriik/web/views/rollback.py | prohfesor/tapiriik | 1,445 | 11145636 | from tapiriik.services.rollback import RollbackTask
from django.http import HttpResponse
from django.views.decorators.http import require_GET
from django.shortcuts import redirect, render
def account_rollback_initiate(req):
if not req.user:
return HttpResponse(status=403)
task = RollbackTask.Create(req.user)
return HttpResponse(task.json())
def account_rollback_status(req, task_id):
if not req.user:
return HttpResponse(status=403)
task = RollbackTask.Get(task_id)
if not task:
return HttpResponse(status=404)
if req.method == 'POST':
task.Schedule()
return HttpResponse(task.json())
def rollback_dashboard(req):
if not req.user:
return redirect('/')
return render(req, "rollback.html") |
urizen/core/utils.py | vurmux/urizen | 107 | 11145649 | #!/usr/bin/env python3
import pkgutil
import importlib
import sys
from inspect import getmembers, isfunction
import urizen as uz
def find_generators(module):
path_dict = {}
spec_list = []
for importer, modname, ispkg in pkgutil.walk_packages(module.__path__):
import_path = '{}.{}'.format(module.__name__, modname)
if ispkg:
spec = pkgutil._get_spec(importer, modname)
importlib._bootstrap._load(spec)
spec_list.append(spec)
elif import_path.startswith('urizen.generators'):
path_dict[import_path[18:]] = [
f
for f in getmembers(sys.modules.get(import_path))
if isfunction(f[1]) and not f[0].startswith('_')
and f[1].__module__ == import_path
]
for spec in spec_list:
del sys.modules[spec.name]
return path_dict
def construct_generators_tree():
proto_gens = find_generators(uz)
gen_tree = {}
for gen_path, gen_list in proto_gens.items():
if '.' not in gen_path:
continue
gen_type, gen_module_name = gen_path.split('.')
if gen_type not in gen_tree:
gen_tree[gen_type] = {}
if gen_module_name not in gen_tree[gen_type]:
gen_tree[gen_type][gen_module_name] = {}
for gen_name, gen_function in gen_list:
gen_tree[gen_type][gen_module_name][gen_name] = gen_function
return gen_tree |
src/sage/tests/books/computational-mathematics-with-sagemath/sol/graphique_doctest.py | UCD4IDS/sage | 1,742 | 11145650 | ## -*- encoding: utf-8 -*-
"""
This file (./sol/graphique_doctest.sage) was *autogenerated* from ./sol/graphique.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./sol/graphique_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./sol/graphique.tex, line 3::
sage: reset()
Sage example in ./sol/graphique.tex, line 10::
sage: t = var('t'); liste = [a + cos(t) for a in srange(0, 2, 0.1)]
sage: g = polar_plot(liste, (t, 0, 2 * pi)); g.show(aspect_ratio = 1)
Sage example in ./sol/graphique.tex, line 38::
sage: f = lambda x: abs(x**2 - 1/4)
sage: def liste_pts(u0, n):
....: u = u0; liste = [[u0,0]]
....: for k in range(n):
....: v, u = u, f(u)
....: liste.extend([[v,u], [u,u]])
....: return(liste)
sage: g = line(liste_pts(1.1, 8), rgbcolor = (.9,0,0))
sage: g += line(liste_pts(-.4, 8), rgbcolor = (.01,0,0))
sage: g += line(liste_pts(1.3, 3), rgbcolor = (.5,0,0))
sage: g += plot(f, -1, 3, rgbcolor = 'blue')
sage: g += plot(x, -1, 3, rgbcolor = 'green')
sage: g.show(aspect_ratio = 1, ymin = -.2, ymax = 3)
Sage example in ./sol/graphique.tex, line 78::
sage: x = var('x'); y = function('y')
sage: DE = x^2 * diff(y(x), x) - y(x) == 0
sage: desolve(DE, y(x))
_C*e^(-1/x)
sage: g = plot([c*e^(-1/x) for c in srange(-8, 8, 0.4)], (x, -3, 3))
sage: y = var('y')
sage: g += plot_vector_field((x^2, y), (x,-3,3), (y,-5,5))
sage: g.show() # not tested, known bug, see :trac:`32657`
Sage example in ./sol/graphique.tex, line 124::
sage: from sage.calculus.desolvers import desolve_system_rk4
sage: f = lambda x, y: [a*x-b*x*y,-c*y+d*b*x*y]
sage: x, y, t = var('x, y, t')
sage: a, b, c, d = 1., 0.1, 1.5, 0.75
sage: P = desolve_system_rk4(f(x,y), [x,y],\
....: ics=[0,10,5], ivar=t, end_points=15)
sage: Ql = [[i,j] for i,j,k in P]; p = line(Ql, color='red')
sage: p += text("Rabbits", (12,37), fontsize=10, color='red')
sage: Qr = [[i,k] for i,j,k in P]; p += line(Qr, color='blue')
sage: p += text("Foxes", (12,7), fontsize=10, color='blue')
sage: p.axes_labels(["time", "population"])
sage: p.show(gridlines = True)
Sage example in ./sol/graphique.tex, line 154::
sage: n = 10; L = srange(6, 18, 12 / n); R = srange(3, 9, 6 / n)
sage: def g(x,y): v = vector(f(x, y)); return v / v.norm()
sage: q = plot_vector_field(g(x, y), (x, 0, 60), (y, 0, 36))
sage: for j in range(n):
....: P = desolve_system_rk4(f(x,y), [x,y],
....: ics=[0,L[j],R[j]], ivar=t, end_points=15)
....: Q = [[j,k] for i,j,k in P]
....: q += line(Q, color=hue(.8-j/(2*n)))
sage: q.axes_labels(["rabbits", "foxes"]); q.show()
Sage example in ./sol/graphique.tex, line 185::
sage: from scipy import integrate
sage: def dX_dt(X, t=0): return [X[1], 0.5*X[1] - X[0] - X[1]^3]
sage: t = srange(0, 40, 0.01); x0 = srange(-2, 2, 0.1); y0 = 2.5
sage: CI = [[i, y0] for i in x0] + [[i, -y0] for i in x0]
sage: def g(x,y): v = vector(dX_dt([x, y])); return v / v.norm()
sage: x, y = var('x, y'); n = len(CI)
sage: q = plot_vector_field(g(x, y), (x, -3, 3), (y, -y0, y0))
sage: for j in range(n): # long time
....: X = integrate.odeint(dX_dt, CI[j], t)
....: q += line(X, color=(1.7*j/(4*n),1.5*j/(4*n),1-3*j/(8*n)))
sage: X = integrate.odeint(dX_dt, [0.01,0], t)
sage: q += line(X, color = 'red'); q.show()
Sage example in ./sol/graphique.tex, line 234::
sage: from scipy import integrate
sage: t = srange(0, 40, 0.2)
sage: n = 35; CI_cart = [[4, .2 * i] for i in range(n)]
sage: CI = list(map(lambda x: [sqrt(x[0]^2+x[1]^2),
....: pi - arctan(x[1]/x[0])], CI_cart))
sage: for alpha in [0.1, 0.5, 1, 1.25]: # long time
....: dX_dt = lambda X, t=0: [cos(X[1])*(1-1/X[0]^2),
....: -sin(X[1]) * (1/X[0]+1/X[0]^3) + 2*alpha/X[0]^2]
....: q = circle((0, 0), 1, fill=True, rgbcolor='purple')
....: for j in range(n):
....: X = integrate.odeint(dX_dt, CI[j], t)
....: Y = [[u[0]*cos(u[1]), u[0]*sin(u[1])] for u in X]
....: q += line(Y, xmin = -4, xmax = 4, color='blue')
....: q.show(aspect_ratio = 1, axes = False)
"""
|
src/mattermostdriver/endpoints/status.py | fried/python-mattermost-driver | 142 | 11145658 | from .base import Base
class Status(Base):
def get_user_status(self, user_id):
return self.client.get(
'/users/' + user_id + '/status'
)
def update_user_status(self, user_id, options=None):
return self.client.put(
'/users/' + user_id + '/status',
options=options
)
def get_user_statuses_by_id(self, options=None):
return self.client.post(
'/users/status/ids',
options
)
|
__scraping__/associatedrealtorsaruba.com - selenium/main.py | whitmans-max/python-examples | 140 | 11145662 | #!/usr/bin/env python3
# date: 2020.01.07
# https://stackoverflow.com/questions/59632031/how-to-extract-href-when-href-element-is-a-hyperlink?noredirect=1#comment105434826_59632031
import selenium.webdriver
url = 'https://associatedrealtorsaruba.com/index.php?option=com_ezrealty&Itemid=11&task=results&cnid=0&custom7=&custom8=&parking=&type=0&cid=0&stid=0&locid=0&minprice=&maxprice=&minbed=&maxbed=&min_squarefeet=&max_squarefeet=&bathrooms=&sold=0&lug=0&featured=0&custom4=&custom5=&custom6=&postcode=&radius=&direction=DEFAULT&submit=Search'
driver = selenium.webdriver.Firefox()
driver.get(url)
while True:
all_items = driver.find_elements_by_xpath('//span[@class="h3"]')
for item in all_items:
print(item.text)
try:
# find link to next page
all_items = driver.find_element_by_xpath('//a[@title="next page"]')
# click link to load next page
all_items.click()
except Exception as ex:
print('ex:', ex)
break
|
software/glasgow/gateware/analyzer.py | electroniceel/Glasgow | 1,014 | 11145674 | from functools import reduce
from collections import OrderedDict
from nmigen.compat import *
from nmigen.compat.genlib.fifo import _FIFOInterface, SyncFIFOBuffered
from nmigen.compat.genlib.coding import PriorityEncoder, PriorityDecoder
__all__ = ["EventSource", "EventAnalyzer", "TraceDecodingError", "TraceDecoder"]
REPORT_DELAY = 0b10000000
REPORT_DELAY_MASK = 0b10000000
REPORT_EVENT = 0b01000000
REPORT_EVENT_MASK = 0b11000000
REPORT_SPECIAL = 0b00000000
REPORT_SPECIAL_MASK = 0b11000000
SPECIAL_DONE = 0b000000
SPECIAL_OVERRUN = 0b000001
SPECIAL_THROTTLE = 0b000010
SPECIAL_DETHROTTLE = 0b000011
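# A worked decoding example (illustrative note, derived from the constants above and
# the TraceDecoder below): any byte with its top bit set is a delay septet, so the
# pair 0x82, 0xAC decodes to (0b0000010 << 7) | 0b0101100 = 300 cycles of delay; a
# byte matching REPORT_EVENT (top bits 01) carries the event source index in its low
# six bits, and any event data octets that follow arrive most-significant byte first.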
class EventSource(Module):
def __init__(self, name, kind, width, fields, depth):
assert (width > 0 and kind in ("change", "strobe") or
width == 0 and kind == "strobe")
self.name = name
self.width = width
self.fields = fields
self.depth = depth
self.kind = kind
self.data = Signal(max(1, width))
self.trigger = Signal()
class EventAnalyzer(Module):
"""
An event analyzer module.
This event analyzer is designed to observe parallel, bursty processes in real-time, and yet
degrade gracefully (i.e. without losing data or breaking most applets) when observing processes
that generate events continuously, or generate very many simultaneous events for a short time.
To do this, the event analyzer is permitted to pause any applets marked as purely synchronous
once the event FIFO high-water mark is reached.
The event analyzer tries to make efficient use of power-of-2 wide block RAMs and be highly
tunable. To achieve this, it separates the event FIFO from the event data FIFOs, and avoids
storing timestamps explicitly. In a system with `n` events, each of which carries `d_n` bits
of data, there would be a single event FIFO that is `n` bits wide, where a bit being set means
that event `n` occurred at a given cycle; `n` event data FIFOs that are `d_n` bits wide each,
where, if a bit is set in the event FIFO, a data word is pushed into the event data FIFO; and
finally, one delay FIFO, where the last entry is incremented on every cycle that has
no event, and a new entry is pushed on every cycle there is at least one event. This way,
only cycles that have at least one event add new FIFO entries, and only one wide timestamp
counter needs to be maintained, greatly reducing the amount of necessary resources compared
to a more naive approach.
"""
@staticmethod
def _depth_for_width(width):
if width == 0:
return 0
elif width <= 2:
return 2048
elif width <= 4:
return 1024
elif width <= 8:
return 512
else:
return 256
def __init__(self, output_fifo, event_depth=None, delay_width=16):
assert output_fifo.width == 8
self.output_fifo = output_fifo
self.delay_width = delay_width
self.event_depth = event_depth
self.event_sources = Array()
self.done = Signal()
self.throttle = Signal()
self.overrun = Signal()
def add_event_source(self, name, kind, width, fields=(), depth=None):
if depth is None:
depth = self._depth_for_width(width)
event_source = EventSource(name, kind, width, fields, depth)
self.event_sources.append(event_source)
return event_source
def do_finalize(self):
assert len(self.event_sources) < 2 ** 6
assert max(s.width for s in self.event_sources) <= 32
# Fill the event, event data, and delay FIFOs.
throttle_on = Signal()
throttle_off = Signal()
throttle_edge = Signal()
throttle_fifos = []
self.sync += [
If(~self.throttle & throttle_on,
self.throttle.eq(1),
throttle_edge.eq(1)
).Elif(self.throttle & throttle_off,
self.throttle.eq(0),
throttle_edge.eq(1)
).Else(
throttle_edge.eq(0)
)
]
overrun_trip = Signal()
overrun_fifos = []
self.sync += [
If(overrun_trip,
self.overrun.eq(1)
)
]
event_width = 1 + len(self.event_sources)
if self.event_depth is None:
event_depth = min(self._depth_for_width(event_width),
self._depth_for_width(self.delay_width))
else:
event_depth = self.event_depth
self.submodules.event_fifo = event_fifo = \
SyncFIFOBuffered(width=event_width, depth=event_depth)
throttle_fifos.append(self.event_fifo)
self.comb += [
event_fifo.din.eq(Cat(self.throttle, [s.trigger for s in self.event_sources])),
event_fifo.we.eq(reduce(lambda a, b: a | b, (s.trigger for s in self.event_sources)) |
throttle_edge)
]
self.submodules.delay_fifo = delay_fifo = \
SyncFIFOBuffered(width=self.delay_width, depth=event_depth)
delay_timer = self._delay_timer = Signal(self.delay_width)
delay_ovrun = ((1 << self.delay_width) - 1)
delay_max = delay_ovrun - 1
self.sync += [
If(delay_fifo.we,
delay_timer.eq(0)
).Else(
delay_timer.eq(delay_timer + 1)
)
]
self.comb += [
delay_fifo.din.eq(Mux(self.overrun, delay_ovrun, delay_timer)),
delay_fifo.we.eq(event_fifo.we | (delay_timer == delay_max) |
self.done | self.overrun),
]
for event_source in self.event_sources:
if event_source.width > 0:
event_source.submodules.data_fifo = event_data_fifo = \
SyncFIFOBuffered(event_source.width, event_source.depth)
self.submodules += event_source
throttle_fifos.append(event_data_fifo)
self.comb += [
event_data_fifo.din.eq(event_source.data),
event_data_fifo.we.eq(event_source.trigger),
]
else:
event_source.submodules.data_fifo = _FIFOInterface(1, 0)
# Throttle applets based on FIFO levels with hysteresis.
self.comb += [
throttle_on .eq(reduce(lambda a, b: a | b,
(f.level >= f.depth - f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
throttle_off.eq(reduce(lambda a, b: a & b,
(f.level < f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
]
# Detect imminent FIFO overrun and trip overrun indication.
self.comb += [
overrun_trip.eq(reduce(lambda a, b: a | b,
(f.level == f.depth - 2
for f in throttle_fifos)))
]
# Dequeue events, and serialize events and event data.
self.submodules.event_encoder = event_encoder = \
PriorityEncoder(width=len(self.event_sources))
self.submodules.event_decoder = event_decoder = \
PriorityDecoder(width=len(self.event_sources))
self.comb += event_decoder.i.eq(event_encoder.o)
self.submodules.serializer = serializer = FSM(reset_state="WAIT-EVENT")
rep_overrun = Signal()
rep_throttle_new = Signal()
rep_throttle_cur = Signal()
delay_septets = 5
delay_counter = Signal(7 * delay_septets)
serializer.act("WAIT-EVENT",
If(delay_fifo.readable,
delay_fifo.re.eq(1),
NextValue(delay_counter, delay_counter + delay_fifo.dout + 1),
If(delay_fifo.dout == delay_ovrun,
NextValue(rep_overrun, 1),
NextState("REPORT-DELAY")
)
),
If(event_fifo.readable,
event_fifo.re.eq(1),
NextValue(event_encoder.i, event_fifo.dout[1:]),
NextValue(rep_throttle_new, event_fifo.dout[0]),
If((event_fifo.dout != 0) | (rep_throttle_cur != event_fifo.dout[0]),
NextState("REPORT-DELAY")
)
).Elif(self.done,
NextState("REPORT-DELAY")
)
)
serializer.act("REPORT-DELAY",
If(delay_counter >= 128 ** 4,
NextState("REPORT-DELAY-5")
).Elif(delay_counter >= 128 ** 3,
NextState("REPORT-DELAY-4")
).Elif(delay_counter >= 128 ** 2,
NextState("REPORT-DELAY-3")
).Elif(delay_counter >= 128 ** 1,
NextState("REPORT-DELAY-2")
).Else(
NextState("REPORT-DELAY-1")
)
)
for septet_no in range(delay_septets, 0, -1):
if septet_no == 1:
next_state = [
NextValue(delay_counter, 0),
If(rep_overrun,
NextState("REPORT-OVERRUN")
).Elif(rep_throttle_cur != rep_throttle_new,
NextState("REPORT-THROTTLE")
).Elif(event_encoder.i,
NextState("REPORT-EVENT")
).Elif(self.done,
NextState("REPORT-DONE")
).Else(
NextState("WAIT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-DELAY-%d" % (septet_no - 1))
]
serializer.act("REPORT-DELAY-%d" % septet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(
REPORT_DELAY | delay_counter.part((septet_no - 1) * 7, 7)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-THROTTLE",
If(self.output_fifo.writable,
NextValue(rep_throttle_cur, rep_throttle_new),
If(rep_throttle_new,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_THROTTLE),
).Else(
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DETHROTTLE),
),
self.output_fifo.we.eq(1),
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
)
)
event_source = self.event_sources[event_encoder.o]
event_data = Signal(32)
serializer.act("REPORT-EVENT",
If(self.output_fifo.writable,
NextValue(event_encoder.i, event_encoder.i & ~event_decoder.o),
self.output_fifo.din.eq(
REPORT_EVENT | event_encoder.o),
self.output_fifo.we.eq(1),
NextValue(event_data, event_source.data_fifo.dout),
event_source.data_fifo.re.eq(1),
If(event_source.width > 24,
NextState("REPORT-EVENT-DATA-4")
).Elif(event_source.width > 16,
NextState("REPORT-EVENT-DATA-3")
).Elif(event_source.width > 8,
NextState("REPORT-EVENT-DATA-2")
).Elif(event_source.width > 0,
NextState("REPORT-EVENT-DATA-1")
).Else(
If(event_encoder.i & ~event_decoder.o,
NextState("REPORT-EVENT")
).Else(
NextState("WAIT-EVENT")
)
)
)
)
for octet_no in range(4, 0, -1):
if octet_no == 1:
next_state = [
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-EVENT-DATA-%d" % (octet_no - 1))
]
serializer.act("REPORT-EVENT-DATA-%d" % octet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(event_data.part((octet_no - 1) * 8, 8)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-DONE",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DONE),
self.output_fifo.we.eq(1),
NextState("DONE")
)
)
if hasattr(self.output_fifo, "flush"):
flush_output_fifo = [self.output_fifo.flush.eq(1)]
else:
flush_output_fifo = []
serializer.act("DONE",
If(self.done,
flush_output_fifo
).Else(
NextState("WAIT-EVENT")
)
)
serializer.act("REPORT-OVERRUN",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_OVERRUN),
self.output_fifo.we.eq(1),
NextState("OVERRUN")
)
)
serializer.act("OVERRUN",
flush_output_fifo,
NextState("OVERRUN")
)
class TraceDecodingError(Exception):
pass
class TraceDecoder:
"""
Event analyzer trace decoder.
Decodes raw analyzer traces into a timestamped sequence of maps from event fields to
their values.
"""
def __init__(self, event_sources, absolute_timestamps=True):
self.event_sources = event_sources
self.absolute_timestamps = absolute_timestamps
self._state = "IDLE"
self._byte_off = 0
self._timestamp = 0
self._delay = 0
self._event_src = 0
self._event_off = 0
self._event_data = 0
self._pending = OrderedDict()
self._timeline = []
def events(self):
"""
Return names and widths for all events that may be emitted by this trace decoder.
"""
yield ("throttle", "throttle", 1)
for event_src in self.event_sources:
if event_src.fields:
for field_name, field_width in event_src.fields:
yield ("%s-%s" % (field_name, event_src.name), event_src.kind, field_width)
else:
yield (event_src.name, event_src.kind, event_src.width)
def _flush_timestamp(self):
if self._delay == 0:
return
if self._pending:
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
if self.absolute_timestamps:
self._timestamp += self._delay
else:
self._timestamp = self._delay
self._delay = 0
def process(self, data):
"""
Incrementally parse a chunk of analyzer trace, and record events in it.
"""
for octet in data:
is_delay = ((octet & REPORT_DELAY_MASK) == REPORT_DELAY)
is_event = ((octet & REPORT_EVENT_MASK) == REPORT_EVENT)
is_special = ((octet & REPORT_SPECIAL_MASK) == REPORT_SPECIAL)
special = octet & ~REPORT_SPECIAL
if self._state == "IDLE" and is_delay:
self._state = "DELAY"
self._delay = octet & ~REPORT_DELAY_MASK
elif self._state == "DELAY" and is_delay:
self._delay = (self._delay << 7) | (octet & ~REPORT_DELAY_MASK)
elif self._state == "DELAY" and is_special and \
special in (SPECIAL_THROTTLE, SPECIAL_DETHROTTLE):
self._flush_timestamp()
if special == SPECIAL_THROTTLE:
self._pending["throttle"] = 1
elif special == SPECIAL_DETHROTTLE:
self._pending["throttle"] = 0
elif self._state in ("IDLE", "DELAY") and is_event:
self._flush_timestamp()
                if (octet & ~REPORT_EVENT_MASK) >= len(self.event_sources):
raise TraceDecodingError("at byte offset %d: event source out of bounds" %
self._byte_off)
self._event_src = self.event_sources[octet & ~REPORT_EVENT_MASK]
if self._event_src.width == 0:
self._pending[self._event_src.name] = None
self._state = "IDLE"
else:
self._event_off = self._event_src.width
self._event_data = 0
self._state = "EVENT"
elif self._state == "EVENT":
self._event_data <<= 8
self._event_data |= octet
if self._event_off > 8:
self._event_off -= 8
else:
if self._event_src.fields:
offset = 0
for field_name, field_width in self._event_src.fields:
self._pending["%s-%s" % (field_name, self._event_src.name)] = \
(self._event_data >> offset) & ((1 << field_width) - 1)
offset += field_width
else:
self._pending[self._event_src.name] = self._event_data
self._state = "IDLE"
elif self._state in "DELAY" and is_special and \
special in (SPECIAL_DONE, SPECIAL_OVERRUN):
self._flush_timestamp()
if special == SPECIAL_DONE:
self._state = "DONE"
elif special == SPECIAL_OVERRUN:
self._state = "OVERRUN"
else:
raise TraceDecodingError("at byte offset %d: invalid byte %#04x for state %s" %
(self._byte_off, octet, self._state))
self._byte_off += 1
def flush(self, pending=False):
"""
Return the complete event timeline since the start of decoding or the previous flush.
If ``pending`` is ``True``, also flushes pending events; this may cause duplicate
timestamps if more events arrive after the flush.
"""
if self._state == "OVERRUN":
self._timeline.append((self._timestamp, "overrun"))
elif pending and self._pending or self._state == "DONE":
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
timeline, self._timeline = self._timeline, []
return timeline
def is_done(self):
return self._state in ("DONE", "OVERRUN")
# -------------------------------------------------------------------------------------------------
import unittest
from . import simulation_test
class EventAnalyzerTestbench(Module):
def __init__(self, **kwargs):
self.submodules.fifo = SyncFIFOBuffered(width=8, depth=64)
self.submodules.dut = EventAnalyzer(self.fifo, **kwargs)
def trigger(self, index, data):
yield self.dut.event_sources[index].trigger.eq(1)
if self.dut.event_sources[index].width > 0:
yield self.dut.event_sources[index].data.eq(data)
def step(self):
yield
for event_source in self.dut.event_sources:
yield event_source.trigger.eq(0)
def read(self, count, limit=128):
data = []
cycle = 0
while len(data) < count:
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if not (yield self.fifo.readable):
raise ValueError("FIFO underflow")
data.append((yield from self.fifo.read()))
yield
cycle = 16
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if (yield self.fifo.readable):
raise ValueError("junk in FIFO: %#04x at %d" % ((yield self.fifo.dout), count))
return data
class EventAnalyzerTestCase(unittest.TestCase):
def setUp(self):
self.tb = EventAnalyzerTestbench(event_depth=16)
def configure(self, tb, sources):
for n, args in enumerate(sources):
if not isinstance(args, tuple):
args = (args,)
tb.dut.add_event_source(str(n), "strobe", *args)
def assertEmitted(self, tb, data, decoded, flush_pending=True):
self.assertEqual((yield from tb.read(len(data))), data)
decoder = TraceDecoder(self.tb.dut.event_sources)
decoder.process(data)
self.assertEqual(decoder.flush(flush_pending), decoded)
@simulation_test(sources=(8,))
def test_one_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
], [
(2, {"0": 0xaa}),
])
@simulation_test(sources=(8,8))
def test_two_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.trigger(1, 0xbb)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
REPORT_EVENT|1, 0xbb,
], [
(2, {"0": 0xaa, "1": 0xbb}),
])
@simulation_test(sources=(12,))
def test_one_12bit_src(self, tb):
yield from tb.trigger(0, 0xabc)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0x0a, 0xbc,
], [
(2, {"0": 0xabc}),
])
@simulation_test(sources=(16,))
def test_one_16bit_src(self, tb):
yield from tb.trigger(0, 0xabcd)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd,
], [
(2, {"0": 0xabcd}),
])
@simulation_test(sources=(24,))
def test_one_24bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef
], [
(2, {"0": 0xabcdef}),
])
@simulation_test(sources=(32,))
def test_one_32bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef12)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef, 0x12
], [
(2, {"0": 0xabcdef12}),
])
@simulation_test(sources=(0,))
def test_one_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
], [
(2, {"0": None}),
])
@simulation_test(sources=(0,0))
def test_two_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1,
], [
(2, {"0": None, "1": None}),
])
@simulation_test(sources=(0,1))
def test_0bit_1bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1, 0b1
], [
(2, {"0": None, "1": 0b1}),
])
@simulation_test(sources=(1,0))
def test_1bit_0bit_src(self, tb):
yield from tb.trigger(0, 1)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_EVENT|1,
], [
(2, {"0": 0b1, "1": None}),
])
@simulation_test(sources=((3, (("a", 1), ("b", 2))),))
def test_fields(self, tb):
yield from tb.trigger(0, 0b101)
yield from tb.step()
yield from tb.trigger(0, 0b110)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b101,
REPORT_DELAY|1,
REPORT_EVENT|0, 0b110,
], [
(2, {"a-0": 0b1, "b-0": 0b10}),
(3, {"a-0": 0b0, "b-0": 0b11}),
])
@simulation_test(sources=(8,))
def test_delay(self, tb):
yield
yield
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield
yield from tb.trigger(0, 0xbb)
yield from tb.step()
yield
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|4,
REPORT_EVENT|0, 0xaa,
REPORT_DELAY|2,
REPORT_EVENT|0, 0xbb,
], [
(4, {"0": 0xaa}),
(6, {"0": 0xbb}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_2_septet(self, tb):
yield tb.dut._delay_timer.eq(0b1_1110000)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1110001,
REPORT_EVENT|0, 0b1
], [
(0b1_1110001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_3_septet(self, tb):
yield tb.dut._delay_timer.eq(0b01_0011000_1100011)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b0011000,
REPORT_DELAY|0b1100100,
REPORT_EVENT|0, 0b1
], [
(0b01_0011000_1100100, {"0": 0b1}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_max(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000011,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_EVENT|0, 0b1
], [
(0xffff, {"0": 0b1}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_overflow(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_EVENT|0, 0b1
], [
(0x10000, {"0": 0b1}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_overflow_p1(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000001,
REPORT_EVENT|0, 0b1
], [
(0x10001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
@unittest.skip("FIXME: see issue #182")
def test_delay_4_septet(self, tb):
for _ in range(64):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1000001,
REPORT_EVENT|0, 0b1
], [
(0xffff * 64 + 1, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_done(self, tb):
yield from tb.trigger(0, 1)
yield from tb.step()
yield
yield tb.dut.done.eq(1)
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_DELAY|2,
REPORT_SPECIAL|SPECIAL_DONE
], [
(2, {"0": 0b1}),
(4, {})
], flush_pending=False)
@simulation_test(sources=(1,))
def test_throttle_hyst(self, tb):
for x in range(16):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 1)
yield tb.fifo.re.eq(1)
for x in range(52):
yield
yield tb.fifo.re.eq(0)
yield
self.assertEqual((yield tb.dut.throttle), 0)
@simulation_test(sources=(1,))
def test_overrun(self, tb):
for x in range(18):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 1)
yield tb.fifo.re.eq(1)
for x in range(55):
while not (yield tb.fifo.readable):
yield
yield
yield tb.fifo.re.eq(0)
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_SPECIAL|SPECIAL_OVERRUN,
], [
(0x10000, "overrun"),
], flush_pending=False)
|
main/model.py | tucan9389/MobileHumanPose | 137 | 11145686 |
import torch
import torch.nn as nn
from torch.nn import functional as F
from backbone import *
from config import cfg
import os.path as osp
model_urls = {
'MobileNetV2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
'ResNet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'ResNet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'ResNet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'ResNet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'ResNet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'ResNext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
BACKBONE_DICT = {
'LPRES':LpNetResConcat,
'LPSKI':LpNetSkiConcat,
'LPWO':LpNetWoConcat
}
def soft_argmax(heatmaps, joint_num):
    # Flatten each joint's 3D heatmap and normalize it into a probability volume.
    heatmaps = heatmaps.reshape((-1, joint_num, cfg.depth_dim*cfg.output_shape[0]*cfg.output_shape[1]))
    heatmaps = F.softmax(heatmaps, 2)
    heatmaps = heatmaps.reshape((-1, joint_num, cfg.depth_dim, cfg.output_shape[0], cfg.output_shape[1]))
    # Marginalize the volume to get a 1D distribution along each axis.
    accu_x = heatmaps.sum(dim=(2,3))
    accu_y = heatmaps.sum(dim=(2,4))
    accu_z = heatmaps.sum(dim=(3,4))
    # Weight each position by its (1-based) index; broadcast keeps the index
    # tensor on the same GPU as the activations.
    accu_x = accu_x * torch.nn.parallel.comm.broadcast(torch.arange(1,cfg.output_shape[1]+1).type(torch.cuda.FloatTensor), devices=[accu_x.device.index])[0]
    accu_y = accu_y * torch.nn.parallel.comm.broadcast(torch.arange(1,cfg.output_shape[0]+1).type(torch.cuda.FloatTensor), devices=[accu_y.device.index])[0]
    accu_z = accu_z * torch.nn.parallel.comm.broadcast(torch.arange(1,cfg.depth_dim+1).type(torch.cuda.FloatTensor), devices=[accu_z.device.index])[0]
    # Expected coordinate along each axis (soft-argmax), shifted back to 0-based.
    accu_x = accu_x.sum(dim=2, keepdim=True) - 1
    accu_y = accu_y.sum(dim=2, keepdim=True) - 1
    accu_z = accu_z.sum(dim=2, keepdim=True) - 1
    coord_out = torch.cat((accu_x, accu_y, accu_z), dim=2)
    return coord_out
class CustomNet(nn.Module):
def __init__(self, backbone, joint_num):
super(CustomNet, self).__init__()
self.backbone = backbone
self.joint_num = joint_num
def forward(self, input_img, target=None):
fm = self.backbone(input_img)
coord = soft_argmax(fm, self.joint_num)
if target is None:
return coord
else:
target_coord = target['coord']
target_vis = target['vis']
target_have_depth = target['have_depth']
## coordinate loss
loss_coord = torch.abs(coord - target_coord) * target_vis
loss_coord = (loss_coord[:,:,0] + loss_coord[:,:,1] + loss_coord[:,:,2] * target_have_depth)/3.
return loss_coord
def get_pose_net(backbone_str, is_train, joint_num):
INPUT_SIZE = cfg.input_shape
EMBEDDING_SIZE = cfg.embedding_size # feature dimension
WIDTH_MULTIPLIER = cfg.width_multiplier
assert INPUT_SIZE == (256, 256)
print("=" * 60)
print("{} BackBone Generated".format(backbone_str))
print("=" * 60)
model = CustomNet(BACKBONE_DICT[backbone_str](input_size = INPUT_SIZE, joint_num = joint_num, embedding_size = EMBEDDING_SIZE, width_mult = WIDTH_MULTIPLIER), joint_num)
if is_train == True:
model.backbone.init_weights()
return model
|
pypower/t/t_skip.py | Bengt/PYPOWER | 221 | 11145691 |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Skips a number of tests.
"""
from pypower.t.t_globals import TestGlobals
def t_skip(cnt, msg=''):
"""Skips a number of tests.
Increments the global test count and skipped tests count. Prints
'skipped tests x..y : ' followed by the C{msg}, unless the
global variable t_quiet is true. Intended to be called between calls to
C{t_begin} and C{t_end}.
@author: <NAME> (PSERC Cornell)
"""
msg = ' : ' + msg
TestGlobals.t_skip_cnt = TestGlobals.t_skip_cnt + cnt
if not TestGlobals.t_quiet:
print('skipped tests %d..%d%s' % (TestGlobals.t_counter,
TestGlobals.t_counter + cnt - 1,
msg))
TestGlobals.t_counter = TestGlobals.t_counter + cnt
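# Hedged usage sketch (not part of the original module): it assumes the companion
# helpers t_begin, t_ok and t_end from this test package; the test counts and
# messages below are illustrative only.
def _t_skip_example():
    from pypower.t.t_begin import t_begin
    from pypower.t.t_ok import t_ok
    from pypower.t.t_end import t_end
    t_begin(3, quiet=False)
    t_ok(1 + 1 == 2, 'basic arithmetic')
    t_skip(2, 'feature not implemented on this platform')
    t_end()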
|
python/tvm/ir/op.py | XiaoSong9905/tvm | 4,640 | 11145713 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Primitive operators in the TVM IR."""
import tvm._ffi
from .expr import RelayExpr
from . import _ffi_api
@tvm._ffi.register_object("Op")
class Op(RelayExpr):
"""Primitive operator in the IR."""
def __init__(self):
raise RuntimeError("Cannot create op, use get instead")
@staticmethod
def get(op_name):
"""Get the Op for a given name
Parameters
----------
op_name : str
The operator name
Returns
-------
op : Op
The op of the corresponding name
"""
return _ffi_api.GetOp(op_name)
def get_attr(self, attr_name):
"""Get additional attribute about the operator.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : object
The attribute value
"""
return _ffi_api.OpGetAttr(self, attr_name)
def has_attr(self, attr_name):
"""Check whether the operator has additional attribute.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : bool
Whether the operator has additional attribute
"""
return _ffi_api.OpHasAttr(self, attr_name)
def set_attr(self, attr_name, value, plevel=10):
"""Set attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
value : object
The attribute value
plevel : int
The priority level
"""
_ffi_api.OpSetAttr(self, attr_name, value, plevel)
def reset_attr(self, attr_name):
"""Reset attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
"""
_ffi_api.OpResetAttr(self, attr_name)
def add_type_rel(self, rel_name, type_rel_func=None):
"""Attach the type function corresponding to the return type.
Parameters
----------
rel_name : str
The type relation name to register.
type_rel_func : Optional[function (args: List[Type], attrs: Attrs) -> Type]
The backing relation function which can solve an arbitrary relation on variables.
Differences with type_rel_func in C++:
1) When type_rel_func is not None
a) OpAddTypeRel on C++ side will adjust type_rel_func with TypeReporter to
calling convention of relay type system.
b) type_rel_func returns output argument's type, return None means can't
infer output's type.
c) only support single output operators for now, the last argument is output tensor.
2) when type_rel_func is None, will call predefined type_rel_funcs in relay
according to ``tvm.relay.type_relation.`` + rel_name.
"""
_ffi_api.OpAddTypeRel(self, rel_name, type_rel_func)
def add_argument(self, name, type, description): # pylint: disable=redefined-builtin
"""Add arguments information to the function.
Parameters
----------
name : str
The argument name.
type : str
The argument type.
description : str
The argument description.
"""
_ffi_api.OpAddArgument(self, name, type, description)
def set_support_level(self, level):
"""Set the support level of op.
Parameters
----------
level : int
The support level.
"""
_ffi_api.OpSetSupportLevel(self, level)
def set_num_inputs(self, n):
"""Set the support level of op.
Parameters
----------
n : int
The input number.
"""
_ffi_api.OpSetNumInputs(self, n)
def set_attrs_type_key(self, key):
"""Set the attribute type key of op.
Parameters
----------
key : str
The type key.
"""
_ffi_api.OpSetAttrsTypeKey(self, key)
@staticmethod
def list_op_names():
"""List all the op names in the op registry.
Returns
-------
value : List[str]
The registered op names
"""
return _ffi_api.ListOpNames()
def register_op_attr(op_name, attr_key, value=None, level=10):
"""Register an operator property of an operator by name.
Parameters
----------
op_name : str
The name of operator
attr_key : str
The attribute name.
value : object, optional
The value to set
level : int, optional
The priority level
Returns
-------
fregister : function
Register function if value is not specified.
"""
def _register(v):
"""internal register function"""
_ffi_api.RegisterOpAttr(op_name, attr_key, v, level)
return v
return _register(value) if value is not None else _register
def register_intrin_lowering(
op_name,
target,
*,
f=None,
level=10,
):
"""Register Op lowering function
Parameters
----------
op_name : str
The op name
target : str
The target string for given intrinsic lowering function
f : function, optional
The function to be registered.
level : int
The priority level
Returns
-------
fregister : function
Register op lowering function if f is not specified.
"""
def _register(f):
"""internal register function"""
_ffi_api.RegisterOpLowerIntrinsic(op_name, f, target, level)
return f
return _register(f) if f is not None else _register
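# Hedged usage sketch (not part of the original module): "add" is assumed to be a
# registered operator name in a standard TVM build, and "TMyCustomAttr" is a
# made-up attribute key used purely for illustration.
def _op_registry_example():
    op = Op.get("add")  # look up a primitive operator by name
    register_op_attr("add", "TMyCustomAttr", value=42, level=11)
    assert op.has_attr("TMyCustomAttr")
    return op.get_attr("TMyCustomAttr")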
|
pygithub3/services/pull_requests/__init__.py | teamorchard/python-github3 | 107 | 11145737 | # -*- encoding: utf-8 -*-
from pygithub3.services.base import Service, MimeTypeMixin
from .comments import Comments
class PullRequests(Service, MimeTypeMixin):
"""Consume `Pull Request API <http://developer.github.com/v3/pulls/>`_"""
def __init__(self, **config):
self.comments = Comments(**config)
super(PullRequests, self).__init__(**config)
def list(self, state='open', user=None, repo=None):
"""List all of the pull requests for a repo
:param str state: Pull requests state ('open' or 'closed')
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
return self._get_result(
self.make_request('pull_requests.list', user=user, repo=repo),
state=state
)
def get(self, number, user=None, repo=None):
"""Get a single pull request
:param str number: The number of the pull request to get
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
return self._get(
self.make_request('pull_requests.get', number=number, user=user,
repo=repo)
)
def create(self, data, user=None, repo=None):
"""Create a pull request
:param dict data: Input. See `github pullrequests doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
return self._post(
self.make_request('pull_requests.create', body=data, user=user,
repo=repo)
)
def update(self, number, data, user=None, repo=None):
"""Update a pull request
        :param str number: The number of the pull request to update
:param dict data: Input. See `github pullrequests doc`_
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
return self._patch(
self.make_request('pull_requests.update', number=number,
body=data, user=user, repo=repo)
)
def list_commits(self, number, user=None, repo=None):
"""List the commits for a pull request
:param str number: The number of the pull request to list commits for
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
return self._get_result(
self.make_request('pull_requests.list_commits', number=number,
user=user, repo=repo)
)
def list_files(self, number, user=None, repo=None):
"""List the files for a pull request
:param str number: The number of the pull request to list files for
:param str user: Username
:param str repo: Repository
:returns: A :doc:`result`
.. note::
Remember :ref:`config precedence`
"""
return self._get_result(
self.make_request('pull_requests.list_files', number=number,
user=user, repo=repo)
)
def is_merged(self, number, user=None, repo=None):
"""Gets whether a pull request has been merged or not.
:param str number: The pull request to check
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
"""
return self._bool(
self.make_request('pull_requests.is_merged', number=number,
user=user, repo=repo)
)
def merge(self, number, message='', user=None, repo=None):
"""Merge a pull request.
:param str number: The pull request to merge
:param str message: Message of pull request
:param str user: Username
:param str repo: Repository
.. note::
Remember :ref:`config precedence`
This currently raises an HTTP 405 error if the request is not
mergable.
"""
body = {'commit_message': message}
return self._put(
self.make_request('pull_requests.merge', number=number,
body=body, user=user, repo=repo)
)
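# Hedged usage sketch (not part of the original module): it assumes the usual
# pygithub3 `Github` facade and a public repository; the user and repo names
# below are placeholders.
def _pull_requests_example():
    from pygithub3 import Github
    gh = Github(user='octocat', repo='Hello-World')
    for pull in gh.pull_requests.list(state='open').all():
        print(pull.number, pull.title)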
|
evennia/scripts/migrations/0003_checksessions_defaultscript_donothing_scriptbase_store_validatechannelhandler_validateidmappercache_.py | Jaykingamez/evennia | 1,544 | 11145747 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("scripts", "0002_auto_20150118_1625")]
operations = [
migrations.CreateModel(
name="ScriptBase", fields=[], options={"proxy": True}, bases=("scripts.scriptdb",)
),
migrations.CreateModel(
name="DefaultScript", fields=[], options={"proxy": True}, bases=("scripts.scriptbase",)
),
migrations.CreateModel(
name="DoNothing", fields=[], options={"proxy": True}, bases=("scripts.defaultscript",)
),
migrations.CreateModel(
name="CheckSessions",
fields=[],
options={"proxy": True},
bases=("scripts.defaultscript",),
),
migrations.CreateModel(
name="Store", fields=[], options={"proxy": True}, bases=("scripts.defaultscript",)
),
migrations.CreateModel(
name="ValidateChannelHandler",
fields=[],
options={"proxy": True},
bases=("scripts.defaultscript",),
),
migrations.CreateModel(
name="ValidateIdmapperCache",
fields=[],
options={"proxy": True},
bases=("scripts.defaultscript",),
),
migrations.CreateModel(
name="ValidateScripts",
fields=[],
options={"proxy": True},
bases=("scripts.defaultscript",),
),
]
|
tests/some_test.py | lmicra/paco | 208 | 11145763 |
# -*- coding: utf-8 -*-
import pytest
import asyncio
from paco import some
from .helpers import run_in_loop
@asyncio.coroutine
def coro(num):
return num < 2
@asyncio.coroutine
def coro_false(num):
return num > 10
def test_some_truly():
task = some(coro, [1, 2, 3, 4, 3, 1])
assert run_in_loop(task) is True
def test_some_false():
task = some(coro_false, [1, 2, 3, 4, 3, 1])
assert run_in_loop(task) is False
def test_some_empty():
task = some(coro, [])
assert run_in_loop(task) is False
def test_some_invalid_input():
with pytest.raises(TypeError):
run_in_loop(some(coro, None))
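# Hedged usage sketch (not part of the original test module): paco.some resolves
# to True as soon as the coroutine returns True for any item; the input list is
# arbitrary.
def _some_example():
    assert run_in_loop(some(coro, [5, 6, 1])) is True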
|
rlschool/quadrotor/render.py | ANCL/QuadPPO | 169 | 11145771 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import time
import numpy as np
from collections import deque
from colour import Color
import trimesh
from trimesh import rendering
import pyglet
from pyglet import image
from pyglet import gl
from pyglet.graphics import Batch, TextureGroup
from rlschool.quadrotor.utils import TEXTURE_PATH, TILE, FACES
from rlschool.quadrotor.utils import sectorize, cube_vertices, geometry_hash, \
rotation_transform_mat
class Map(object):
"""
Map is an object to describe the virtual world.
For `no_collision` task, the `Map` contains a floor and other optional
obstacle walls. Drone is rendered as 3D model with flighting pose.
For `velocity_control` task, the `Map` ONLY contains a 3D drone model.
Moreover, the velocity vector of the drone is shown with an orange arrow;
the expected velocity vector of the drone is shown with a yellow arrow.
Args:
drone_3d_model (str): path to 3D STL model of the drone.
horizon_view_size (int): number of blocks to show in horizon view.
init_drone_z (float): the initial height of the drone.
task (str): name of the task setting. Currently, support
`no_collision` and `velocity_control`.
"""
def __init__(self,
drone_3d_model,
horizon_view_size=8,
init_drone_z=5,
task='no_collision',
debug_mode=False):
self.task = task
self.debug_mode = debug_mode
        # Increasing this shows more blocks in the current view window
self.horizon_view_size = horizon_view_size
# A Batch is a collection of vertex lists for batched rendering
self.batch = Batch()
# Manages an OpenGL texture
self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture())
# A mapping from position to the texture for whole, global map
self.whole_map = dict()
# Same as `whole_map` but only contains the positions to show
self.partial_map = dict()
# A mapping from position to a pyglet `VertextList` in `partial_map`
self._partial_map = dict()
# A mapping from sector to a list of positions (contiguous sub-region)
# using sectors for fast rendering
self.sectors = dict()
# Use deque to populate calling of `_show_block` and `_hide_block`
self.queue = deque()
# A graphics batch to draw drone 3D model
self.drone_batch = pyglet.graphics.Batch()
# Load drone triangular mesh and scene
self.drone_name = os.path.basename(drone_3d_model)
self.drone_mesh = trimesh.load(drone_3d_model)
self.drone_scene = self.drone_mesh.scene()
# Drawer stores drone scene geometry as vertex list in its model space
self.drone_drawer = None
        # Store drone geometry hashes for easy retrieval
self.drone_vertex_list_hash = ''
# Store drone geometry rendering mode, default gl.GL_TRIANGLES
self.drone_vertex_list_mode = gl.GL_TRIANGLES
# Store drone geometry texture
self.drone_texture = None
black = np.array([0, 0, 0, 255], dtype=np.uint8)
red = np.array([255, 0, 0, 255], dtype=np.uint8)
green = np.array([0, 255, 0, 255], dtype=np.uint8)
blue = np.array([0, 0, 255, 255], dtype=np.uint8)
for i, facet in enumerate(self.drone_mesh.facets):
if i < 30:
self.drone_mesh.visual.face_colors[facet] = black
elif i < 42:
self.drone_mesh.visual.face_colors[facet] = red
elif i < 54:
self.drone_mesh.visual.face_colors[facet] = green
elif i < 66:
self.drone_mesh.visual.face_colors[facet] = blue
else:
self.drone_mesh.visual.face_colors[facet] = black
# Mark positions of bounding wall and obstacles in the map
self._initialize(init_drone_z)
def _initialize(self, init_drone_z):
if self.task in ['no_collision', 'hovering_control']:
h = w = 100
for y in range(0, h):
for x in range(0, w):
# Pave the floor
self._add_block((x, y, 0), TILE, immediate=False)
elif self.task == 'velocity_control':
h = w = 0
self.drone_pos = [h // 2, w // 2, init_drone_z]
self._add_drone()
if self.task == 'velocity_control':
self.drone_velocity_drawer = self._add_drone_velocity(
np.array([0.0, 0.0, 1.0]), color=[255, 95, 63]) # orange
self.drone_expected_velocity_drawer = self._add_drone_velocity(
np.array([0.0, 0.0, 1.0]), color=[240, 210, 90]) # yellow
def _is_exposed(self, position):
x, y, z = position
for dx, dy, dz in FACES:
if (x+dx, y+dy, z+dz) not in self.whole_map:
# At least one face is not covered by another cube block.
return True
return False
def _add_drone(self):
""" Add the drone 3D model in its own model space.
"""
for name, geom in self.drone_scene.geometry.items():
if geom.is_empty:
continue
if geometry_hash(geom) == self.drone_vertex_list_hash:
continue
if name == self.drone_name:
args = rendering.convert_to_vertexlist(geom, smooth=True)
self.drone_drawer = self.drone_batch.add_indexed(*args)
self.drone_vertex_list_hash = geometry_hash(geom)
self.drone_vertex_list_mode = args[1]
try:
assert len(geom.visual.uv) == len(geom.vertices)
has_texture = True
except BaseException:
has_texture = False
if has_texture:
self.drone_texture = rendering.material_to_texture(
geom.visual.material)
def _add_drone_velocity(self, init_velocity_vector, radius=0.008,
color=[255, 0, 0]):
"""
Add the drone velocity vector as a cylinder into drone drawer batch.
"""
translation = np.eye(4)
translation[:3, 3] = [0, 0, 0.5]
height = np.linalg.norm(init_velocity_vector)
transform_z_axis = init_velocity_vector / height
transform = np.eye(4)
transform[:3, 2] = transform_z_axis
transform = np.dot(translation, transform)
velocity_axis = trimesh.creation.cylinder(
radius=radius, height=height, transform=transform)
velocity_axis.visual.face_colors = color
axis_origin = trimesh.creation.uv_sphere(
radius=radius*5, count=[10, 10])
axis_origin.visual.face_colors = color
merge = trimesh.util.concatenate([axis_origin, velocity_axis])
args = rendering.convert_to_vertexlist(merge)
drawer = self.drone_batch.add_indexed(*args)
return drawer
def _add_block(self, position, texture, immediate=True):
""" Add a block with the given `texture` and `position` to the world.
Note that block is a 1x1x1 cube and its position is its centroid.
Args:
position (tuple): The (x, y, z) position of the block to add.
texture (list): The coordinates of the texture squares, e.g. TILE.
immediate (bool): Whether or not to draw the block immediately.
"""
if position in self.whole_map:
# Not called for current static map
assert False, 'Duplicated block!'
self._remove_block(position, immediate)
self.whole_map[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if immediate:
if self._is_exposed(position):
self.show_block(position)
self._check_neighbors(position)
def _remove_block(self, position, immediate=True):
""" Remove the block at the given `position`.
Args:
position (tuple): The (x, y, z) position of the block to remove.
immediate (bool): Whether or not to remove the block immediately.
"""
del self.whole_map[position]
self.sectors[sectorize(position)].remove(position)
if immediate:
if position in self.partial_map:
self.hide_block(position)
self._check_neighbors(position)
def _check_neighbors(self, position):
x, y, z = position
for dx, dy, dz in FACES:
pos = (x+dx, y+dy, z+dz)
if pos not in self.whole_map:
continue
if self._is_exposed(pos):
if pos not in self.partial_map:
self.show_block(pos)
else:
if pos in self.partial_map:
self.hide_block(pos)
def _show_block(self, position, texture):
vertex_data = cube_vertices(position, 0.5) # 12x6=72
texture_data = list(texture) # 8x6=48
vertex_count = len(vertex_data) // 3 # 24
attributes = [
('v3f/static', vertex_data),
('t2f/static', texture_data)
]
self._partial_map[position] = self.batch.add(
vertex_count, gl.GL_QUADS, self.group, *attributes)
def _hide_block(self, position):
self._partial_map.pop(position).delete()
def _enqueue(self, func, *args):
self.queue.append((func, args))
def _dequeue(self):
func, args = self.queue.popleft()
func(*args)
def _get_velocity_transform(self, velocity, position):
height = np.linalg.norm(velocity)
transform = np.eye(4)
# Translation
x, z, y = position
transform[:3, 3] = [x, y, z]
# Rescale
transform[2, 2] = height
# Rotate
rotation = np.eye(4)
rotation[:3, 2] = velocity / height
return np.dot(transform, rotation)
def show_drone(self, position, rotation):
"""
Show the drone 3D model with corresponding translation and rotation.
"""
# Get the transform matrix for drone 3D model
x, z, y = position
transform = np.eye(4)
transform[:3, 3] = [x, y, z]
# NOTE: change the view size of drone 3D model
transform[0, 0] = 2.5
transform[1, 1] = 2.5
transform[2, 2] = 2.5
# Match drone model space x-y-z to openGL x-z-y
# TODO: read the config.json and match the propeller positions
model_space_transform = rotation_transform_mat(-np.pi / 2, 'roll')
transform = np.dot(transform, model_space_transform)
yaw, pitch, roll = rotation
if self.debug_mode:
# NOTE: manually set values to debug rotation,
# it's useful when input act is in form [c, c, c, c].
yaw = np.pi / 2
# pitch = np.pi / 2
# roll = np.pi / 2
transform = np.dot(transform, rotation_transform_mat(yaw, 'yaw'))
transform = np.dot(transform, rotation_transform_mat(pitch, 'pitch'))
transform = np.dot(transform, rotation_transform_mat(roll, 'roll'))
# Add a new matrix to the model stack to transform the model
gl.glPushMatrix()
gl.glMultMatrixf(rendering.matrix_to_gl(transform))
# Enable the target texture
if self.drone_texture is not None:
gl.glEnable(self.drone_texture.target)
gl.glBindTexture(self.drone_texture.target, self.drone_texture.id)
# Draw the mesh with its transform applied
self.drone_drawer.draw(mode=self.drone_vertex_list_mode)
gl.glPopMatrix()
# Disable texture after using
if self.drone_texture is not None:
gl.glDisable(self.drone_texture.target)
def show_velocity(self, position, velocity, expected_velocity=None):
"""
Show velocity vector as a thin cylinder arrow.
"""
if not hasattr(self, 'drone_velocity_drawer'):
return
transform = self._get_velocity_transform(velocity, position)
gl.glPushMatrix()
gl.glMultMatrixf(rendering.matrix_to_gl(transform))
self.drone_velocity_drawer.draw(mode=self.drone_vertex_list_mode)
gl.glPopMatrix()
if expected_velocity is not None and \
hasattr(self, 'drone_expected_velocity_drawer'):
transform = self._get_velocity_transform(
expected_velocity, position)
gl.glPushMatrix()
gl.glMultMatrixf(rendering.matrix_to_gl(transform))
self.drone_expected_velocity_drawer.draw(
mode=self.drone_vertex_list_mode)
gl.glPopMatrix()
def show_block(self, position, immediate=True):
texture = self.whole_map[position]
self.partial_map[position] = texture
if immediate:
self._show_block(position, texture)
else:
self._enqueue(self._show_block, position, texture)
def hide_block(self, position, immediate=True):
self.partial_map.pop(position)
if immediate:
self._hide_block(position)
else:
self._enqueue(self._hide_block, position)
def show_sector(self, sector):
for position in self.sectors.get(sector, []):
if position not in self.partial_map and self._is_exposed(position):
self.show_block(position, immediate=False)
def hide_sector(self, sector):
for position in self.sectors.get(sector, []):
if position in self.partial_map:
self.hide_block(position, immediate=False)
def change_sectors(self, before, after):
"""
Find the changed sectors and trigger show or hide operations
"""
# TODO: adjust the sector set when add extra view perspective
# relative to the drone.
# FIXME: when the drone flies high, the green floor immediately
        # disappears
before_set, after_set = set(), set()
pad = self.horizon_view_size // 2
for dx in range(-pad, pad+1):
for dy in range(-pad, pad+1):
dz = 0
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x+dx, y+dy, z+dz))
if after:
x, y, z = after
after_set.add((x+dx, y+dy, z+dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def process_queue(self):
# NOTE: no scheduled interval timer, we render by manually calling
        # `RenderWindow.view()`, so we process the queue without time constraints.
# In other words, it's a copy of `process_entire_queue()`
while self.queue:
self._dequeue()
def process_entire_queue(self):
while self.queue:
self._dequeue()
class RenderWindow(pyglet.window.Window):
"""
Pyglet window to render the `Map`.
Args:
drone_3d_model (str): path to 3D STL model of the drone.
horizon_view_size (int): number of blocks to show in horizon view.
x_offset (float): the offset between init drone position and
map (0, 0, 0) position from x-axis.
y_offset (float): the offset between init drone position and
map (0, 0, 0) position from y-axis.
z_offset (float): the offset between init drone position and
map (0, 0, 0) position from z-axis.
perspective_fovy (float): the field of view angle in degrees,
in the y direction.
perspective_aspect (float): the ratio of x (width) to y (height).
perspective_zNear (float): the distance from the viewer to the
near clipping plane.
perspective_zFar (float): the distance from the viewer to the
far clipping plane.
perspective_y_offset (float): the distance from the viewer to
the drone model along the y direction.
perspective_z_offset (float): the distance from the viewer to
the drone model along the z direction.
sky_light_blue (str): hex color to set the light blue color.
sky_dark_blue (str): hex color to set the dark blue color.
width (int): width of the pyglet viewer window.
height (int): height of the pyglet viewer window.
caption (str): title of the pyglet viewer window.
task (str): name of the task setting. Currently, support
`no_collision` and `velocity_control`.
"""
def __init__(self,
drone_3d_model=None,
horizon_view_size=8,
x_offset=0,
y_offset=0,
z_offset=0,
perspective_fovy=65.,
perspective_aspect=4/3., # i.e. 800/600
perspective_zNear=0.1,
perspective_zFar=60.,
perspective_y_offset=3,
perspective_z_offset=3,
sky_light_blue='#00d4ff',
sky_dark_blue='#020024',
width=800,
height=600,
caption='quadrotor',
task='no_collision',
debug_mode=False):
if drone_3d_model is None:
this_dir = os.path.realpath(os.path.dirname(__file__))
drone_3d_model = os.path.join(this_dir, 'quadcopter.stl')
super(RenderWindow, self).__init__(
width=width, height=height, caption=caption, resizable=False)
self.task = task
self.debug_mode = debug_mode
self.x_offset = x_offset
self.y_offset = y_offset
self.z_offset = z_offset
self.internal_map = Map(
drone_3d_model,
horizon_view_size=horizon_view_size,
init_drone_z=self.z_offset,
task=task,
debug_mode=debug_mode)
# The label to display in the top-left of the canvas
self.label = pyglet.text.Label(
'', font_name='Arial', font_size=18, x=10, y=self.height - 10,
anchor_x='left', anchor_y='top', color=(255, 0, 0, 255))
# Current (x, y, z) position of the drone in the world,
# specified with floats.
self.position = tuple([float(i) for i in self.internal_map.drone_pos])
# (vertical plane rotation, horizontal rotation)
# vertical rotation: [-90, 90], horizontal rotation unbounded
# TODO: update the rotation according the drone initial pose
self.rotation = (-30, 0)
if debug_mode:
self.rotation = (0, 0)
# Config perspective
self.perspective = [perspective_fovy, perspective_aspect,
perspective_zNear, perspective_zFar]
self.perspective_over_drone = [
perspective_y_offset, perspective_z_offset]
self.sector = None
light_blue = Color(sky_light_blue)
dark_blue = Color(sky_dark_blue)
self.colors = [list(i.rgb) + [1.0] for i in
list(light_blue.range_to(dark_blue, 700))]
self._gl_set_background(self.colors[0])
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
# self._gl_enable_lighting(self.internal_map.drone_scene)
self.set_visible()
def update(self, dt):
self.internal_map.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.internal_map.change_sectors(self.sector, sector)
if self.sector is None:
self.internal_map.process_entire_queue()
self.sector = sector
def view(self, drone_state, dt, expected_velocity=None):
# NOTE: because sim coord is (x => left, y => inner, z => up),
# gl coord is (x => left, y => up, z => outer),
# we remap gl-y to sim-z, gl-z to sim-y, then reverse sim-y
# In this way, we can transform gl coord to sim coord.
        # Here we use `-drone_state['y']`.
self.position = (drone_state['x'] + self.x_offset,
-drone_state['y'] + self.y_offset,
drone_state['z'] + self.z_offset)
rot = (drone_state['yaw'], drone_state['pitch'], drone_state['roll'])
if self.task == 'velocity_control':
assert expected_velocity is not None
ev_x, ev_y, ev_z = expected_velocity
expected_velocity = np.array([ev_x, ev_z, ev_y])
velocity = np.array([drone_state['g_v_x'], drone_state['g_v_z'],
drone_state['g_v_y']])
cid = abs(int(drone_state['z'] / 0.1)) % len(self.colors)
self._gl_set_background(self.colors[cid])
# Actually, `dt` does not work now, as we update the state in env.py
self.update(dt)
self.clear()
self._setup_3d()
gl.glColor3d(1, 1, 1)
self.internal_map.batch.draw()
self.internal_map.show_drone(self.position, rot)
if self.task == 'velocity_control':
self.internal_map.show_velocity(
self.position, velocity, expected_velocity)
self._setup_2d()
self._draw_label()
self.dispatch_events()
self.flip()
time.sleep(dt)
def _setup_2d(self):
w, h = self.get_size()
gl.glDisable(gl.GL_DEPTH_TEST)
viewport = self.get_viewport_size()
gl.glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, max(1, w), 0, max(1, h), -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
def _setup_3d(self):
w, h = self.get_size()
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glDepthFunc(gl.GL_LEQUAL)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glEnable(gl.GL_CULL_FACE)
viewport = self.get_viewport_size()
gl.glViewport(0, 0, max(1, viewport[0]), max(1, viewport[1]))
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.gluPerspective(*self.perspective)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
y, x = self.rotation
gl.glRotatef(x, 0, 1, 0)
gl.glRotatef(-y, math.cos(math.radians(x)),
0, math.sin(math.radians(x)))
# NOTE: for GL render, its x-z plane is the ground plane,
# so we unpack the position using `(x, z, y)` instead of `(x, y, z)`
x, z, y = self.position
if not self.debug_mode:
y += self.perspective_over_drone[0]
z += self.perspective_over_drone[1]
gl.glTranslatef(-x, -y, -z)
def _draw_label(self):
x, y, z = self.position
x -= self.x_offset
y -= self.y_offset
y = -y
self.label.text = 'xyz: (%.2f, %.2f, %.2f)' % (x, y, z)
self.label.draw()
@staticmethod
def _gl_set_background(background):
gl.glClearColor(*background)
@staticmethod
def _gl_unset_background():
gl.glClearColor(*[0, 0, 0, 0])
@staticmethod
def _gl_enable_color_material():
gl.glColorMaterial(gl.GL_FRONT_AND_BACK,
gl.GL_AMBIENT_AND_DIFFUSE)
gl.glEnable(gl.GL_COLOR_MATERIAL)
gl.glShadeModel(gl.GL_SMOOTH)
gl.glMaterialfv(gl.GL_FRONT,
gl.GL_AMBIENT,
rendering.vector_to_gl(
0.192250, 0.192250, 0.192250))
gl.glMaterialfv(gl.GL_FRONT,
gl.GL_DIFFUSE,
rendering.vector_to_gl(
0.507540, 0.507540, 0.507540))
gl.glMaterialfv(gl.GL_FRONT,
gl.GL_SPECULAR,
rendering.vector_to_gl(
.5082730, .5082730, .5082730))
gl.glMaterialf(gl.GL_FRONT,
gl.GL_SHININESS,
.4 * 128.0)
@staticmethod
def _gl_enable_blending():
# enable blending for transparency
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,
gl.GL_ONE_MINUS_SRC_ALPHA)
@staticmethod
def _gl_enable_smooth_lines():
# make the lines from Path3D objects less ugly
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
# set the width of lines to 4 pixels
gl.glLineWidth(4)
# set PointCloud markers to 4 pixels in size
gl.glPointSize(4)
@staticmethod
def _gl_enable_lighting(scene):
"""
Take the lights defined in scene.lights and
apply them as openGL lights.
"""
gl.glEnable(gl.GL_LIGHTING)
# opengl only supports 7 lights?
for i, light in enumerate(scene.lights[:7]):
# the index of which light we have
lightN = eval('gl.GL_LIGHT{}'.format(i))
# get the transform for the light by name
matrix = scene.graph.get(light.name)[0]
# convert light object to glLightfv calls
multiargs = rendering.light_to_gl(
light=light,
transform=matrix,
lightN=lightN)
# enable the light in question
gl.glEnable(lightN)
# run the glLightfv calls
for args in multiargs:
gl.glLightfv(*args)
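# Hedged usage sketch (not part of the original module): it needs an
# OpenGL-capable display plus the bundled quadcopter.stl, and the drone state
# values below are arbitrary placeholders.
def _render_window_example():
    window = RenderWindow(task='no_collision', z_offset=5)
    state = {'x': 0.0, 'y': 0.0, 'z': 5.0,
             'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0}
    for _ in range(10):
        window.view(state, dt=0.02)
    window.close()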
|
easytransfer/model_zoo/modeling_utils.py | mczhuge/Kaleido-BERT | 109 | 11145780 |
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import os
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from easytransfer.engines.model import FLAGS
from easytransfer import layers
class PretrainedConfig(object):
def __init__(self, **kwargs):
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
tf.logging.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@classmethod
def get(cls, json_file, **kwargs):
config_dict = cls._dict_from_json_file(json_file)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def from_dict(cls, config_dict, **kwargs):
config = cls(**config_dict)
for key, value in kwargs.items():
setattr(config, key, value)
return config
@classmethod
def _dict_from_json_file(cls, json_file):
with gfile.GFile(json_file, mode='r') as reader:
text = reader.read()
return json.loads(text)
class PreTrainedModel(layers.Layer):
config_class = None
pretrained_model_archive_map = {}
pretrained_config_archive_map = {}
@classmethod
def dummy_inputs(self, seq_length):
""" Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
#input_ids = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
input_ids = [[1]*seq_length]
return tf.constant(input_ids)
def __init__(self, config, **kwargs):
kwargs.clear()
super(PreTrainedModel, self).__init__(**kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@classmethod
def get(cls, pretrained_model_name_or_path, **kwargs):
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_path = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
config_path = os.path.join(FLAGS.modelZooBasePath, config_path)
else:
config_path = os.path.join(os.path.dirname(pretrained_model_name_or_path), "config.json")
config = cls.config_class.get(
config_path,
**kwargs)
model = cls(config, **kwargs)
model(model.dummy_inputs(kwargs.get('input_sequence_length', 512)), mode='eval', output_features=False)
archive_file = None
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
archive_file = os.path.join(FLAGS.modelZooBasePath, archive_file)
elif "/" in pretrained_model_name_or_path:
archive_file = pretrained_model_name_or_path
        if archive_file is not None and tf.gfile.Exists(archive_file + ".data-00000-of-00001"):
model._init_from_pretrained_model(archive_file)
else:
tf.logging.info("archive file {} does not exists".format(archive_file))
tf.logging.info("ckpt {} not in model zoo, random initialization".format(pretrained_model_name_or_path))
return model
def _init_from_pretrained_model(self, pretrained_model_path):
tvars = tf.trainable_variables()
network_name_to_variable = {}
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
network_name_to_variable[name] = var
try:
reader = pywrap_tensorflow.NewCheckpointReader(pretrained_model_path)
var_to_shape_map = reader.get_variable_to_shape_map()
except errors_impl.DataLossError:
raise ImportError(
'`load_weights` requires correct tf ckpts.')
assignment_map = {}
for key in var_to_shape_map:
if "Adam" in key or "beta1_power" in key or "beta2_power" in key:
continue
if "global_step" in key:
continue
var = None
if "pre_trained_model" in key:
root_key = key.replace(key.split("/")[0]+"/","")
else:
root_key = key
for network_key in network_name_to_variable.keys():
if root_key in network_key:
var = network_name_to_variable[network_key]
break
if var is None:
print("Variable: {} in ckpt not in trainable variable".format(key))
continue
#raise ValueError("ckpt var name {} not in trainable variable".format(key))
assignment_map[key] = var
tf.logging.info("Load key {} from {}".format(key, pretrained_model_path))
tf.logging.info("Load weights from {}".format(pretrained_model_path))
tf.train.init_from_checkpoint(pretrained_model_path, assignment_map)
def init_from_checkpoint_without_training_ops(pretrained_model_path):
tvars = tf.trainable_variables()
network_name_to_variable = {}
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
network_name_to_variable[name] = var
try:
reader = pywrap_tensorflow.NewCheckpointReader(pretrained_model_path)
var_to_shape_map = reader.get_variable_to_shape_map()
except errors_impl.DataLossError:
raise ImportError(
'`load_weights` requires correct tf ckpts.')
assignment_map = {}
for key in var_to_shape_map:
if "Adam" in key or "beta1_power" in key or "beta2_power" in key:
continue
if "global_step" in key:
continue
var = None
if "pre_trained_model" in key:
root_key = key.replace(key.split("/")[0]+"/","")
else:
root_key = key
for network_key in network_name_to_variable.keys():
if root_key in network_key:
var = network_name_to_variable[network_key]
break
if var is None:
print("Variable: {} in ckpt not in trainable variable".format(key))
continue
#raise ValueError("ckpt var name {} not in trainable variable".format(key))
assignment_map[key] = var
tf.logging.info("Load weights from {}".format(pretrained_model_path))
tf.train.init_from_checkpoint(pretrained_model_path, assignment_map)
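# Hedged usage sketch (not part of the original module): the configuration
# fields below are placeholders rather than a real model-zoo entry.
def _pretrained_config_example():
    config = PretrainedConfig.from_dict({"hidden_size": 768, "num_hidden_layers": 12})
    return config.hidden_size  # -> 768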
|
bench_fio/benchlib/runfio.py | Flowrey/fio-plot | 148 | 11145788 | #!/usr/bin/env python3
import subprocess
import sys
import os
import copy
from numpy import linspace
import time
from . import (
supporting,
checks
)
def check_fio_version(settings):
"""The 3.x series .json format is different from the 2.x series format.
This breaks fio-plot, thus this older version is not supported.
"""
command = ["fio", "--version"]
result = run_raw_command(command).stdout
result = result.decode("UTF-8").strip()
if "fio-3" in result:
return True
elif "fio-2" in result:
print(f"Your Fio version ({result}) is not compatible. Please use Fio-3.x")
sys.exit(1)
else:
print("Could not detect Fio version.")
sys.exit(1)
def drop_caches(settings):
    # The ">" redirection only works through a shell; with shell=False the previous
    # command merely echoed the string, so run it via `sh -c` (requires root).
    command = ["sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]
    run_raw_command(command)
def run_raw_command(command, env=None):
result = subprocess.run(
command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
if result.returncode > 0 or (len(str(result.stderr)) > 3):
stdout = result.stdout.decode("UTF-8").strip()
stderr = result.stderr.decode("UTF-8").strip()
print(f"\nAn error occurred: {stderr} - {stdout}")
sys.exit(1)
return result
def run_command(settings, benchmark, command):
"""This command sets up the environment that is used in conjunction
with the Fio .ini job file.
"""
output_directory = supporting.generate_output_directory(settings, benchmark)
env = os.environ
settings = supporting.convert_dict_vals_to_str(settings)
benchmark = supporting.convert_dict_vals_to_str(benchmark)
env.update(settings)
env.update(benchmark)
env.update({"OUTPUT": output_directory})
run_raw_command(command, env)
def run_fio(settings, benchmark):
output_directory = supporting.generate_output_directory(settings, benchmark)
output_file = f"{output_directory}/{benchmark['mode']}-{benchmark['iodepth']}-{benchmark['numjobs']}.json"
command = [
"fio",
"--output-format=json",
f"--output={output_file}",
settings["template"],
]
command = supporting.expand_command_line(command, settings, benchmark)
target_parameter = checks.check_target_type(benchmark["target"], settings["type"])
if target_parameter:
command.append(f"{target_parameter}={benchmark['target']}")
if not settings["dry_run"]:
supporting.make_directory(output_directory)
run_command(settings, benchmark, command)
# else:
# pprint.pprint(command)
def run_precondition_benchmark(settings, device, run):
if settings["precondition"] and settings["type"] == "device":
settings_copy = copy.deepcopy(settings)
settings_copy["template"] = settings["precondition_template"]
template = supporting.import_fio_template(settings["precondition_template"])
benchmark = {
"target": device,
"mode": template["precondition"]["rw"],
"iodepth": template["precondition"]["iodepth"],
"block_size": template["precondition"]["bs"],
"numjobs": template["precondition"]["numjobs"],
"run": run,
}
run_fio(settings, benchmark)
def run_benchmarks(settings, benchmarks):
# pprint.pprint(benchmarks)
if not settings["quiet"]:
run = 1
for benchmark in ProgressBar(benchmarks):
if settings["precondition_repeat"]:
run_precondition_benchmark(settings, benchmark["target"], run)
run += 1
drop_caches(settings)
run_fio(settings, benchmark)
else:
for benchmark in benchmarks:
drop_caches(settings)
run_fio(settings, benchmark)
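# Rough shape of the inputs expected by run_benchmarks, inferred from this module alone
# (the settings dict built elsewhere in bench_fio carries more keys than shown here):
#
#   settings = {"quiet": False, "dry_run": False, "type": "device",
#               "template": "fio-job-template.fio", "precondition": False,
#               "precondition_repeat": False, "precondition_template": "precondition.fio"}
#   benchmarks = [{"target": "/dev/nvme0n1", "mode": "randread", "iodepth": 16, "numjobs": 4}]
#
#   run_benchmarks(settings, benchmarks)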
def ProgressBar(iterObj):
"""https://stackoverflow.com/questions/3160699/python-progress-bar/49234284#49234284"""
def SecToStr(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
L = len(iterObj)
steps = {
int(x): y
for x, y in zip(
linspace(0, L, min(100, L), endpoint=False),
linspace(0, 100, min(100, L), endpoint=False),
)
}
# quarter and half block chars
qSteps = ["", "\u258E", "\u258C", "\u258A"]
startT = time.time()
timeStr = " [0:00:00, -:--:--]"
activity = [" -", " \\", " |", " /"]
for nn, item in enumerate(iterObj):
if nn in steps:
done = "\u2588" * int(steps[nn] / 4.0) + qSteps[int(steps[nn] % 4)]
todo = " " * (25 - len(done))
barStr = "%4d%% |%s%s|" % (steps[nn], done, todo)
if nn > 0:
endT = time.time()
timeStr = " [%s, %s]" % (
SecToStr(endT - startT),
SecToStr((endT - startT) * (L / float(nn) - 1)),
)
sys.stdout.write("\r" + barStr + activity[nn % 4] + timeStr)
sys.stdout.flush()
yield item
barStr = "%4d%% |%s|" % (100, "\u2588" * 25)
timeStr = " [%s, 0:00:00]\n" % (SecToStr(time.time() - startT))
sys.stdout.write("\r" + barStr + timeStr)
sys.stdout.flush()
|
vcx/wrappers/python3/demo/alice_create_with_message_flow.py | sklump/indy-sdk | 636 | 11145802 | <reponame>sklump/indy-sdk
import asyncio
import json
from alice import init, connect, accept_offer, create_proof
from demo_utils import download_message, update_message_as_read
from vcx import logging
from vcx.api.credential import Credential
from vcx.api.disclosed_proof import DisclosedProof
async def main():
await init()
connection_to_faber = None
while True:
answer = input(
"Would you like to do? \n "
"0 - establish connection \n "
"1 - check for credential offer \n "
"2 - check for proof request \n "
"else finish \n") \
.lower().strip()
if answer == '0':
connection_to_faber = await connect()
elif answer == '1':
print("Check agency for a credential offer")
pw_did = await connection_to_faber.get_my_pw_did()
uid, offer, _ = await download_message(pw_did, 'credential-offer')
credential = await Credential.create('credential', json.loads(offer))
await accept_offer(connection_to_faber, credential)
await update_message_as_read(pw_did, uid)
elif answer == '2':
print("Check agency for a proof request")
pw_did = await connection_to_faber.get_my_pw_did()
uid, request, _ = await download_message(pw_did, 'presentation-request')
print("#23 Create a Disclosed proof object from proof request")
proof = await DisclosedProof.create('proof', json.loads(request))
await create_proof(connection_to_faber, proof)
await update_message_as_read(pw_did, uid)
        else:
            # Any other answer ends the demo, matching the "else finish" option in the menu above.
            break
print("Finished")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
common/utils/analysis/tsne.py | billzhonggz/Transfer-Learning-Library | 1,474 | 11145834 | <reponame>billzhonggz/Transfer-Learning-Library<filename>common/utils/analysis/tsne.py
"""
@author: <NAME>
@contact: <EMAIL>
"""
import torch
import matplotlib
matplotlib.use('Agg')
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
def visualize(source_feature: torch.Tensor, target_feature: torch.Tensor,
filename: str, source_color='r', target_color='b'):
"""
Visualize features from different domains using t-SNE.
Args:
source_feature (tensor): features from source domain in shape :math:`(minibatch, F)`
target_feature (tensor): features from target domain in shape :math:`(minibatch, F)`
filename (str): the file name to save t-SNE
source_color (str): the color of the source features. Default: 'r'
target_color (str): the color of the target features. Default: 'b'
"""
source_feature = source_feature.numpy()
target_feature = target_feature.numpy()
features = np.concatenate([source_feature, target_feature], axis=0)
# map features to 2-d using TSNE
X_tsne = TSNE(n_components=2, random_state=33).fit_transform(features)
# domain labels, 1 represents source while 0 represents target
domains = np.concatenate((np.ones(len(source_feature)), np.zeros(len(target_feature))))
# visualize using matplotlib
fig, ax = plt.subplots(figsize=(10, 10))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=domains, cmap=col.ListedColormap([target_color, source_color]), s=20)
plt.xticks([])
plt.yticks([])
plt.savefig(filename)
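# Minimal usage sketch (not part of the library; the feature sizes and output path below are
# made-up stand-ins): any two 2-D float tensors with the same feature dimension work.
if __name__ == '__main__':
    source = torch.randn(100, 256)  # stand-in for source-domain features
    target = torch.randn(100, 256)  # stand-in for target-domain features
    visualize(source, target, 'tsne_example.png')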
|
tests/test_03_files.py | tilschaef/genomepy | 146 | 11145860 | <gh_stars>100-1000
import filecmp
import os
import shutil
import subprocess as sp
from tempfile import NamedTemporaryFile
import pytest
from pyfaidx import Fasta
import genomepy.files
@pytest.fixture
def zipped_genome():
tmp = NamedTemporaryFile(suffix=".fa.zip", delete=False)
shutil.copyfile("tests/data/zip_genome.fa.zip", tmp.name)
yield tmp.name
# Remove temp file if it's zipped or not
if os.path.exists(tmp.name):
os.unlink(tmp.name)
if os.path.exists(tmp.name[:-4]):
os.unlink(tmp.name[:-4])
def test_read_readme():
metadata, lines = genomepy.files.read_readme("tests/data/not_an_existsing_file")
assert metadata["name"] == "na"
assert lines == []
wd = os.getcwd()
readme = os.path.join(wd, "README.txt")
with open(readme, "w") as f:
f.writelines("provider: asd\n")
f.writelines(
"this is a regular line\n \n \n \nonly one empty line before me\n"
)
metadata, lines = genomepy.files.read_readme(readme)
assert metadata["provider"] == "asd"
assert lines == ["this is a regular line", "", "only one empty line before me"]
os.unlink(readme)
def test_write_readme():
wd = os.getcwd()
readme = os.path.join(wd, "README.txt")
metadata, lines = genomepy.files.read_readme(readme)
metadata["name"] = "my_cool_genome"
lines = ["", "I wanted to do some regex, but regex is hard"]
genomepy.files.write_readme(readme, metadata, lines)
metadata2, lines2 = genomepy.files.read_readme(readme)
assert metadata == metadata2
assert lines == lines2
os.unlink(readme)
def test_update_readme():
wd = os.getcwd()
readme = os.path.join(wd, "README.txt")
# new data added
updated_metadata = {"key": "value"}
extra_lines = ["let's add some text to the lines section"]
genomepy.files.update_readme(readme, updated_metadata, extra_lines)
metadata, lines = genomepy.files.read_readme(readme)
assert metadata["key"] == updated_metadata["key"]
assert extra_lines[0] in lines
# new data overwrites
updated_metadata = {"key": "value2"}
genomepy.files.update_readme(readme, updated_metadata)
metadata, lines = genomepy.files.read_readme(readme)
assert metadata["key"] == updated_metadata["key"]
os.unlink(readme)
def test_extract_archive(zipped_genome):
assert os.path.exists(zipped_genome)
fname = genomepy.files.extract_archive(zipped_genome)
assert fname.endswith(".fa")
assert os.path.exists(fname)
assert not os.path.exists(fname + ".zip")
def test_extract_zip(zipped_genome):
assert os.path.exists(zipped_genome)
fname = genomepy.files.extract_zip(zipped_genome)
assert fname.endswith(".fa")
assert os.path.exists(fname)
assert not os.path.exists(fname + ".zip")
def test_extract_gzip(fname="tests/data/small_genome.fa.gz"):
assert os.path.exists(fname)
fname = genomepy.files.extract_gzip(fname)
assert fname.endswith(".fa")
assert os.path.exists(fname)
assert not os.path.exists(fname + ".gz")
def test_gzip_and_name(fname="tests/data/small_genome.fa"):
assert os.path.exists(fname)
fname = genomepy.files.gzip_and_name(fname)
assert fname.endswith(".gz")
assert os.path.exists(fname)
assert not os.path.exists(fname[:-3])
fname = genomepy.files.extract_gzip(fname)
assert fname.endswith(".fa")
assert os.path.exists(fname)
assert not os.path.exists(fname + ".gz")
def test_bgzip_and_name(fname="tests/data/small_genome.fa"):
assert os.path.exists(fname)
fname = genomepy.files.bgzip_and_name(fname)
assert fname.endswith(".gz")
assert os.path.exists(fname)
assert not os.path.exists(fname[:-3])
with pytest.raises(sp.CalledProcessError):
genomepy.files.bgzip_and_name("tests/data/nofile.fa")
def test_extract_tarball():
fname = "tests/data/tar2.fa.tar.gz"
outname = "tests/data/tar2.fa"
genomepy.files.extract_tarball(fname, outfile=outname, concat=True) # noqa
assert os.path.exists(outname)
# tar2.fa is a copy of tar1.fa. Check if they are identical after untarring.
assert filecmp.cmp(outname, "tests/data/tar1.fa")
os.unlink(outname)
def test__open():
# read/write regular file
a1 = "tests/data/data.annotation.gtf"
with genomepy.files._open(a1, "w") as gtf:
gtf.write("regular file")
with open(a1) as gtf:
lines1 = gtf.readlines()
assert lines1 == ["regular file"]
with genomepy.files._open(a1) as gtf:
lines2 = gtf.readlines()
assert lines2 == lines1
genomepy.files.rm_rf(a1)
# read/write gzipped file
a2 = "tests/data/data.annotation.gtf.gz"
with genomepy.files._open(a2, "w") as gtf:
gtf.write("gzipped file")
with pytest.raises(UnicodeDecodeError):
with open(a2) as gtf:
gtf.read()
with genomepy.files._open(a2) as gtf:
lines = gtf.readlines()
assert lines == ["gzipped file"]
genomepy.files.rm_rf(a1)
def test_get_file_info(fname="tests/data/small_genome.fa.gz"):
ext, gz = genomepy.files.get_file_info(fname)
assert ext == ".fa" and gz
ext, gz = genomepy.files.get_file_info(fname[:-2] + "fai")
assert ext == ".fai" and not gz
def test_glob_ext_files(file="tests/data/small_genome.fa"):
assert file not in genomepy.files.glob_ext_files("tests/data")
assert file + ".gz" in genomepy.files.glob_ext_files("tests/data")
assert len(genomepy.files.glob_ext_files("tests/data", "fake_ext")) == 0
assert len(genomepy.files.glob_ext_files("tests/data/regexp")) == 1
def test_filter_fasta(fname="tests/data/regexp/regexp.fa"):
# function proper
regexps = [
("Chr.*", 2, 15),
("Scaffold.*", 1, 16),
("scaffold_.*", 3, 14),
(r">\d+$", 4, 13),
("chr.*", 4, 13),
]
for regex, match, no_match in regexps:
with NamedTemporaryFile(suffix=".fa") as tmpfa:
genomepy.files.filter_fasta(
fname, regex=regex, invert_match=False, outfa=tmpfa.name
)
keys = Fasta(tmpfa.name).keys()
assert len(keys) == match, regex
with NamedTemporaryFile(suffix=".fa") as tmpfa:
genomepy.files.filter_fasta(
fname, regex=regex, invert_match=True, outfa=tmpfa.name
)
keys = Fasta(tmpfa.name).keys()
assert len(keys) == no_match, regex
|
etc/theory/experiments/huffman_idealness/analysis.py | Cancelll/rupture | 184 | 11145911 | <reponame>Cancelll/rupture
from string import printable
from generate_huffman import get_huffman_tree
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
from string import whitespace
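# get_ideal_tree assigns an idealised code length to each character: the most frequent character
# gets length 1 and the length grows by one whenever the occurrence count drops, so characters
# with equal counts share a length. The plot below compares these lengths with the real Huffman
# code lengths produced by generate_huffman.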
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
plot_data = OrderedDict()
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
plot_data[n[0]] = n[2]
return '\n'.join(output), plot_data
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree, huffman_plot_data = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree, ideal_plot_data = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
letters = [i for i in huffman_plot_data]
data = OrderedDict([
('Ideal', [ideal_plot_data[i] for i in ideal_plot_data]),
('Huffman', [huffman_plot_data[i] for i in huffman_plot_data])
])
font = {
'size': 12
}
plt.rc('font', **font)
fig, ax1 = plt.subplots()
fig.suptitle('Huffman & Ideal compression comparison')
ax2 = ax1.twinx()
ax1.set_xlabel('Text Characters')
ax1.set_ylabel('Ideal compression (Bytes)')
ax2.set_ylabel('Huffman compression (Bytes)')
x = [i for i in range(len(letters)) if i % 3 == 0]
lets = []
for i in range(len(letters)):
if i % 3 == 0:
l = letters[i]
c = repr(l) if l in whitespace else l
lets.append(c)
y = np.array([data['Ideal'][i] for i in range(len(data['Ideal'])) if i % 3 == 0])
plt.xticks(x, lets)
ax1.plot(x, y)
y = np.array([data['Huffman'][i] for i in range(len(data['Huffman'])) if i % 3 == 0])
plt.xticks(x, lets)
plt.plot(x, y)
ax2.plot(x, y)
plt.legend([i for i in data])
plt.savefig('huffman_idealness.png')
|
examples/pp_stochastic_control_flow.py | zhangyewu/edward | 5,200 | 11145951 | """Stochastic control flow.
We sample from a geometric random variable by using samples from
Bernoulli random variables. It requires a while loop whose condition
is stochastic.
References
----------
https://probmods.org/chapters/02-generative-models.html#stochastic-recursion
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import tensorflow as tf
from edward.models import Bernoulli
def geometric(p):
i = tf.constant(0)
sample = tf.while_loop(
cond=lambda i: tf.cast(1 - Bernoulli(probs=p), tf.bool),
body=lambda i: i + 1,
loop_vars=[i])
return sample
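# Sanity check on the construction above (not part of the original example): the loop counts
# Bernoulli(p) failures before the first success, so the returned value follows Geometric(p)
# with mean (1 - p) / p; for p = 0.1 the histogram drawn in main() should average around 9.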
def main(_):
p = 0.1
geom = geometric(p)
sess = tf.Session()
samples = [sess.run(geom) for _ in range(1000)]
plt.hist(samples, bins='auto')
plt.title("Geometric({0})".format(p))
plt.show()
if __name__ == "__main__":
tf.app.run()
|
kolibri/core/test/test_setlanguage.py | MBKayro/kolibri | 545 | 11146000 | from django.conf import settings
from django.http import HttpResponseNotAllowed
from django.test import TestCase
from django.urls import reverse
from django.urls import translate_url
from django.utils.translation import get_language
from django.utils.translation import LANGUAGE_SESSION_KEY
class I18NTests(TestCase):
"""
Tests set_language view in kolibri/core/views.py
Copied from https://github.com/django/django/blob/stable/1.11.x/tests/view_tests/tests/test_i18n.py
"""
def _get_inactive_language_code(self):
"""Return language code for a language which is not activated."""
current_language = get_language()
return [
code for code, name in settings.LANGUAGES if not code == current_language
][0]
def test_setlang(self):
"""
The set_language view can be used to change the session language.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_next_valid(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the "next" argument.
"""
lang_code = self._get_inactive_language_code()
next_url = reverse("kolibri:kolibri.plugins.learn:learn")
post_data = dict(language=lang_code, next=next_url)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:kolibri.plugins.learn:learn"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_next_invalid(self):
"""
The set_language view can be used to change the session language.
The user is redirected to user redirect if the "next" argument is invalid.
"""
lang_code = self._get_inactive_language_code()
next_url = "/not/a/real/url"
post_data = dict(language=lang_code, next=next_url)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_null(self):
"""
        Passing a null language resets the session language to the default and removes it from the session.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
post_data = dict(language=None)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), "en"),
)
self.assertFalse(LANGUAGE_SESSION_KEY in self.client.session)
def test_setlang_null_next_valid(self):
"""
        Passing a null language resets the session language to the default.
        The user is redirected to the translated "next" URL.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
next_url = reverse("kolibri:kolibri.plugins.learn:learn")
post_data = dict(language=None, next=next_url)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:kolibri.plugins.learn:learn"), "en"),
)
self.assertFalse(LANGUAGE_SESSION_KEY in self.client.session)
def test_setlang_null_next_invalid(self):
"""
        Passing a null language resets the session language to the default.
        The user is redirected to the user redirect view if the "next" argument is invalid.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
next_url = "/not/a/real/url"
post_data = dict(language=None, next=next_url)
response = self.client.post(reverse("kolibri:core:set_language"), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), "en"),
)
self.assertFalse(LANGUAGE_SESSION_KEY in self.client.session)
def test_setlang_get(self):
"""
        The set_language view rejects requests made via GET; only POST is allowed.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.get(reverse("kolibri:core:set_language"), data=post_data)
self.assertEqual(type(response), HttpResponseNotAllowed)
def test_setlang_for_ajax(self):
"""
The set_language view returns 200 for AJAX calls by default.
"""
lang_code = self._get_inactive_language_code()
post_data = dict(language=lang_code)
response = self.client.post(
reverse("kolibri:core:set_language"),
post_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode("utf-8"),
translate_url(reverse("kolibri:core:redirect_user"), lang_code),
)
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
|
py/testdir_single_jvm/test_export_import.py | gigliovale/h2o | 882 | 11146008 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf as h2o_rf, h2o_import as h2i, h2o_exec, h2o_jobs, h2o_gbm
paramDict = {
'response': 'C55',
'cols': None,
# 'ignored_cols_by_name': 'C1,C2,C6,C7,C8',
'ignored_cols_by_name': None,
'classification': 1,
'validation': None,
# fail case
# 'ntrees': 1,
# 'max_depth': 30,
# 'nbins': 100,
'ntrees': 10,
'max_depth': 20,
'min_rows': 1, # normally 1 for classification, 5 for regression
'nbins': 200,
'mtries': None,
'sample_rate': 0.66,
'importance': 0,
'seed': None,
}
DO_OOBE = False
# TRY = 'max_depth'
# TRY = 'ntrees'
TRY = 'nbins'
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_export_import(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
importFolderPath = "standard"
# Parse Train ******************************************************
csvTrainFilename = 'covtype.shuffled.90pct.data'
csvTrainPathname = importFolderPath + "/" + csvTrainFilename
trainKey = csvTrainFilename + ".hex"
parseTrainResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvTrainPathname, hex_key=trainKey,
timeoutSecs=180, doSummary=False)
inspect = h2o_cmd.runInspect(None, trainKey)
# Parse Test ******************************************************
csvTestFilename = 'covtype.shuffled.10pct.data'
csvTestPathname = importFolderPath + "/" + csvTestFilename
testKey = csvTestFilename + ".hex"
parseTestResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvTestPathname, hex_key=testKey,
timeoutSecs=180)
inspect = h2o_cmd.runInspect(None, testKey)
trial = 0
ntreesList = [5, 10, 20, 30]
# ntreesList = [2]
nbinsList = [10, 100, 1000]
if TRY == 'max_depth':
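            # depthList is never defined in this script, so switching TRY to 'max_depth'
            # would raise a NameError here.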
tryList = depthList
elif TRY == 'ntrees':
tryList = ntreesList
elif TRY == 'nbins':
tryList = nbinsList
else:
raise Exception("huh? %s" % TRY)
for d in tryList:
if TRY == 'max_depth':
paramDict['max_depth'] = d
elif TRY == 'ntrees':
paramDict['ntrees'] = d
elif TRY == 'nbins':
paramDict['nbins'] = d
else:
raise Exception("huh? %s" % TRY)
# adjust timeoutSecs with the number of trees
# seems ec2 can be really slow
if DO_OOBE:
paramDict['validation'] = None
else:
paramDict['validation'] = parseTestResult['destination_key']
timeoutSecs = 30 + paramDict['ntrees'] * 200
# do ten starts, to see the bad id problem?
trial += 1
kwargs = paramDict.copy()
modelKey = 'RFModel_' + str(trial)
kwargs['destination_key'] = modelKey
start = time.time()
rfResult = h2o_cmd.runRF(parseResult=parseTrainResult, timeoutSecs=timeoutSecs, **kwargs)
trainElapsed = time.time() - start
print 'rf train end on', csvTrainPathname, 'took', trainElapsed, 'seconds'
h2o.nodes[0].export_files(src_key=testKey, path=SYNDATASETS_DIR + "/" + testKey, force=1)
h2o.nodes[0].export_files(src_key=trainKey, path=SYNDATASETS_DIR + "/" + trainKey, force=1)
# h2o.nodes[0].export_files(src_key=modelKey, path=SYNDATASETS_DIR + "/" + modelKey, force=1)
rf_model = rfResult['drf_model']
cms = rf_model['cms']
### print "cm:", h2o.dump_json(cm)
ntrees = rf_model['N']
errs = rf_model['errs']
N = rf_model['N']
varimp = rf_model['varimp']
treeStats = rf_model['treeStats']
print "maxDepth:", treeStats['maxDepth']
print "maxLeaves:", treeStats['maxLeaves']
print "minDepth:", treeStats['minDepth']
print "minLeaves:", treeStats['minLeaves']
print "meanLeaves:", treeStats['meanLeaves']
print "meanDepth:", treeStats['meanDepth']
print "errs[0]:", errs[0]
print "errs[-1]:", errs[-1]
print "errs:", errs
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfResult)
print "classErrorPctList:", classErrorPctList
self.assertEqual(len(classErrorPctList), 7, "Should be 7 output classes, so should have 7 class error percentages from a reasonable predict")
# FIX! should update this expected classification error
predict = h2o.nodes[0].generate_predictions(model_key=modelKey, data_key=testKey)
if __name__ == '__main__':
h2o.unit_main()
|
classifier/classifier_tuner.py | meghanaravikumar/sigopt-examples | 213 | 11146049 | <filename>classifier/classifier_tuner.py
"""Class for searching for the best classification hyperparameters for a given dataset."""
from __future__ import print_function
import argparse
import itertools
import json
import numpy
import sys
from sklearn import datasets, svm, ensemble
from sigopt import Connection
from sigopt.exception import ApiException
from constant import CLASSIFIER_TYPE_TO_PARAMS, NUM_SIGOPT_SUGGESTIONS, GRID_SEARCH_WIDTH, NUM_RANDOM_SEARCHES, Dataset
class ExampleRunner(object):
"""Searches for the best classification hyperparameters for a given dataset.
Can use the following methods for hyperparameter optimization
- Bayesian Optimization (via SigOpt https://sigopt.com)
- Grid Search
- Random Search
Example for two classifier types (with more soon):
- 'GBC': Gradient Boosting Classifier
http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
- 'SVC': Support Vector Classifier
http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
- 'RFC': Random Forest Classifier
http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Examples:
From python:
>>> from classifier_tuner import ExampleRunner
>>> runner = ExampleRunner(classifier_type='GBC', ...)
>>> runner.run_example(runner.sigopt_generator, sigopt_post=True, output_file='data/GBC_sigopt.txt')
>>> runner.run_example(runner.grid_generator, output_file='data/GBC_grid.txt')
>>> runner.run_example(runner.random_generator, output_file='data/GBC_random.txt')
From a shell:
$ python classifier_tuner.py --help
Questions? Comments? Email <EMAIL>, we're happy to help!
"""
def __init__(self, **kwargs):
self.classifier_type = kwargs.get('classifier_type') or 'GBC'
if self.classifier_type not in CLASSIFIER_TYPE_TO_PARAMS.keys():
raise Exception("classifier_type must be one of %s" % CLASSIFIER_TYPE_TO_PARAMS.keys())
self.client_token = kwargs.get('client_token')
self.dataset_name = kwargs.get('dataset_name')
self.test_set_size = kwargs.get('test_set_size')
self.num_sigopt_suggestions = kwargs.get('num_sigopt_suggestions') or NUM_SIGOPT_SUGGESTIONS
self.grid_search_width = kwargs.get('grid_search_width') or GRID_SEARCH_WIDTH
self.num_random_searches = kwargs.get('num_random_searches') or NUM_RANDOM_SEARCHES
self.dataset = self._load_dataset()
def _load_dataset(self):
"""Return a Dataset with training and test data.
Replace this with your dataset, or try one of the many public datasets at http://scikit-learn.org/stable/datasets/
"""
print('Downloading dataset...')
if self.dataset_name:
if not self.test_set_size:
raise Exception("Must provide `test_set_size` argument when using custom dataset")
data = datasets.fetch_mldata(self.dataset_name)
test_set_size = self.test_set_size
else:
# This is a small dataset that will run quickly, but is too small to provide interesting results
data = datasets.load_digits()
test_set_size = self.test_set_size or 300
return Dataset(
data['data'][:-test_set_size],
data['target'][:-test_set_size],
data['data'][-test_set_size:],
data['target'][-test_set_size:],
)
def get_classifier(self, parameters):
"""Return a sklearn classifier with the given parameters."""
# json unicode needs to be transformed into strings for sklearn
parameters = dict((
(key, str(value) if isinstance(value, unicode) else value) for key, value in parameters.iteritems()
))
if self.classifier_type == 'SVC':
return svm.SVC(**parameters)
elif self.classifier_type == 'GBC':
return ensemble.GradientBoostingClassifier(**parameters)
elif self.classifier_type == 'RFC':
return ensemble.RandomForestClassifier(n_jobs=-1, **parameters)
else:
raise(NotImplementedError)
def create_experiment(self):
"""Create a SigOpt experiment for optimizing the classifier hyperparameters."""
conn = Connection(client_token=self.client_token)
params = CLASSIFIER_TYPE_TO_PARAMS[self.classifier_type]
try:
return conn.experiments().create(
name="Example Classifier",
project="sigopt-examples",
parameters=params,
metrics=[dict(name='classifier_score', objective='maximize')],
observation_budget=self.num_sigopt_suggestions,
)
except ApiException as err:
if err.status_code == 403 and '<EMAIL>' in str(err):
existing_experiments = list(conn.experiments().fetch().iterate_pages())
if existing_experiments:
raise Exception(
"You have existing experiments on sigopt.com: {0}."
" You have exceeded the number of experiments that can be created under your plan."
" Please visit https://sigopt.com/contact to upgrade your plan."
.format(['https://sigopt.com/experiment/{0}'.format(e.id) for e in existing_experiments])
)
raise
def sigopt_generator(self, experiment):
"""Generate optimal parameter configurations using SigOpt."""
for _ in xrange(experiment.observation_budget):
conn = Connection(client_token=self.client_token)
suggestion = conn.experiments(experiment.id).suggestions().create()
yield suggestion.assignments.to_json()
def random_generator(self, experiment):
"""Return a random parameter configuration within the bounds of the parameters"""
for _ in xrange(self.num_random_searches):
suggestion = {}
for param in experiment.parameters:
if param.type == 'int':
suggestion[param.name] = numpy.random.randint(
param.bounds.min,
param.bounds.max,
)
if param.type == 'double':
suggestion[param.name] = numpy.random.uniform(
param.bounds.min,
param.bounds.max,
)
elif param.type == 'categorical':
categories = [str(cat.name) for cat in param.categorical_values]
suggestion[param.name] = str(numpy.random.choice(categories))
yield suggestion
def grid_generator(self, experiment):
"""Iterate through a grid of points within the bounds of the parameters."""
param_value_lists = []
for param in experiment.parameters:
if param.type == 'categorical':
categories = [cat.name for cat in param.categorical_values]
param_value_lists.append(categories)
else:
linspace = numpy.linspace(
param.bounds.min,
param.bounds.max,
self.grid_search_width,
)
if param.type == 'int':
param_value_lists.append([
int(i)
for i
in numpy.unique([round(i) for i in linspace])
])
else:
param_value_lists.append(linspace)
for param_values in itertools.product(*param_value_lists):
suggestion = {}
for i, param_value in enumerate(param_values):
if experiment.parameters[i].type == 'categorical':
suggestion[experiment.parameters[i].name] = str(param_value)
else:
suggestion[experiment.parameters[i].name] = param_value
yield suggestion
def output_score(self, experiment, assignments, score, fout, sigopt_post=False):
"""Log the score, optionally save it to file, and/or report it back to SigOpt."""
suggestion = [assignments[param.name] for param in experiment.parameters]
output = "score: {suggestion} = {score}\n".format(suggestion=tuple(suggestion), score=score)
print(output, end='')
fout.write(output)
if sigopt_post is True:
conn = Connection(client_token=self.client_token)
conn.experiments(experiment.id).observations().create(
assignments=assignments,
value=score,
)
conn.experiments(experiment.id).suggestions().delete()
def calculate_objective(self, assignments):
"""Return the fit of the classifier with the given hyperparameters and the test data."""
classifier = self.get_classifier(assignments)
classifier.fit(self.dataset.X_train, self.dataset.y_train)
return classifier.score(self.dataset.X_test, self.dataset.y_test)
def run_example(self, experiment, generator, sigopt_post=False, output_file=None):
"""Test various hyperparameter configurations against the dataset given a generator."""
with open(output_file, 'w') as fout:
for assignments in generator(experiment):
score = self.calculate_objective(assignments)
self.output_score(experiment, assignments, score, fout, sigopt_post=sigopt_post)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Classifier Tuner')
parser.add_argument(
'--client-token',
type=str,
help='Your sigopt API token. Get this from https://sigopt.com/tokens',
required=True,
)
parser.add_argument(
'--classifier-type',
type=str,
choices=CLASSIFIER_TYPE_TO_PARAMS.keys(),
help='The type of classifier to use. Defaults to GBC.',
default='GBC',
)
parser.add_argument(
'--dataset-name',
type=str,
help='The sklearn dataset to use. Defaults to datasets.load_digits().',
)
parser.add_argument(
'--test-set-size',
type=int,
help='The number of points in the test set. The remainder of the dataset will be the test set.',
)
parser.add_argument(
'--num-sigopt-suggestions',
type=int,
help='The number of suggestions to request from SigOpt.',
default=NUM_SIGOPT_SUGGESTIONS,
)
parser.add_argument(
'--grid-search-width',
type=int,
help='How many grid points in each dimension to use for grid search',
default=GRID_SEARCH_WIDTH,
)
parser.add_argument(
'--num-random-searches',
type=int,
help='How many random search parameter configurations to test',
default=NUM_RANDOM_SEARCHES,
)
args = vars(parser.parse_args())
try:
runner = ExampleRunner(**args)
experiment = runner.create_experiment()
print('Running SigOpt...')
runner.run_example(
experiment,
runner.sigopt_generator,
sigopt_post=True,
output_file='data/{classifier_type}_{dataset_name}_sigopt.txt'.format(
classifier_type=runner.classifier_type,
dataset_name=runner.dataset_name,
),
)
print('Running Grid Search...')
runner.run_example(
experiment,
runner.grid_generator,
output_file='data/{classifier_type}_{dataset_name}_grid.txt'.format(
classifier_type=runner.classifier_type,
dataset_name=runner.dataset_name,
),
)
print('Running Random Search...')
runner.run_example(
experiment,
runner.random_generator,
output_file='data/{classifier_type}_{dataset_name}_random.txt'.format(
classifier_type=runner.classifier_type,
dataset_name=runner.dataset_name,
),
)
print('All done! Check out your experiment at https://sigopt.com/experiment/{0}'.format(experiment.id))
except Exception as e:
print(str(e), file=sys.stderr)
print('Consult --help for for more information.', file=sys.stderr)
exit(1)
|
Intermediate Challenges/Challenge 0046 Intermediate/solutions/solution.py | FreddieV4/DailyProgrammerChallenges | 331 | 11146067 | <reponame>FreddieV4/DailyProgrammerChallenges
# tlseabra@github
import random
def game():
indices = list(range(8))
places = [None] * 8
while indices:
d = random.randint(0,9)
i = round(d*7/9)
real_index, c = i, 1
while not real_index in indices:
if all(x in indices for x in (i+c, i-c)):
real_index = i+c if sum(indices)/len(indices) > 3.5 else i-c
elif i+c in indices:
real_index = i+c
elif i-c in indices:
real_index = i-c
c += 1
places[real_index] = str(d)
indices.remove(real_index)
return places == sorted(places)
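# sum(...) counts wins over 1,000,000 simulated games; dividing by 10,000 reports the win rate as a percentage.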
print(sum(game() for _ in range(1000000)) / 10000)
|
openproblems/tasks/dimensionality_reduction/methods/__init__.py | rcannood/openproblems | 134 | 11146119 | from .pca import pca
from .phate import phate_default
from .phate import phate_scanpy
from .tsne import tsne
from .umap import umap
|
dbaas/notification/tests/test_notification.py | didindinn/database-as-a-service | 303 | 11146125 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from mock import patch
from django.test import TestCase
from django.core import mail
from account.tests.factory import TeamFactory
from logical.tests.factory import DatabaseFactory
from system.models import Configuration
from notification.tasks import database_notification_for_team
from dbaas.tests.helpers import InstanceHelper
class BaseTestCase(object):
engine_name = ''
port = None
replication_topology_class_path = None
instance_helper = InstanceHelper
instance_quantity = 1
instance_type = 1
def setUp(self):
self.team = TeamFactory()
self.threshold_database_notification = Configuration(
name='threshold_database_notification', value=70,
description='Threshold infra notification'
)
self.threshold_database_notification.save()
self.new_user_notify_email = Configuration(
name='new_user_notify_email', value='<EMAIL>',
description='New user notify e-mail'
)
self.new_user_notify_email.save()
self.database_big = DatabaseFactory(
databaseinfra__engine__engine_type__name=self.engine_name,
)
self.database_big.team = self.team
self.database_big.save()
self.infra_big = self.database_big.databaseinfra
self.infra_big.plan.replication_topology.class_path = self.replication_topology_class_path
self.infra_big.plan.replication_topology.save()
self.infra_big.save()
self.database_small = DatabaseFactory(
databaseinfra__engine__engine_type__name=self.engine_name
)
self.database_small.team = self.team
self.database_small.save()
self.infra_small = self.database_small.databaseinfra
self.infra_small.plan.replication_topology.class_path = self.replication_topology_class_path
self.infra_small.plan.replication_topology.save()
self.infra_small.save()
self.instance_helper.create_instances_by_quant(
qt=self.instance_quantity, infra=self.infra_big,
total_size_in_bytes=10000, used_size_in_bytes=9900,
port=self.port, instance_type=self.instance_type
)
self.instance_helper.create_instances_by_quant(
qt=self.instance_quantity, infra=self.infra_small,
total_size_in_bytes=10000, used_size_in_bytes=1900,
port=self.port, instance_type=self.instance_type
)
def test_team_can_receive_notification(self, check_master_mock):
database_notification_for_team(team=self.team)
self.assertEqual(len(mail.outbox), 2)
def test_team_do_not_want_receive_notification(self, check_master_mock):
self.database_big.subscribe_to_email_events = False
self.database_big.save()
database_notification_for_team(team=self.team)
self.assertEqual(len(mail.outbox), 0)
@patch('drivers.mysqldb.MySQL.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class MySQLSingleTestCase(BaseTestCase, TestCase):
engine_name = 'mysql'
port = 3306
replication_topology_class_path = 'drivers.replication_topologies.mysql.MySQLSingle'
@patch('drivers.mysqldb.MySQLFOXHA.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class MySQLFoxHATestCase(BaseTestCase, TestCase):
engine_name = 'mysql'
port = 3306
replication_topology_class_path = 'drivers.replication_topologies.mysql.MySQLFoxHA'
instance_quantity = 2
@patch('drivers.mongodb.MongoDB.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class MongoDBSingleTestCase(BaseTestCase, TestCase):
engine_name = 'mongodb'
port = 27017
replication_topology_class_path = 'drivers.replication_topologies.mongodb.MongoDBSingle'
instance_type = 2
@patch('drivers.mongodb.MongoDBReplicaSet.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class MongoDBReplicasetTestCase(BaseTestCase, TestCase):
engine_name = 'mongodb'
port = 27017
replication_topology_class_path = 'drivers.replication_topologies.mongodb.MongoDBReplicaset'
instance_type = 2
instance_quantity = 2
@patch('drivers.redis.Redis.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class RedisSingleTestCase(BaseTestCase, TestCase):
engine_name = 'redis'
port = 6379
replication_topology_class_path = 'drivers.replication_topologies.redis.RedisSingle'
instance_type = 4
@patch('drivers.redis.RedisSentinel.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class RedisSentinelTestCase(BaseTestCase, TestCase):
engine_name = 'redis'
port = 6379
replication_topology_class_path = 'drivers.replication_topologies.redis.RedisSentinel'
instance_type = 4
instance_quantity = 2
@patch('drivers.redis.RedisCluster.check_instance_is_master',
side_effect=InstanceHelper.check_instance_is_master)
class RedisClusterTestCase(BaseTestCase, TestCase):
engine_name = 'redis'
port = 6379
replication_topology_class_path = 'drivers.replication_topologies.redis.RedisCluster'
instance_type = 4
instance_quantity = 6
|
scripts/automation/regression/hltapi_playground.py | timgates42/trex-core | 956 | 11146130 | <filename>scripts/automation/regression/hltapi_playground.py
#!/router/bin/python
import outer_packages
#from trex_stl_lib.trex_stl_hltapi import CTRexHltApi, CStreamsPerPort
from trex_stl_lib.trex_stl_hltapi import *
import traceback
import sys, time
from pprint import pprint
import argparse
def error(err = None):
if not err:
raise Exception('Unknown exception, look traceback')
if type(err) is str and not err.startswith('[ERR]'):
err = '[ERR] ' + err
print err
sys.exit(1)
def check_res(res):
if res['status'] == 0:
error('Encountered error:\n%s' % res['log'])
return res
def print_brief_stats(res):
title_str = ' '*3
tx_str = 'TX:'
rx_str = 'RX:'
for port_id, stat in res.iteritems():
if type(port_id) is not int:
continue
title_str += ' '*10 + 'Port%s' % port_id
tx_str += '%15s' % res[port_id]['aggregate']['tx']['total_pkts']
rx_str += '%15s' % res[port_id]['aggregate']['rx']['total_pkts']
print(title_str)
print(tx_str)
print(rx_str)
def wait_with_progress(seconds):
for i in range(0, seconds):
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
print('')
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description='Example of using stateless TRex via HLT API.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-v', dest = 'verbose', default = 0, help='Stateless API verbosity:\n0: No prints\n1: Commands and their status\n2: Same as 1 + ZMQ in&out')
parser.add_argument('--device', dest = 'device', default = 'localhost', help='Address of TRex server')
args = parser.parse_args()
hlt_client = CTRexHltApi(verbose = int(args.verbose))
print('Connecting to %s...' % args.device)
res = check_res(hlt_client.connect(device = args.device, port_list = [0, 1], username = 'danklei', break_locks = True, reset = True))
port_handle = res['port_handle']
print('Connected, got port handles %s' % port_handle)
ports_streams_dict = CStreamsPerPort()
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
l3_protocol = 'ipv4',
#length_mode = 'imix', l3_length = 200,
ipv6_dst_mode = 'decrement', ipv6_dst_count = 300, ipv6_dst_addr = 'fe80:0:0:0:0:0:0:000f',
port_handle = port_handle, port_handle2 = port_handle[1],
#save_to_yaml = '/tmp/d1.yaml',
#stream_id = 1,
)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'run')
print hlt_client.traffic_control(action = 'poll')
wait_with_progress(2)
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_control(action = 'stop')
print hlt_client.traffic_control(action = 'poll')
print hlt_client.traffic_stats(mode = 'aggregate')
print hlt_client.traffic_control(action = 'clear_stats')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
wait_with_progress(1)
print hlt_client.traffic_stats(mode = 'aggregate')
#print res
#print hlt_client._streams_history
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[1])
#ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
res = check_res(hlt_client.traffic_config(mode = 'create', l2_encap = 'ethernet_ii_vlan', rate_pps = 1,
port_handle = port_handle[0], port_handle2 = port_handle[1], save_to_yaml = '/tmp/d1.yaml',
l4_protocol = 'udp',
#udp_src_port_mode = 'decrement',
#udp_src_port_count = 10, udp_src_port = 5,
))
ports_streams_dict.add_streams_from_res(res)
sys.exit(0)
#print ports_streams_dict
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_src = '1-2-3:4:5:6', l4_protocol = 'udp', save_to_yaml = '/tmp/d2.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
#print hlt_client._streams_history
res = check_res(hlt_client.traffic_config(mode = 'modify', port_handle = port_handle[0], stream_id = ports_streams_dict[0][0],
mac_dst = '{ 7 7 7-7:7:7}', save_to_yaml = '/tmp/d3.yaml'))
#print hlt_client.trex_client._STLClient__get_all_streams(port_id = port_handle[0])
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle))
res = check_res(hlt_client.traffic_config(mode = 'create', bidirectional = True, length_mode = 'fixed',
port_handle = port_handle[0], port_handle2 = port_handle[1],
transmit_mode = 'single_burst', pkts_per_burst = 100, rate_pps = 100,
mac_src = '1-2-3-4-5-6',
mac_dst = '6:5:4:4:5:6',
save_to_yaml = '/tmp/imix.yaml'))
ports_streams_dict.add_streams_from_res(res)
print('Create single_burst 100 packets rate_pps=100 on port 0')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], transmit_mode = 'single_burst',
pkts_per_burst = 100, rate_pps = 100))
ports_streams_dict.add_streams_from_res(res)
# playground - creating various streams on port 1
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt2.yaml',
tcp_src_port_mode = 'decrement',
tcp_src_port_count = 10, tcp_dst_port_count = 10, tcp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt3.yaml',
l4_protocol = 'udp',
udp_src_port_mode = 'decrement',
udp_src_port_count = 10, udp_dst_port_count = 10, udp_dst_port_mode = 'random'))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt4.yaml',
length_mode = 'increment',
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
ports_streams_dict.add_streams_from_res(res)
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt5.yaml',
length_mode = 'decrement', frame_size_min = 100, frame_size_max = 3000,
#ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
#ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2
))
ports_streams_dict.add_streams_from_res(res)
# remove the playground
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[1]))
print('Create continuous stream for port 1, rate_pps = 1')
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[1], save_to_yaml = '/tmp/hlt1.yaml',
#length_mode = 'increment', l3_length_min = 200,
ip_src_addr = '192.168.1.1', ip_src_mode = 'increment', ip_src_count = 5,
ip_dst_addr = '5.5.5.5', ip_dst_mode = 'random', ip_dst_count = 2))
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle))
wait_with_progress(1)
print('Sample after 1 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'all', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Port 0 has finished the burst, put continuous instead with rate 1000. No stopping of other ports.')
check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[0]))
check_res(hlt_client.traffic_config(mode = 'reset', port_handle = port_handle[0]))
res = check_res(hlt_client.traffic_config(mode = 'create', port_handle = port_handle[0], rate_pps = 1000))
ports_streams_dict.add_streams_from_res(res)
check_res(hlt_client.traffic_control(action = 'run', port_handle = port_handle[0]))
wait_with_progress(5)
print('Sample after another 5 seconds (only packets count)')
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Stop traffic at port 1')
res = check_res(hlt_client.traffic_control(action = 'stop', port_handle = port_handle[1]))
wait_with_progress(5)
print('Sample after another %s seconds (only packets count)' % 5)
res = check_res(hlt_client.traffic_stats(mode = 'aggregate', port_handle = port_handle))
print_brief_stats(res)
print ''
print('Full HLT stats:')
pprint(res)
check_res(hlt_client.cleanup_session())
except Exception as e:
print(traceback.print_exc())
print(e)
raise
finally:
print('Done.')
|
tensorflow/python/data/kernel_tests/range_test.py | abhaikollara/tensorflow | 848 | 11146156 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RangeTest(test_base.DatasetTestBase):
def testStop(self):
dataset = dataset_ops.Dataset.range(5)
self.assertDatasetProduces(dataset, expected_output=range(5))
def testStartStop(self):
start, stop = 2, 5
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(2, 5))
def testStartStopStep(self):
start, stop, step = 2, 10, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, 2))
def testZeroStep(self):
start, stop, step = 2, 10, 0
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(start, stop, step)
self.evaluate(dataset._variant_tensor)
def testNegativeStep(self):
start, stop, step = 2, 10, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, -1))
def testStopLessThanStart(self):
start, stop = 10, 2
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(10, 2))
def testStopLessThanStartWithPositiveStep(self):
start, stop, step = 10, 2, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, 2))
def testStopLessThanStartWithNegativeStep(self):
start, stop, step = 10, 2, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, -1))
if __name__ == "__main__":
test.main()
|