blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 777 values) | visit_date (timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64 4.92k–681M ⌀) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses 22 values) | gha_event_created_at (timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀) | gha_created_at (timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀) | gha_language (stringclasses 149 values) | src_encoding (stringclasses 26 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 3–10.2M) | extension (stringclasses 188 values) | content (stringlengths 3–10.2M) | authors (sequencelengths 1–1) | author_id (stringlengths 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bd1e0023e976133adf144217da8c40767542ad6a | 40c4b8e9ac9074869bfb0dc1d3c3f566371f1764 | /Hangman1/dog_1.py | 8ef10b06f11b544da283359081ffd9dca0a7cd4a | [] | no_license | katuhito/Hangman001 | 870a8827e69cbd9a8b01ffb55f5c499c71861b76 | 710a201c6ad8284e164ea8ad26cd061486c50849 | refs/heads/master | 2022-12-06T16:30:24.613288 | 2020-08-22T10:19:27 | 2020-08-22T10:19:27 | 285,448,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | class Dog:
def __init__(self, name, breed, owner):
self.name = name
self.breed = breed
self.owner = owner
class Person:
def __init__(self, name):
self.name = name
mick = Person("Mick Jagger")
stan = Dog("Stanley", "Bulldog", mick)
print(stan.owner.name)
| ["[email protected]"] | |
9520ad44cace073dcbbf25bfb2530de13bce6960 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/suit/DistributedGoon.py | 6f991324caf0d47395d571c95bf7668c93012657 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 18,204 | py | # 2013.08.22 22:25:21 Pacific Daylight Time
# Embedded file name: toontown.suit.DistributedGoon
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from GoonGlobals import *
from direct.fsm import FSM
from direct.distributed import ClockDelta
from otp.level import BasicEntities
from otp.level import DistributedEntity
from direct.directnotify import DirectNotifyGlobal
from toontown.coghq import DistributedCrushableEntity
from toontown.toonbase import ToontownGlobals
from toontown.coghq import MovingPlatform
import Goon
from direct.task.Task import Task
from otp.level import PathEntity
import GoonDeath
import random
class DistributedGoon(DistributedCrushableEntity.DistributedCrushableEntity, Goon.Goon, FSM.FSM):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoon')
def __init__(self, cr):
try:
self.DistributedGoon_initialized
except:
self.DistributedGoon_initialized = 1
DistributedCrushableEntity.DistributedCrushableEntity.__init__(self, cr)
Goon.Goon.__init__(self)
FSM.FSM.__init__(self, 'DistributedGoon')
self.setCacheable(0)
self.rayNode = None
self.checkForWalls = 0
self.triggerEvent = None
self.animTrack = None
self.walkTrack = None
self.pauseTime = 0
self.paused = 0
self.path = None
self.dir = GOON_FORWARD
self.animMultiplier = 1.0
self.isDead = 0
self.isStunned = 0
self.collapseSound = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_hunker_down.mp3')
self.recoverSound = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_rattle_shake.mp3')
self.attackSound = loader.loadSfx('phase_9/audio/sfx/CHQ_GOON_tractor_beam_alarmed.mp3')
return
def announceGenerate(self):
DistributedCrushableEntity.DistributedCrushableEntity.announceGenerate(self)
if hasattr(self, 'goonType'):
self.initGoon(self.goonType)
else:
self.initGoon('pg')
self.scaleRadar()
self.colorHat()
if self.level:
self.initClipPlanes()
self.level.setEntityCreateCallback(self.parentEntId, self.initPath)
else:
self.enterOff()
taskMgr.doMethodLater(0.1, self.makeCollidable, self.taskName('makeCollidable'))
self.setGoonScale(self.scale)
self.animMultiplier = self.velocity / (ANIM_WALK_RATE * self.scale)
self.setPlayRate(self.animMultiplier, 'walk')
def initPath(self):
self.enterOff()
self.setPath()
taskMgr.doMethodLater(0.1, self.makeCollidable, self.taskName('makeCollidable'))
def makeCollidable(self, task):
self.initCollisions()
self.initializeBodyCollisions()
triggerName = self.uniqueName('GoonTrigger')
self.trigger.setName(triggerName)
self.triggerEvent = 'enter%s' % triggerName
self.startToonDetect()
def generate(self):
DistributedCrushableEntity.DistributedCrushableEntity.generate(self)
def scaleRadar(self):
Goon.Goon.scaleRadar(self)
self.trigger = self.radar.find('**/trigger')
triggerName = self.uniqueName('GoonTrigger')
self.trigger.setName(triggerName)
def initCollisions(self):
self.cSphere = CollisionSphere(0.0, 0.0, 1.0, 1.0)
self.cSphereNode = CollisionNode('goonCollSphere')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.head.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereBitMask = ToontownGlobals.WallBitmask
self.cSphereNode.setCollideMask(self.cSphereBitMask)
self.cSphere.setTangible(1)
self.sSphere = CollisionSphere(0.0, 0.0, self.headHeight + 0.8, 0.2)
self.sSphereNode = CollisionNode('toonSphere')
self.sSphereNode.addSolid(self.sSphere)
self.sSphereNodePath = self.head.attachNewNode(self.sSphereNode)
self.sSphereNodePath.hide()
self.sSphereBitMask = ToontownGlobals.WallBitmask
self.sSphereNode.setCollideMask(self.sSphereBitMask)
self.sSphere.setTangible(1)
def initializeBodyCollisions(self):
self.cSphereNode.setName(self.uniqueName('goonCollSphere'))
self.sSphereNode.setName(self.uniqueName('toonSphere'))
self.accept(self.uniqueName('entertoonSphere'), self.__handleStun)
def disableBodyCollisions(self):
self.ignore(self.uniqueName('entertoonSphere'))
def deleteCollisions(self):
if hasattr(self, 'sSphereNodePath'):
self.sSphereNodePath.removeNode()
del self.sSphereNodePath
del self.sSphereNode
del self.sSphere
if hasattr(self, 'cSphereNodePath'):
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.cSphereNode
del self.cSphere
def initClipPlanes(self):
zoneNum = self.getZoneEntity().getZoneNum()
clipList = self.level.goonClipPlanes.get(zoneNum)
if clipList:
for id in clipList:
clipPlane = self.level.getEntity(id)
self.radar.setClipPlane(clipPlane.getPlane())
def disableClipPlanes(self):
if self.radar:
self.radar.clearClipPlane()
if __dev__:
def refreshPath(self):
self.setPath()
self.request('Off')
self.request('Walk')
def setPath(self):
self.path = self.level.getEntity(self.parentEntId)
if __dev__:
if hasattr(self, 'pathChangeEvent'):
self.ignore(self.pathChangeEvent)
self.pathChangeEvent = self.path.getChangeEvent()
self.accept(self.pathChangeEvent, self.refreshPath)
if self.walkTrack:
self.walkTrack.pause()
self.walkTrack = None
self.walkTrack = self.path.makePathTrack(self, self.velocity, self.uniqueName('goonWalk'), turnTime=T_TURN)
if self.gridId != None:
self.sendUpdate('setParameterize', [self.path.pos[0],
self.path.pos[1],
self.path.pos[2],
self.path.pathIndex])
return
def disable(self):
self.notify.debug('DistributedGoon %d: disabling' % self.getDoId())
self.ignoreAll()
self.stopToonDetect()
taskMgr.remove(self.taskName('resumeWalk'))
taskMgr.remove(self.taskName('recoveryDone'))
self.request('Off')
self.disableBodyCollisions()
self.disableClipPlanes()
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
if self.walkTrack:
self.walkTrack.pause()
self.walkTrack = None
DistributedCrushableEntity.DistributedCrushableEntity.disable(self)
return
def delete(self):
try:
self.DistributedSuit_deleted
except:
self.DistributedSuit_deleted = 1
self.notify.debug('DistributedGoon %d: deleting' % self.getDoId())
taskMgr.remove(self.taskName('makeCollidable'))
self.deleteCollisions()
self.head.removeNode()
del self.head
del self.attackSound
del self.collapseSound
del self.recoverSound
DistributedCrushableEntity.DistributedCrushableEntity.delete(self)
Goon.Goon.delete(self)
def enterOff(self, *args):
self.hideNametag3d()
self.hideNametag2d()
self.hide()
self.isStunned = 0
self.isDead = 0
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
if self.walkTrack:
self.walkTrack.pause()
self.walkTrack = None
return
def exitOff(self):
self.show()
self.showNametag3d()
self.showNametag2d()
def enterWalk(self, avId = None, ts = 0):
self.notify.debug('enterWalk, ts = %s' % ts)
self.startToonDetect()
self.loop('walk', 0)
self.isStunned = 0
if self.path:
if not self.walkTrack:
self.walkTrack = self.path.makePathTrack(self, self.velocity, self.uniqueName('goonWalk'), turnTime=T_TURN)
self.startWalk(ts)
def startWalk(self, ts):
tOffset = ts % self.walkTrack.getDuration()
self.walkTrack.loop()
self.walkTrack.pause()
self.walkTrack.setT(tOffset)
self.walkTrack.resume()
self.paused = 0
def exitWalk(self):
self.notify.debug('exitWalk')
self.stopToonDetect()
if self.walkTrack and not self.paused:
self.pauseTime = self.walkTrack.pause()
self.paused = 1
self.stop()
def enterBattle(self, avId = None, ts = 0):
self.notify.debug('enterBattle')
self.stopToonDetect()
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
self.isStunned = 0
if avId == base.localAvatar.doId:
if self.level:
self.level.b_setOuch(self.strength)
self.animTrack = self.makeAttackTrack()
self.animTrack.loop()
return
def exitBattle(self):
self.notify.debug('exitBattle')
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
self.head.setHpr(0, 0, 0)
return
def enterStunned(self, ts = 0):
self.ignore(self.uniqueName('entertoonSphere'))
self.isStunned = 1
self.notify.debug('enterStunned')
if self.radar:
self.radar.hide()
self.animTrack = Parallel(Sequence(ActorInterval(self, 'collapse'), Func(self.pose, 'collapse', 48)), SoundInterval(self.collapseSound, node=self))
self.animTrack.start(ts)
def exitStunned(self):
self.notify.debug('exitStunned')
if self.radar:
self.radar.show()
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
self.accept(self.uniqueName('entertoonSphere'), self.__handleStun)
return
def enterRecovery(self, ts = 0, pauseTime = 0):
self.notify.debug('enterRecovery')
self.ignore(self.uniqueName('entertoonSphere'))
self.isStunned = 1
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
self.animTrack = self.getRecoveryTrack()
duration = self.animTrack.getDuration()
self.animTrack.start(ts)
delay = max(0, duration - ts)
taskMgr.remove(self.taskName('recoveryDone'))
taskMgr.doMethodLater(delay, self.recoveryDone, self.taskName('recoveryDone'), extraArgs=(pauseTime,))
return
def getRecoveryTrack(self):
return Parallel(Sequence(ActorInterval(self, 'recovery'), Func(self.pose, 'recovery', 96)), Func(base.playSfx, self.recoverSound, node=self))
def recoveryDone(self, pauseTime):
self.request('Walk', None, pauseTime)
return
def exitRecovery(self):
self.notify.debug('exitRecovery')
taskMgr.remove(self.taskName('recoveryDone'))
if self.animTrack:
self.animTrack.finish()
self.animTrack = None
self.accept(self.uniqueName('entertoonSphere'), self.__handleStun)
return
def makeAttackTrack(self):
h = self.head.getH()
freakDeg = 60
hatZ = self.hat.getZ()
track = Parallel(Sequence(LerpColorScaleInterval(self.eye, 0.2, Vec4(1, 0, 0, 1)), LerpColorScaleInterval(self.eye, 0.2, Vec4(0, 0, 1, 1)), LerpColorScaleInterval(self.eye, 0.2, Vec4(1, 0, 0, 1)), LerpColorScaleInterval(self.eye, 0.2, Vec4(0, 0, 1, 1)), Func(self.eye.clearColorScale)), SoundInterval(self.attackSound, node=self, volume=0.4))
return track
def doDetect(self):
pass
def doAttack(self, avId):
pass
def __startResumeWalkTask(self, ts):
resumeTime = 1.5
if ts < resumeTime:
taskMgr.remove(self.taskName('resumeWalk'))
taskMgr.doMethodLater(resumeTime - ts, self.request, self.taskName('resumeWalk'), extraArgs=('Walk',))
else:
self.request('Walk', ts - resumeTime)
def __reverseWalk(self, task):
self.request('Walk')
return Task.done
def __startRecoverTask(self, ts):
stunTime = 4.0
if ts < stunTime:
taskMgr.remove(self.taskName('resumeWalk'))
taskMgr.doMethodLater(stunTime - ts, self.request, self.taskName('resumeWalk'), extraArgs=('Recovery',))
else:
self.request('Recovery', ts - stunTime)
def startToonDetect(self):
self.radar.show()
if self.triggerEvent:
self.accept(self.triggerEvent, self.handleToonDetect)
def stopToonDetect(self):
if self.triggerEvent:
self.ignore(self.triggerEvent)
def handleToonDetect(self, collEntry = None):
if base.localAvatar.isStunned:
return
if self.state == 'Off':
return
self.stopToonDetect()
self.request('Battle', base.localAvatar.doId)
if self.walkTrack:
self.pauseTime = self.walkTrack.pause()
self.paused = 1
        if hasattr(self, 'dclass') and self.dclass:
self.sendUpdate('requestBattle', [self.pauseTime])
else:
self.notify.info('Goon deleted and still trying to call handleToonDetect()')
def __handleStun(self, collEntry):
toon = base.localAvatar
if toon:
toonDistance = self.getPos(toon).length()
if toonDistance > self.attackRadius:
                self.notify.warning('Stunned a goon, but outside of attack radius')
return
else:
self.request('Stunned')
if self.walkTrack:
self.pauseTime = self.walkTrack.pause()
self.paused = 1
self.sendUpdate('requestStunned', [self.pauseTime])
def setMovie(self, mode, avId, pauseTime, timestamp):
if self.isDead:
return
ts = ClockDelta.globalClockDelta.localElapsedTime(timestamp)
self.notify.debug('%s: setMovie(%s,%s,%s,%s)' % (self.doId,
mode,
avId,
pauseTime,
ts))
if mode == GOON_MOVIE_BATTLE:
if self.state != 'Battle':
self.request('Battle', avId, ts)
elif mode == GOON_MOVIE_STUNNED:
if self.state != 'Stunned':
toon = base.cr.doId2do.get(avId)
if toon:
toonDistance = self.getPos(toon).length()
if toonDistance > self.attackRadius:
self.notify.warning('Stunned a goon, but outside of attack radius')
return
else:
self.request('Stunned', ts)
elif mode == GOON_MOVIE_RECOVERY:
if self.state != 'Recovery':
self.request('Recovery', ts, pauseTime)
elif mode == GOON_MOVIE_SYNC:
if self.walkTrack:
self.walkTrack.pause()
self.paused = 1
if self.state == 'Off' or self.state == 'Walk':
self.request('Walk', avId, pauseTime + ts)
else:
if self.walkTrack:
self.walkTrack.pause()
self.walkTrack = None
self.request('Walk', avId, pauseTime + ts)
return
def stunToon(self, avId):
self.notify.debug('stunToon(%s)' % avId)
av = base.cr.doId2do.get(avId)
if av != None:
av.stunToon()
return
def isLocalToon(self, avId):
if avId == base.localAvatar.doId:
return 1
return 0
def playCrushMovie(self, crusherId, axis):
goonPos = self.getPos()
sx = random.uniform(0.3, 0.8) * self.scale
sz = random.uniform(0.3, 0.8) * self.scale
crushTrack = Sequence(GoonDeath.createGoonExplosion(self.getParent(), goonPos, VBase3(sx, 1, sz)), name=self.uniqueName('crushTrack'), autoFinish=1)
self.dead()
crushTrack.start()
def setVelocity(self, velocity):
self.velocity = velocity
self.animMultiplier = velocity / (ANIM_WALK_RATE * self.scale)
self.setPlayRate(self.animMultiplier, 'walk')
def dead(self):
if not self.isDead and not self.isDisabled():
self.stopToonDetect()
self.detachNode()
self.isDead = 1
def undead(self):
if self.isDead:
self.startToonDetect()
self.reparentTo(render)
self.isDead = 0
def resync(self):
if not self.isDead:
self.sendUpdate('requestResync')
def setHFov(self, hFov):
if hFov != self.hFov:
self.hFov = hFov
if self.isGenerated():
self.scaleRadar()
def setAttackRadius(self, attackRadius):
if attackRadius != self.attackRadius:
self.attackRadius = attackRadius
if self.isGenerated():
self.scaleRadar()
def setStrength(self, strength):
if strength != self.strength:
self.strength = strength
if self.isGenerated():
self.colorHat()
def setGoonScale(self, scale):
if scale != self.scale:
self.scale = scale
if self.isGenerated():
self.getGeomNode().setScale(self.scale)
self.scaleRadar()
def setupGoon(self, velocity, hFov, attackRadius, strength, scale):
self.setVelocity(velocity)
self.setHFov(hFov)
self.setAttackRadius(attackRadius)
self.setStrength(strength)
self.setGoonScale(scale)
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\suit\DistributedGoon.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:25:22 Pacific Daylight Time
| ["[email protected]"] | |
3b39a7459507e3e865fdf5bbc05da93b29990c97 | 8792e3449fbc6c8dec99f6af1d9f1b4caddad1f7 | /105.py | 9c3a0992e67c64804a57d489ee89d15378444d1d | [] | no_license | aarthisandhiya/aarthisandhiya1 | c19c1951c9ba01cd97eeddd44614953088718357 | e6f10247b6a84d6eaf371a23f2f9c3bebbc73e5b | refs/heads/master | 2020-04-15T17:17:07.151242 | 2019-05-20T05:24:19 | 2019-05-20T05:24:19 | 164,868,494 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | a,b=map(str,input().split())
z = int(str(a) + str(b))
print(z)
| ["[email protected]"] | |
73632a0fc46bcb44ba1b1359191e89d643076b20 | 6d87f66357e4002c461532b39498636d29956097 | /stack/cinder/cinder/tests/integrated/test_volumes.py | 0ec851113d15b555d1a9e13b95fbf0ec2994a1dd | ["Apache-2.0"] | permissive | megaumi/openstack-tbd-scheduler | b8588b35c45b8fe9eee59723276047601ce2dfde | 13928e9fec092e573c4945343a8b60e1fa86c4b3 | refs/heads/master | 2021-06-06T05:45:48.026940 | 2020-07-24T08:52:55 | 2020-07-24T08:52:55 | 6,046,752 | 0 | 1 | null | 2020-07-24T08:52:57 | 2012-10-02T15:11:38 | Python | UTF-8 | Python | false | false | 7,530 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import unittest
from cinder import service
from cinder.openstack.common import log as logging
from cinder.tests.integrated import integrated_helpers
from cinder.tests.integrated.api import client
from cinder.volume import driver
LOG = logging.getLogger(__name__)
class VolumesTest(integrated_helpers._IntegratedTestBase):
def setUp(self):
super(VolumesTest, self).setUp()
driver.LoggingVolumeDriver.clear_logs()
def _start_api_service(self):
self.osapi = service.WSGIService("osapi_volume")
self.osapi.start()
self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port)
LOG.warn(self.auth_url)
def _get_flags(self):
f = super(VolumesTest, self)._get_flags()
f['use_local_volumes'] = False # Avoids calling local_path
f['volume_driver'] = 'cinder.volume.driver.LoggingVolumeDriver'
return f
def test_get_volumes_summary(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes(False)
for volume in volumes:
LOG.debug("volume: %s" % volume)
def test_get_volumes(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes()
for volume in volumes:
LOG.debug("volume: %s" % volume)
def _poll_while(self, volume_id, continue_states, max_retries=5):
"""Poll (briefly) while the state is in continue_states."""
retries = 0
while True:
try:
found_volume = self.api.get_volume(volume_id)
except client.OpenStackApiNotFoundException:
found_volume = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found %s" % found_volume)
self.assertEqual(volume_id, found_volume['id'])
if not found_volume['status'] in continue_states:
break
time.sleep(1)
retries = retries + 1
if retries > max_retries:
break
return found_volume
def test_create_and_delete_volume(self):
"""Creates and deletes a volume."""
# Create volume
created_volume = self.api.post_volume({'volume': {'size': 1}})
LOG.debug("created_volume: %s" % created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
# It should also be in the all-volume list
volumes = self.api.get_volumes()
volume_names = [volume['id'] for volume in volumes]
self.assertTrue(created_volume_id in volume_names)
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Delete the volume
self.api.delete_volume(created_volume_id)
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_while(created_volume_id, ['deleting'])
# Should be gone
self.assertFalse(found_volume)
LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
create_actions = driver.LoggingVolumeDriver.logs_like(
'create_volume',
id=created_volume_id)
LOG.debug("Create_Actions: %s" % create_actions)
self.assertEquals(1, len(create_actions))
create_action = create_actions[0]
self.assertEquals(create_action['id'], created_volume_id)
self.assertEquals(create_action['availability_zone'], 'nova')
self.assertEquals(create_action['size'], 1)
export_actions = driver.LoggingVolumeDriver.logs_like(
'create_export',
id=created_volume_id)
self.assertEquals(1, len(export_actions))
export_action = export_actions[0]
self.assertEquals(export_action['id'], created_volume_id)
self.assertEquals(export_action['availability_zone'], 'nova')
delete_actions = driver.LoggingVolumeDriver.logs_like(
'delete_volume',
id=created_volume_id)
self.assertEquals(1, len(delete_actions))
        delete_action = delete_actions[0]
self.assertEquals(delete_action['id'], created_volume_id)
def test_create_volume_with_metadata(self):
"""Creates a volume with metadata."""
# Create volume
metadata = {'key1': 'value1',
'key2': 'value2'}
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'metadata': metadata}})
LOG.debug("created_volume: %s" % created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there and metadata present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(metadata, found_volume['metadata'])
def test_create_volume_in_availability_zone(self):
"""Creates a volume in availability_zone."""
# Create volume
availability_zone = 'zone1:host1'
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'availability_zone': availability_zone}})
LOG.debug("created_volume: %s" % created_volume)
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there and availability zone present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(availability_zone, found_volume['availability_zone'])
def test_create_and_update_volume(self):
# Create vol1
created_volume = self.api.post_volume({'volume': {
'size': 1, 'display_name': 'vol1'}})
self.assertEqual(created_volume['display_name'], 'vol1')
created_volume_id = created_volume['id']
# update volume
body = {'volume': {'display_name': 'vol-one'}}
updated_volume = self.api.put_volume(created_volume_id, body)
self.assertEqual(updated_volume['display_name'], 'vol-one')
# check for update
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(found_volume['display_name'], 'vol-one')
if __name__ == "__main__":
unittest.main()
| ["umi@apricot.(none)"] | umi@apricot.(none) |
73d0714f19bdb72afedfbdfb6a4e3fdd2f379fcd | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/infra/Cisco_IOS_XR_infra_infra_clock_cfg.py | f85b7e5635a2a58fec6e49f2f48e3d0d54e76eaa | ["Apache-2.0"] | permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,599 | py | """ Cisco_IOS_XR_infra_infra_clock_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-infra\-clock package configuration.
This module contains definitions
for the following management objects\:
clock\: Configure time\-of\-day clock
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYDataValidationError
class ClockMonth_Enum(Enum):
"""
ClockMonth_Enum
Clock month
"""
"""
January
"""
JANUARY = 0
"""
February
"""
FEBRUARY = 1
"""
March
"""
MARCH = 2
"""
April
"""
APRIL = 3
"""
May
"""
MAY = 4
"""
June
"""
JUNE = 5
"""
July
"""
JULY = 6
"""
August
"""
AUGUST = 7
"""
September
"""
SEPTEMBER = 8
"""
October
"""
OCTOBER = 9
"""
November
"""
NOVEMBER = 10
"""
December
"""
DECEMBER = 11
@staticmethod
def _meta_info():
from ydk.models.infra._meta import _Cisco_IOS_XR_infra_infra_clock_cfg as meta
return meta._meta_table['ClockMonth_Enum']
class ClockSummerTimeMode_Enum(Enum):
"""
ClockSummerTimeMode_Enum
Clock summer time mode
"""
"""
Recurring summer time
"""
RECURRING = 0
"""
Absolute summer time
"""
DATE = 1
@staticmethod
def _meta_info():
from ydk.models.infra._meta import _Cisco_IOS_XR_infra_infra_clock_cfg as meta
return meta._meta_table['ClockSummerTimeMode_Enum']
class Clock(object):
"""
Configure time\-of\-day clock
.. attribute:: summer_time
Configure summer (daylight savings) time
**type**\: :py:class:`SummerTime <ydk.models.infra.Cisco_IOS_XR_infra_infra_clock_cfg.Clock.SummerTime>`
.. attribute:: time_zone
Configure time zone
**type**\: :py:class:`TimeZone <ydk.models.infra.Cisco_IOS_XR_infra_infra_clock_cfg.Clock.TimeZone>`
"""
_prefix = 'infra-infra-clock-cfg'
_revision = '2015-11-09'
def __init__(self):
self.summer_time = None
self.time_zone = None
class SummerTime(object):
"""
Configure summer (daylight savings) time
.. attribute:: end_hour
Hour to end
**type**\: int
**range:** 0..23
.. attribute:: end_minute
Minute to end
**type**\: int
**range:** 0..59
.. attribute:: end_month
Month to end
**type**\: :py:class:`ClockMonth_Enum <ydk.models.infra.Cisco_IOS_XR_infra_infra_clock_cfg.ClockMonth_Enum>`
.. attribute:: end_week_number_or_end_date
If Mode is set to 'Recurring' specify Week number of the Month to end (first and last strings are not allowed as they are in CLI), if Mode is set to 'Date' specify Date to End
**type**\: int
**range:** 1..31
.. attribute:: end_weekday_or_end_year
If Mode is set to 'Recurring' specify Weekday to end , if Mode is set to 'Date' specify Year to end
**type**\: int
**range:** 0..2035
.. attribute:: mode
Summer time mode
**type**\: :py:class:`ClockSummerTimeMode_Enum <ydk.models.infra.Cisco_IOS_XR_infra_infra_clock_cfg.ClockSummerTimeMode_Enum>`
.. attribute:: offset
Offset to add in minutes
**type**\: int
**range:** 1..1440
.. attribute:: start_hour
Hour to start
**type**\: int
**range:** 0..23
.. attribute:: start_minute
Minute to start
**type**\: int
**range:** 0..59
.. attribute:: start_month
Month to start
**type**\: :py:class:`ClockMonth_Enum <ydk.models.infra.Cisco_IOS_XR_infra_infra_clock_cfg.ClockMonth_Enum>`
.. attribute:: start_week_number_or_start_date
If Mode is set to 'Recurring' specify Week number of the Month to start (first and last strings are not allowed as they are in CLI) , if Mode is set to 'Date' specify Date to start
**type**\: int
**range:** 1..31
.. attribute:: start_weekday_or_start_year
If Mode is set to 'Recurring' specify Weekday to start , if Mode is set to 'Date' specify Year to start
**type**\: int
**range:** 0..2035
.. attribute:: time_zone_name
Name of time zone in summer
**type**\: str
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'infra-infra-clock-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.end_hour = None
self.end_minute = None
self.end_month = None
self.end_week_number_or_end_date = None
self.end_weekday_or_end_year = None
self.mode = None
self.offset = None
self.start_hour = None
self.start_minute = None
self.start_month = None
self.start_week_number_or_start_date = None
self.start_weekday_or_start_year = None
self.time_zone_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-infra-clock-cfg:clock/Cisco-IOS-XR-infra-infra-clock-cfg:summer-time'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.is_presence():
return True
if self.end_hour is not None:
return True
if self.end_minute is not None:
return True
if self.end_month is not None:
return True
if self.end_week_number_or_end_date is not None:
return True
if self.end_weekday_or_end_year is not None:
return True
if self.mode is not None:
return True
if self.offset is not None:
return True
if self.start_hour is not None:
return True
if self.start_minute is not None:
return True
if self.start_month is not None:
return True
if self.start_week_number_or_start_date is not None:
return True
if self.start_weekday_or_start_year is not None:
return True
if self.time_zone_name is not None:
return True
return False
def is_presence(self):
''' Returns True if this instance represents presence container else returns False '''
return True
@staticmethod
def _meta_info():
from ydk.models.infra._meta import _Cisco_IOS_XR_infra_infra_clock_cfg as meta
return meta._meta_table['Clock.SummerTime']['meta_info']
class TimeZone(object):
"""
Configure time zone
.. attribute:: hour_offset
Hours offset from UTC
**type**\: int
**range:** \-23..23
.. attribute:: minute_offset
Minutes offset from UTC
**type**\: int
**range:** 0..59
.. attribute:: time_zone_name
Name of time zone
**type**\: str
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'infra-infra-clock-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.hour_offset = None
self.minute_offset = None
self.time_zone_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-infra-clock-cfg:clock/Cisco-IOS-XR-infra-infra-clock-cfg:time-zone'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.is_presence():
return True
if self.hour_offset is not None:
return True
if self.minute_offset is not None:
return True
if self.time_zone_name is not None:
return True
return False
def is_presence(self):
''' Returns True if this instance represents presence container else returns False '''
return True
@staticmethod
def _meta_info():
from ydk.models.infra._meta import _Cisco_IOS_XR_infra_infra_clock_cfg as meta
return meta._meta_table['Clock.TimeZone']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-infra-clock-cfg:clock'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.is_presence():
return True
if self.summer_time is not None and self.summer_time._has_data():
return True
if self.summer_time is not None and self.summer_time.is_presence():
return True
if self.time_zone is not None and self.time_zone._has_data():
return True
if self.time_zone is not None and self.time_zone.is_presence():
return True
return False
def is_presence(self):
''' Returns True if this instance represents presence container else returns False '''
return False
@staticmethod
def _meta_info():
from ydk.models.infra._meta import _Cisco_IOS_XR_infra_infra_clock_cfg as meta
return meta._meta_table['Clock']['meta_info']
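# Illustrative usage (sketch, not part of the generated bindings): filling in
# this model before pushing it with a hypothetical YDK CRUD service might look
# like the following; the provider/service setup is assumed and omitted here.
#
#   clock = Clock()
#   clock.time_zone = Clock.TimeZone()
#   clock.time_zone.time_zone_name = 'PST'
#   clock.time_zone.hour_offset = -8
#   clock.time_zone.minute_offset = 0
#   # crud_service.create(provider, clock)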
| ["[email protected]"] | |
7ac9d1d7cdbc4d296c89596b2cbfc6ba7d04f0d9 | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Date-And-Time-Modules/Time-Module/asctime.py | 4636038147253d9c36f45035d301ee8980be3e2f | [] | no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py |
import time
print(time.asctime())
print(time.asctime([2000,1,31,6,30,59,17,0,0]))
| ["[email protected]"] | |
0e9b9d7b4cfe391c63a71319701c9315c52c1588 | efc690a4c42b1511deb0fe80bf146872c45aed69 | /conf_site/sponsorship/models.py | f1a2c0a6aceed0242319506cff3c3807176f3152 | ["MIT"] | permissive | jasongrout/conf_site | 34aa1197727fbbbdf8811338764a7451445f1803 | 6b3beb21de8d847cba65dcb6da84464b40739d48 | refs/heads/master | 2021-03-31T06:35:59.696561 | 2020-03-17T20:39:57 | 2020-03-17T20:39:57 | 248,086,087 | 0 | 0 | MIT | 2020-03-17T22:32:02 | 2020-03-17T22:32:01 | null | UTF-8 | Python | false | false | 421 | py | from django.db import models
from wagtail.contrib.settings.models import BaseSetting, register_setting
@register_setting(icon="group")
class SponsorshipSettings(BaseSetting):
info_link = models.URLField(
default=u"https://pydata.org/pdf/sponsor-prospectus.pdf",
max_length=2083,
verbose_name=u"Link to sponsor prospectus.",
)
class Meta:
verbose_name = u"sponsor settings"
| ["[email protected]"] | |
2512b083e55f6101e8cf2f1a317a475ba920dc4c | 32e948f9e7d6bd8771d266d93db0a2653043af0c | /retrieve_cci.py | 2e4c71257d529c9c71e1d2bc844b836f83643c91 | ["MIT"] | permissive | brorfred/density_stats | f14c979f6b6214f4546f5d6f14767cb0825c1d02 | b66673c67083010f29afbac1ce30ab431643697f | refs/heads/main | 2023-05-05T11:26:48.312639 | 2023-04-17T13:39:42 | 2023-04-17T13:39:42 | 506,174,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py |
import numpy as np
import xarray as xr
import pandas as pd
import pyresample as pr
from fast_histogram import histogram1d
import resample
bins = 100
binrange = np.log(10**-4), np.log(10**2)
binlist = np.linspace(*binrange, bins)
def open_dataset(date="2000-01-01"):
"""Open OC-CCI dataset as xarray dataarray via opendap"""
#dtm = np.datetime64(date) if type(date) is str else date
url = "https://www.oceancolour.org/thredds/dodsC/CCI_ALL-v5.0-DAILY"
ds = xr.open_dataset(url)
return ds["chlor_a"].sel(time=date)
def setup_grid():
"""Create matrices with latitudes and longitudes for the t-coords"""
i0t,imt,j0t,jmt = (0000 ,8640, 0, 4320)
incr = 360.0/imt
jR = np.arange(j0t, jmt)
iR = np.arange(i0t, imt)
latvec = ( 90 - jR*incr - incr/2)[::-1]
lonvec = (-180 + iR*incr + incr/2)
lons,lats = np.meshgrid(lonvec, latvec)
grid = pr.geometry.GridDefinition(lons=lons, lats=lats)
grid.ivec = np.arange(grid.shape[1])
grid.jvec = np.arange(grid.shape[0])
grid.iarr,grid.jarr = np.meshgrid(grid.ivec, grid.jvec)
return grid
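# Quick check (sketch): the grid above is a regular lat/lon grid at
# 360/8640 = 1/24 degree (~4 km at the equator), matching the daily
# OC-CCI product:
#
#   grid = setup_grid()
#   # grid.shape == (4320, 8640)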
def setup_darwin_grid():
"""Create matrices with latitudes and longitudes for the t-coords"""
latvec = np.arange(-79.5, 80.5)
lonvec = np.arange(-179.5, 180.5)
lons,lats = np.meshgrid(lonvec, latvec)
grid = pr.geometry.GridDefinition(lons=lons, lats=lats)
grid.ivec = np.arange(grid.shape[1])
grid.jvec = np.arange(grid.shape[0])
grid.iarr,grid.jarr = np.meshgrid(grid.ivec, grid.jvec)
return grid
def fields_to_histograms(date1="2001-01-01", date2="2001-12-31"):
"""Read CCI fields and convert to histograms, separating month and region"""
longh = xr.open_dataset("indata/longhurst_darwin.nc")
reglist = np.unique(longh.regions.data.astype(int))
histmat = np.zeros((12, len(reglist), len(binlist)))
griddr = setup_darwin_grid()
grid = setup_grid()
for dtm in pd.date_range(date1, date2):
fld = resample.coarsegrid(grid, open_dataset(date=dtm).data, griddr)
for npos,reg in enumerate(reglist):
mask = np.isfinite(fld) & (fld>0) & (reg==longh.regions.data)
cnt = histogram1d(np.log(fld[mask]), range=binrange, bins=bins)
histmat[dtm.month,npos,:] = cnt
print(dtm)
return histmat
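# Example usage (sketch; assumes network access to the OC-CCI THREDDS server
# and the local indata/longhurst_darwin.nc file):
#
#   hist = fields_to_histograms("2001-01-01", "2001-01-31")
#   # hist[month, region, bin] holds counts of log-chlorophyll values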
"""
def longhurst_nc_file():
ds = longhurst.open_dataset()
griddr = setup_darwin_grid()
dsgr = xr.Dataset( coords={"lat":griddr.lats[:,0], "lon":griddr.lons[0,:]})
for key in ds.data_vars:
arr = resample.coarsegrid(longhurst.setup_grid(), ds[key].data, griddr)
dsgr[key] = (("lat","lon"), arr)
dsgr.to_netcdf("indata/longhurst_darwin.nc")
"""
| ["[email protected]"] | |
9c4c84794897db611285d44fe40a857bd5997ffb | e1303b5f9b13b9b7f0fa7b5af1e7b8b38314d31f | /findTreeDiameter.py | 8945e0de95a004a85b0c3fd800eb3850c42f4d14 | [] | no_license | middleverse/ads_prac | a77b0a162ac34eae3847533f773e28e6bec93fbb | 5b41defb74b1ae1fb38f244d9ffa070ac07d44c9 | refs/heads/master | 2023-03-25T06:54:56.913539 | 2021-03-10T23:06:51 | 2021-03-10T23:06:51 | 307,391,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class TreeDiameter:
def __init__(self):
self.treeDiameter = 0
    def find_diameter(self, root):
        # reset the running maximum so repeated calls on different trees
        # don't reuse a stale diameter from an earlier call
        self.treeDiameter = 0
        self.find_diameter_recursive(root)
        return self.treeDiameter
def find_diameter_recursive(self, node):
if node is None:
return 0
if node.left is None and node.right is None:
return 1
else:
left_depth = self.find_diameter_recursive(node.left)
right_depth = self.find_diameter_recursive(node.right)
current_diameter = left_depth + right_depth + 1
self.treeDiameter = max(self.treeDiameter, current_diameter)
return 1 + max(left_depth, right_depth)
def main():
treeDiameter = TreeDiameter()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.right.left = TreeNode(5)
root.right.right = TreeNode(6)
print("Tree Diameter: " + str(treeDiameter.find_diameter(root)))
root.left.left = None
root.right.left.left = TreeNode(7)
root.right.left.right = TreeNode(8)
root.right.right.left = TreeNode(9)
root.right.left.right.left = TreeNode(10)
root.right.right.left.left = TreeNode(11)
print("Tree Diameter: " + str(treeDiameter.find_diameter(root)))
main()
| ["[email protected]"] | |
cc4d1a777b82c57297ced99c26076184686c1a16 | de8e0c5c759347917ca7f06b42ca6c82b8f8c95f | /baekjoon/11_math-2/9613.py | b08bf5a8ddfbf8f859876049db7ce69d8f7dcee3 | [] | no_license | Greek-and-Roman-God/Apollo | aaeb315a9e70c719b3e53e3c4b9b5dde7b517ec0 | 2823cbcc9fc10ecd3f1785732403cb9c288f8ef3 | refs/heads/main | 2023-08-23T12:08:05.322733 | 2021-10-02T10:54:13 | 2021-10-02T10:54:13 | 308,242,023 | 1 | 1 | null | 2020-11-26T12:03:44 | 2020-10-29T06:49:26 | Python | UTF-8 | Python | false | false | 502 | py | # 9613 GCD 합
# https://www.acmicpc.net/problem/9613
def gcd(a, b):
result = 0
if a > b:
a, b = b, a
while b > 0:
result = b
a, b = b, a % b
return result
t = int(input())
for _ in range(t):
inp = list(map(int, input().split()))
cnt = inp[0]
num_list = inp[1:]
answer = 0
while num_list:
num = num_list.pop(0)
temp = 0
for n in num_list:
temp += gcd(num, n)
answer += temp
print(answer)
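# Note: the gcd() above is Euclid's algorithm; on Python 3.5+ the built-in
# math.gcd gives the same result, e.g.:
#   from math import gcd as gcd_builtin
#   assert gcd_builtin(12, 18) == 6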
| ["[email protected]"] | |
12a0858f92e944f3282765fe64115943dabf630e | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_express_route_service_providers_operations.py | 562620df96ed6195c4d70aade703489bdeef9938 | ["MIT", "LicenseRef-scancode-generic-cla", "LGPL-2.1-or-later"] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,138 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteServiceProvidersOperations:
"""ExpressRouteServiceProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.ExpressRouteServiceProviderListResult"]:
"""Gets all the available express route service providers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.ExpressRouteServiceProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteServiceProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'} # type: ignore
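# Illustrative usage (sketch, not part of the generated file): this operation
# group is normally reached through the aio NetworkManagementClient, and the
# pager is consumed with ``async for`` (client construction assumed):
#
#   async for provider in client.express_route_service_providers.list():
#       print(provider.name)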
| ["[email protected]"] | |
533602f46e5fd616444778719ca121007c3b3786 | 8e58feae60f66ed52e5e5a04843580591bc6968d | /setup.py | b295a85ded33f5522c9e1cac762aaf01290d18b2 | ["BSD-3-Clause"] | permissive | rblack42/sphinxcontrib-lpblocks | 89a50ff64cbf8866ec2663dd999ccca0fad4bfd9 | a30869b1ee2f4f0f73b35f4a9841609d1fafd487 | refs/heads/master | 2022-07-31T23:29:57.534383 | 2020-05-18T20:06:33 | 2020-05-18T20:06:33 | 264,270,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | '''
Sphinxcontrib-lpblocks
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension to support Literate Programming.
'''
import io
from setuptools import setup, find_packages
import sphinxcontrib.lpblocks as lp
def readfile(filename):
with io.open(filename, encoding="utf-8") as stream:
return stream.read().split("\n")
readme = readfile("README.rst")
setup(
name='sphinxcontrib-lpblocks',
version=lp.version,
url=lp.url,
download_url=lp.pypi,
license=lp.license,
author=lp.author,
author_email=lp.email,
description=lp.summary,
long_description="\n".join(readme),
long_description_content_type='text/x-rst',
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=['sphinx', 'sphinx-rtd-theme'],
namespace_packages=['sphinxcontrib'],
)
| ["[email protected]"] | |
f95a5a18b076a96c572670f92020e9b9eb2a8756 | faa84a83a3d347ac8b17cab2fa0599c65652a85d | /uliweb/contrib/jsonql/__init__.py | 983480b5218ad736d2b4e7dba40780990a35cd85 | ["BSD-2-Clause", "BSD-3-Clause"] | permissive | limodou/uliweb | 4d829568cb6bab4a807fb80adb84b6aa1d0e76ab | 8bc827fa6bf7bf58aa8136b6c920fe2650c52422 | refs/heads/master | 2023-08-22T09:10:34.609797 | 2021-07-04T01:20:33 | 2021-07-04T01:20:33 | 283,042 | 230 | 65 | BSD-2-Clause | 2019-07-29T11:17:01 | 2009-08-20T13:50:46 | Python | UTF-8 | Python | false | false | 22,652 | py | #coding=utf8
from uliweb import settings
from uliweb.utils.common import safe_str, import_attr
from uliweb.utils.storage import Storage
from uliweb.orm import do_, get_model
from uliweb.utils.sorteddict import SortedDict
from sqlalchemy import __version__ as sa_version, select, true, text, literal
import logging
DEBUG = False
__schemas__ = {}
__relations__ = None
__default_limit__ = 10
log = logging.getLogger(__name__)
class ModelError(Exception): pass
class SchemaError(Exception): pass
class RelationError(Exception): pass
class FieldError(Exception): pass
class Type(object):
creation_counter = 1
def __init__(self, type='str', label='', field_name=None, **kwargs):
self.type = type
self.label = label
self.field_name = field_name
self.name = None
self.kwargs = kwargs
self.creation_counter = Type.creation_counter
Type.creation_counter += 1
class SchemaMetaClass(type):
def __init__(cls, name, bases, dct):
super(SchemaMetaClass, cls).__init__(name, bases, dct)
if name == 'Schema':
return
cls.properties = {}
cls._fields_list = []
cls._collection_names = {}
cls._bind()
for attr_name in dct.keys():
attr = dct[attr_name]
if isinstance(attr, Type):
attr.name = attr_name
cls.properties[attr_name] = attr
fields_list = [(k, v) for k, v in cls.properties.items()]
        fields_list.sort(key=lambda x: x[1].creation_counter)
cls._fields_list = [k for k,v in fields_list]
cls._bind_query()
def reflect_column(column):
type_name = column.type.__class__.__name__.lower()
kwargs = SortedDict()
field_type = type_name
if type_name in ('char', 'varchar'):
kwargs['max_length'] = column.type.length
elif type_name in ('text', 'blob', 'integer', 'float', 'bigint'):
pass
elif type_name == 'long':
field_type = 'bigint'
elif type_name in ('clob',):
field_type = 'text'
elif type_name in ('decimal', 'float'):
kwargs['precision'] = column.type.precision
kwargs['scale'] = column.type.scale
elif type_name == 'raw': # oracle
field_type = 'binary'
        kwargs['max_length'] = column.type.length
elif type_name == 'number':
if column.type.scale:
kwargs['precision'] = column.type.precision
kwargs['scale'] = column.type.scale
field_type = 'decimal'
else:
field_type = 'int'
elif type_name == 'numeric':
field_type = 'decimal'
kwargs['precision'] = column.type.precision
kwargs['scale'] = column.type.scale
elif type_name in ('timestamp',):
field_type = 'timestamp'
elif type_name in ('datetime', 'date', 'time'):
pass
# for tinyint will be treated as bool
elif type_name in ('tinyint', 'boolean'):
field_type = 'bool'
else:
raise ValueError("Don't support column [{0}] for type [{1}] when parsing {2}".format(column.name, type_name, column.table.name))
if sa_version >= '1.2' and column.comment:
kwargs['label'] = column.comment
if not kwargs.get('label'):
kwargs['label'] = column.name
return field_type, kwargs
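# Example (sketch): for a plain SQLAlchemy VARCHAR(32) column without a
# comment, reflect_column returns the field type plus keyword arguments:
#
#   from sqlalchemy import Table, Column, MetaData, VARCHAR
#   t = Table('demo', MetaData(), Column('name', VARCHAR(32)))
#   reflect_column(t.columns['name'])
#   # -> ('varchar', {'max_length': 32, 'label': 'name'})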
class Schema(object):
__metaclass__ = SchemaMetaClass
__model__ = None #Model name
__table__ = None #table name
__fields__ = []
__query__ = None
@classmethod
def __repr__(cls):
d = []
d.append('{}{{'.format(cls.__name__))
for name in cls._fields_list:
f = cls.properties[name]
field_name = ''
if f.field_name:
field_name = ' ,field_name={}'.format(f.field_name)
d.append(' {}(type=\'{}\', label=\'{}\'{})'.format(f.name, f.type, safe_str(f.label), field_name))
d.append('}')
return '\n'.join(d)
@classmethod
def _bind(cls):
from uliweb.orm import reflect_table
        if cls.__table__ is None and cls.__model__:
model = get_model(cls.__model__)
if not model:
raise ModelError('Model {} can not be found'.format(cls.__model__))
cls.__table__ = model.table
if cls.__table__ is not None:
cls.__table__ = reflect_table(cls.__table__)
for f in (cls.__fields__ or cls.__table__.columns.keys()):
col = cls.__table__.columns.get(f)
if col is not None:
field_type, kwargs = reflect_column(col)
field = Type(field_type, **kwargs)
field.name = f
cls.properties[f] = field
else:
                    raise FieldError('Field {} can not be found in table {}'.format(f, cls.__table__.name))
@classmethod
def _bind_query(cls):
if cls.__table__ is not None and not cls.__query__:
fields = []
for f in cls.properties.values():
name = f.field_name or f.name
col = cls.__table__.columns.get(name)
if col is not None:
fields.append(col)
cls.__query__ = select(fields, from_obj=[cls.__table__])
@classmethod
def get_column(cls, name):
alias = ''
if ':' in name:
name, alias = [x.strip() for x in name.split(':')]
col = cls.__table__.columns.get(name)
if col is None:
if alias:
col = text(name + ' as ' + alias)
else:
col = text(name)
else:
if alias:
col = col.label(alias)
return col
class Relation(object):
def __init__(self, relation):
self._schema_a = None # schema class
self._schema_b = None
self._schema_a_name = None
self._schema_b_name = None
self._fields_a = set()
self._fields_b = set()
self.relation_key = None # saving [(schema_a, schema_b), (schema_b, schema_a)]
self.cached = {}
if not isinstance(relation, (tuple, list)):
relation = [relation]
for v in relation:
t1, t2 = [x.strip() for x in v.split('=')]
schema_a_name, field_a_name = t1.split('.')
schema_b_name, field_b_name = t2.split('.')
key = (schema_a_name, schema_b_name)
if self.relation_key and key not in self.relation_key:
raise RelationError('Relation {!r} is not matched with before value {!r}'.format(
key, self.relation_key))
self._schema_a_name = schema_a_name
self._schema_b_name = schema_b_name
self.relation_key = [key, (schema_b_name, schema_a_name)]
self._fields_a.add((field_a_name, field_b_name))
self._fields_b.add((field_b_name, field_a_name))
@property
def schema_a(self):
if not self._schema_a:
self._schema_a = get_schema(self._schema_a_name)
return self._schema_a
@property
def schema_b(self):
if not self._schema_b:
self._schema_b = get_schema(self._schema_b_name)
return self._schema_b
def __eq__(self, key):
"""
:param key: (schema_a, schema_b)
:return:
"""
return key in self.relation_key
def get_condition(self, key):
condition = None
a, b = key
if not self == key:
return condition
condition = self.cached.get(key)
if not condition:
condition = true()
if a == self._schema_a_name:
for fa, fb in self._fields_a:
condition = (self.schema_a.get_column(fa) == self.schema_b.get_column(fb)) & condition
else:
for fb, fa in self._fields_b:
condition = (self.schema_b.get_column(fb) == self.schema_a.get_column(fa)) & condition
self.cached[key] = condition
return condition
class Relations(object):
def __init__(self):
self.relations = {}
def add(self, relation):
"""
relation is a string list, just like:
['User.id = Group.user', 'User.username = Group.username']
:param relation:
:return:
"""
r = Relation(relation)
key = r.relation_key[0]
if key not in self.relations:
self.relations[key] = r
self.relations[r.relation_key[1]]= r
def get_condition(self, relation):
"""
:param relation: (schema_a, schema_b)
:return:
"""
condition = None
r = self.relations.get(relation)
if r:
condition = r.get_condition(relation)
return condition
__relations__ = Relations()
def add_relation(relation):
global __relations__
__relations__.add(relation)
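# Example (sketch, hypothetical schema names): declare that the User and
# Group schemas join on Group.user == User.id, optionally over several
# field pairs at once:
#
#   add_relation('User.id = Group.user')
#   add_relation(['User.id = Group.user', 'User.username = Group.username'])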
def get_relation_condition(key):
"""
Get relation condition
:param key: should be (schema_a, schema_b)
:return:
"""
global __relations__
return __relations__.get_condition(key)
def get_schema(name, exception=True):
global __schemas__
s = __schemas__.get(name)
if not s and exception:
raise SchemaError('Schema {} can not be found in settings.'.format(name))
return s
class Query(object):
def __init__(self, data):
self.data = data
def run(self):
data = {}
for name, param in self.data.items():
k, result = self.query_schema(name, param)
data[k] = result
return data
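    # Example (sketch, hypothetical schemas): a query document maps entry
    # names to parameters, and run() returns a dict keyed by each alias:
    #
    #   Query({
    #       'User[]:users': {
    #           '@columns': ['id', 'username'],
    #           '@limit': 20,
    #           '$age': '>= 18',
    #       }
    #   }).run()   # -> {'users': [...]}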
def parse_entry(self, name):
"""
Parse query entry name, just like:
{
'User[]:user'
}
'User[]:user' is an entry name.
:param name:
:return:
"""
# calculate schema mode
# if ':name' or '' or '[]:name' or '[]' found, it'll be treat as multiple Schema query
alias = name
if ':' in name:
name, alias = name.split(':')
if name.endswith('[]'):
need_list = True
name = name[:-2]
else:
need_list = False
return alias, name, need_list
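    # parse_entry examples (sketch):
    #   'User[]:users' -> ('users', 'User', True)   # list result, aliased
    #   'User:me'      -> ('me', 'User', False)     # single row, aliased
    #   'User'         -> ('User', 'User', False)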
def query_schema(self, name, param):
"""
If name includes '[]', then it'll return a list
:param name: schema name
:param param: json parameters
:return:
"""
alias, name, need_list = self.parse_entry(name)
if not name:
result = self.process_multiple_query(need_list, param)
else:
result = self.process_single_query(name, need_list, param)
return alias, result
def parse_condition(self, schema, name, v):
"""
Parse name = 'value' to condition
:param name: column name
:param schema: schema name
:param v: column value
:return:
"""
S = schema
col = S.get_column(name)
condition = None
if col is not None: # can create condition
if isinstance(v, (str, unicode)):
if v.startswith('>='):
condition = (col >= eval(v[2:].strip()))
elif v.startswith('>'):
condition = (col > eval(v[1:].strip()))
elif v.startswith('<='):
condition = (col <= eval(v[2:].strip()))
elif v.startswith('<'):
condition = (col < eval(v[1:].strip()))
elif v.startswith('='):
condition = (col == eval(v[1:].strip()))
elif v.startswith('!='):
condition = (col != eval(v[2:].strip()))
elif v.startswith('like'):
condition = col.like(v[4:].strip())
elif v.startswith('between'):
_v = eval(v[7:].strip())
if not isinstance(_v, (tuple, list)):
raise ValueError("Between operation should be a list, but {!r} found".format(v))
condition = (col.between(*_v))
elif v.startswith('in'):
condition = (col.in_(eval(v[2:].strip())))
else:
if '%' in v: # like
condition = col.like(v)
else:
condition = (col == v)
elif isinstance(v, (tuple, list)):
condition = (col.in_(v))
else:
condition = (col == v)
return condition
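    # parse_condition examples (sketch): with S a bound schema, JSON values
    # map to SQLAlchemy conditions on the named column:
    #   {'$age': '>= 18'}   -> S.get_column('age') >= 18
    #   {'$name': 'abc%'}   -> S.get_column('name').like('abc%')
    #   {'$id': [1, 2, 3]}  -> S.get_column('id').in_([1, 2, 3])
    #   {'$d': "between ('2018-01-01', '2018-12-31')"} -> .between(...)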
def parse_param(self, name, param):
"""
Parse schema parameter, it'll return
{
condition
columns
limit
order_by
group_by
total
page
table
name #schema name
}
:param name: schema name
:param param: schema query parameter
:return: dict
"""
S = get_schema(name)
# prepare condition
condition = true()
fields = []
columns = []
columns_param = {}
limit = __default_limit__
order_by = []
group_by = []
total = None
page = 0
table = S.__table__
relation = None
for k, v in param.items():
if k.startswith('@'):
if k == '@columns':
fields = v[:]
elif k == '@limit':
limit = v
elif k == '@page':
page = v
elif k == '@order_by':
if isinstance(v, (str, unicode)):
orders = v.split(',')
else:
orders = v
for c in orders:
if '.' in c:
col_name, dir = c.split('.')
else:
col_name = c
dir = 'asc'
col = S.get_column(col_name)
if dir == 'desc':
order_by.append(col.desc())
else:
order_by.append(col)
elif k == '@group_by':
if isinstance(v, (str, unicode)):
groups = v.split(',')
else:
groups = v
for c in groups:
col = S.get_column(c)
group_by.append(col)
elif k == '@total':
total = v
elif k == '@relation':
relation_key = name, v
relation = get_relation_condition(relation_key)
elif k.startswith('$'): # condition
c = self.parse_condition(S, k[1:], v)
if c is not None:
condition = c & condition
            elif isinstance(v, dict):  # nested schema
                # todo nested schema
                # if a single row is expected, it'll use a left join; otherwise
                # a standalone query
                nested_alias, nested_name, nested_need_list = self.parse_entry(k)
                nested_config = self.parse_param(nested_name, v)
if nested_need_list:
# insert resolve function
pass
else:
relation = name, nested_config.name
outerjoin_condition = get_relation_condition(relation)
if outerjoin_condition is None:
raise RelationError("Relation between {!r} can not be found".format(relation))
                    table = table.outerjoin(nested_config.table, outerjoin_condition)
condition = nested_config.condition & condition
columns.extend(nested_config.columns)
else:
# columns
if k not in fields:
fields.append(k)
columns.extend([S.get_column(x) for x in fields or S._fields_list]) # used for select
config = Storage({})
config.table = table
config.condition = condition
config.columns = columns
config.columns_param = columns_param
config.total = total
config.limit = limit
config.page = page
config.order_by = order_by
config.group_by = group_by
config.name = name
config.schema = S
config.relation = relation
return config
def parse_multiple_query(self, param):
tables = []
condition = true()
order_by = []
group_by = []
limit = __default_limit__
total = None
page = 0
columns = []
for k, v in param.items():
if isinstance(v, dict): # Schema
c = self.parse_param(k, v)
tables.append(c.table)
columns.extend(c.columns)
condition = c.condition & condition
if c.relation is not None:
condition = c.relation & condition
else:
if k.startswith('@'):
if k == '@limit':
limit = v
elif k == '@page':
page = v
elif k == '@order_by':
if isinstance(v, (str, unicode)):
orders = v.split(',')
else:
orders = v
for c in orders:
                            if '.' in c:
                                parts = c.split('.')
                                if len(parts) == 3:
                                    schema_name, col_name, dir = parts
                                else:
                                    schema_name, col_name = parts
                                    dir = 'asc'
                            else:
                                raise ValueError(
                                    "Multiple-schema order_by entries should look like "
                                    "'Schema.column[.dir]', but {!r} found".format(c))
S = get_schema(schema_name)
col = S.get_column(col_name)
if dir == 'desc':
order_by.append(col.desc())
else:
order_by.append(col)
elif k == '@group_by':
if isinstance(v, (str, unicode)):
groups = v.split(',')
else:
groups = v
for c in groups:
if '.' in c:
schema_name, col_name = c.split('.')
S = get_schema(schema_name)
col = S.get_column(col_name)
group_by.append(col)
elif k == '@total':
total = v
config = Storage({})
config.tables = tables
config.condition = condition
config.columns = columns
config.order_by = order_by
config.group_by = group_by
config.page = page
config.limit = limit
        config.total = total
        config.name = '<multiple>'  # used by the debug logging in process_multiple_query
        return config
def process_multiple_query(self, need_list, param):
config = self.parse_multiple_query(param)
count = 0
query = select(config.columns, config.condition, from_obj=config.tables)
if need_list:
if config.order_by:
query = query.order_by(*config.order_by)
if config.group_by:
query = query.group_by(*config.group_by)
if config.total:
if DEBUG:
log.debug('Query Schema {} Count:'.format(config.name))
log.debug(query.count())
count = do_(query.count()).scalar()
if config.page > 0:
query = query.limit(config.limit).offset((config.page-1)*config.limit)
if DEBUG:
log.debug('Query Schema {}:'.format(config.name))
log.debug(query)
result = {'data': [dict(row) for row in do_(query)]}
if config.total:
result['total'] = count
else:
query = query.limit(1)
if DEBUG:
log.debug('Query Schema {}:'.format(config.name))
result = list(do_(query))
if result:
result = dict(result[0])
else:
result = {}
return result
def process_single_query(self, name, need_list, param):
config = self.parse_param(name, param)
count = 0
query = select(config.columns, config.condition, from_obj=[config.table])
if need_list:
if config.order_by:
query = query.order_by(*config.order_by)
if config.group_by:
query = query.group_by(*config.group_by)
if config.total:
if DEBUG:
log.debug('Query Schema {} Count:'.format(config.name))
log.debug(query.count())
count = do_(query.count()).scalar()
if config.page > 0:
query = query.limit(config.limit).offset((config.page-1)*config.limit)
if DEBUG:
log.debug('Query Schema {}:'.format(config.name))
log.debug(query)
result = {'data': [dict(row) for row in do_(query)]}
if config.total:
result['total'] = count
else:
query = query.limit(1)
if DEBUG:
log.debug('Query Schema {}:'.format(config.name))
result = list(do_(query))
if result:
result = dict(result[0])
else:
result = {}
return result
def query(d):
"""
Query schema
:param d: dict options
:return:
"""
q = Query(d)
return q.run()
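# Illustrative sketch (not part of the original module): assuming a 'User'
# schema is registered via settings.JSONQL_SCHEMA, a request could look like:
#
#     query({
#         'User[]:users': {
#             '@columns': ['id', 'username'],
#             '@order_by': 'id.desc',
#             '@limit': 5,
#             '@total': True,
#             '$age': '>= 18',
#         }
#     })
#
# which would return {'users': {'data': [...], 'total': <count>}}.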
def after_init_apps(sender):
global __schemas__, __default_limit__
if 'JSONQL_SCHEMA' in settings:
for name, model_path in settings.JSONQL_SCHEMA.items():
if not model_path: continue
if isinstance(model_path, (str, unicode)):
path = model_path
else:
raise Exception("Schema path should be a string but %r found" % model_path)
__schemas__[name] = import_attr(model_path)
__default_limit__ = settings.JSONQL.get('limit', 10) | [
"[email protected]"
] | |
90cec1ab49e52fccaa6aa9922683ffe103721e6a | 3002452f8e5520645109eb68b70128b9ebb01ccf | /turngeneration/tests/test_models.py | bfd85131f2c6bbacf1a3efafc5563b520a00a38f | [
"MIT"
] | permissive | jbradberry/django-turn-generation | f01b2d681ec8a01263dd8afa467544ae85082351 | dbfec9d0addbff2d8d54597b7520e171938c9107 | refs/heads/master | 2021-05-16T03:13:09.156798 | 2019-12-21T02:21:24 | 2019-12-21T02:21:24 | 14,544,818 | 0 | 1 | MIT | 2019-12-21T02:21:26 | 2013-11-20T02:45:08 | Python | UTF-8 | Python | false | false | 10,757 | py | import datetime
from django.contrib.auth.models import User
from django.utils import timezone
from django.test import TestCase
from mock import patch, call
from dateutil import rrule
import pytz
from ..models import Generator, GenerationTime, GenerationRule, Pause, Ready
from sample_app.models import TestRealm, TestAgent
@patch('turngeneration.tasks.ready_generation')
@patch('turngeneration.tasks.timed_generation')
class GeneratorTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='player',
password='password')
self.realm = TestRealm.objects.create(slug='mygame')
self.generator = Generator(realm=self.realm)
self.generator.save()
self.agent1 = self.realm.agents.create(slug='agent1')
self.agent2 = self.realm.agents.create(slug='agent2')
def test_is_ready(self, timed_task, ready_task):
self.assertFalse(self.generator.is_ready())
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
self.assertFalse(self.generator.is_ready())
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
def test_enabling_autogen_when_ready(self, timed_task, ready_task):
self.generator.autogenerate = False
self.generator.save()
self.assertFalse(self.generator.is_ready())
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
self.generator.autogenerate = True
self.generator.save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertEqual(ready_task.mock_calls,
[call.apply_async((self.generator.pk,))])
def test_enabling_autogen_when_ready_and_paused(self, timed_task, ready_task):
self.generator.autogenerate = False
self.generator.save()
self.assertFalse(self.generator.is_ready())
Pause(agent=self.agent2,
generator=self.generator,
user=self.user,
reason="1 week vacation.").save()
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
self.generator.autogenerate = True
self.generator.save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertEqual(ready_task.mock_calls,
[call.apply_async((self.generator.pk,))])
def test_enabling_forcegen_when_no_existing_task(self, timed_task, ready_task):
mocktask = timed_task.apply_async.return_value
mocktask.id = 'fake'
self.generator.rules.create(
freq=rrule.DAILY,
dtstart=datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc)
)
self.generator.force_generate = False
self.generator.save()
self.assertFalse(self.generator.task_id)
self.assertIsNone(self.generator.generation_time)
self.generator.force_generate = True
self.generator.save()
self.assertTrue(self.generator.task_id)
self.assertIsNotNone(self.generator.generation_time)
def test_enabling_forcegen_when_task_exists(self, timed_task, ready_task):
Generator.objects.filter(id=self.generator.id).update(
force_generate=False, task_id='fake',
generation_time=datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc)
)
generator = Generator.objects.get(id=self.generator.id)
generator.force_generate = True
generator.save()
self.assertEqual(generator.task_id, 'fake')
self.assertEqual(generator.generation_time,
datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc))
self.assertFalse(timed_task.mock_calls)
def test_disabling_forcegen_when_task_exists(self, timed_task, ready_task):
Generator.objects.filter(id=self.generator.id).update(
task_id='fake',
generation_time=datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc)
)
generator = Generator.objects.get(id=self.generator.id)
with patch('celery.current_app.control') as control:
generator.force_generate = False
generator.save()
self.assertEqual(control.mock_calls, [call.revoke('fake')])
self.assertFalse(generator.task_id)
self.assertIsNone(generator.generation_time)
def test_disabling_forcegen_when_no_existing_task(self, timed_task, ready_task):
self.assertTrue(self.generator.force_generate)
self.assertFalse(self.generator.task_id)
self.assertIsNone(self.generator.generation_time)
with patch('celery.current_app.control') as control:
self.generator.force_generate = False
self.generator.save()
self.assertFalse(control.mock_calls)
self.assertFalse(self.generator.task_id)
self.assertIsNone(self.generator.generation_time)
class GenerationRuleTestCase(TestCase):
def setUp(self):
self.realm = TestRealm.objects.create()
self.generator = Generator(realm=self.realm)
self.generator.save()
self.now = datetime.datetime(2014, 11, 30, 10)
def test_empty(self):
next_time = self.generator.next_time(self.now)
self.assertIsNone(next_time)
def test_daily(self):
self.generator.rules.create(
freq=rrule.DAILY,
dtstart=datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc)
)
self.assertEqual(self.generator.rules.count(), 1)
next_time = self.generator.next_time(self.now)
self.assertEqual(next_time,
datetime.datetime(2014, 11, 30, 18, tzinfo=pytz.utc))
def test_weekly(self):
self.generator.rules.create(
freq=rrule.WEEKLY,
dtstart=datetime.datetime(2014, 11, 29, 18, tzinfo=pytz.utc)
)
self.assertEqual(self.generator.rules.count(), 1)
next_time = self.generator.next_time(self.now)
self.assertEqual(next_time,
datetime.datetime(2014, 12, 6, 18, tzinfo=pytz.utc))
@patch('turngeneration.tasks.ready_generation')
@patch('turngeneration.tasks.timed_generation')
class ReadyTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='player',
password='password')
self.realm = TestRealm.objects.create(slug='mygame')
self.generator = Generator(realm=self.realm)
self.generator.save()
self.agent1 = self.realm.agents.create(slug='agent1')
self.agent2 = self.realm.agents.create(slug='agent2')
def test_autogenerate_when_ready_and_enabled(self, timed_task, ready_task):
self.assertTrue(self.generator.autogenerate)
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
self.assertFalse(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertEqual(ready_task.mock_calls,
[call.apply_async((self.generator.pk,))])
def test_no_autogenerate_when_ready_and_disabled(self, timed_task, ready_task):
self.generator.autogenerate = False
self.generator.save()
self.assertFalse(self.generator.autogenerate)
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
self.assertFalse(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
def test_still_autogenerate_when_paused(self, timed_task, ready_task):
self.assertTrue(self.generator.autogenerate)
self.assertTrue(self.generator.allow_pauses)
Pause(agent=self.agent2,
generator=self.generator,
user=self.user,
reason="1 week vacation.").save()
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
self.assertFalse(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertEqual(ready_task.mock_calls,
[call.apply_async((self.generator.pk,))])
def test_no_autogenerate_when_paused_and_disabled(self, timed_task, ready_task):
self.generator.autogenerate = False
self.generator.save()
self.assertFalse(self.generator.autogenerate)
self.assertTrue(self.generator.allow_pauses)
Pause(agent=self.agent2,
generator=self.generator,
user=self.user,
reason="1 week vacation.").save()
Ready(agent=self.agent1,
generator=self.generator,
user=self.user).save()
self.assertFalse(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
Ready(agent=self.agent2,
generator=self.generator,
user=self.user).save()
self.assertTrue(self.generator.is_ready())
self.assertFalse(timed_task.mock_calls)
self.assertFalse(ready_task.mock_calls)
| [
"[email protected]"
] | |
28b5f273d0e04b143c9cd5bb9d092bf1a50eedb1 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/ipython-5.0.0-py27_0/lib/python2.7/site-packages/IPython/core/pylabtools.py | e7327e58e8c5648d59463ff3f067689983b9f24d | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 13,845 | py | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'inline' : 'module://ipykernel.pylab.backend_inline'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues pasting further figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
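    # e.g. (sketch): getfigs() returns every open figure, while getfigs(1, 3)
    # returns only figures 1 and 3, warning about any number that isn't open.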
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pylab as pylab
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if pylab.draw_if_interactive.called:
pylab.draw()
pylab.draw_if_interactive.called = False
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
from ipykernel.pylab import backend_inline
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
if matplotlib.get_backend().lower() == 'nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
from matplotlib import pyplot
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = pyplot.rcParams[k]
# load inline_rc
pyplot.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
pyplot.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which the user
    # may have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
| [
"[email protected]"
] | |
e53dd06c2a2df6fe95473cf29333718731785fe7 | be5f4d79910e4a93201664270916dcea51d3b9ee | /rovers/fastdownward/experiments/issue420/issue420-v1.py | b5d93b01dad7fc570cabc56ea38aee5819c50c0a | [
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-or-later"
] | permissive | mehrdadzakershahrak/Online-Explanation-Generation | 17c3ab727c2a4a60381402ff44e95c0d5fd0e283 | e41ad9b5a390abdaf271562a56105c191e33b74d | refs/heads/master | 2022-12-09T15:49:45.709080 | 2019-12-04T10:23:23 | 2019-12-04T10:23:23 | 184,834,004 | 0 | 0 | MIT | 2022-12-08T17:42:50 | 2019-05-04T00:04:59 | Python | UTF-8 | Python | false | false | 718 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward.suites import suite_optimal_with_ipc11
import common_setup
REVS = ["issue420-base", "issue420-v1"]
CONFIGS = {
"blind": ["--search", "astar(blind())"],
"lmcut": ["--search", "astar(lmcut())"],
}
TEST_RUN = False
if TEST_RUN:
SUITE = "gripper:prob01.pddl"
PRIORITY = None # "None" means local experiment
else:
SUITE = suite_optimal_with_ipc11()
PRIORITY = 0 # number means maia experiment
exp = common_setup.MyExperiment(
grid_priority=PRIORITY,
revisions=REVS,
configs=CONFIGS,
suite=SUITE,
)
exp.add_comparison_table_step(
attributes=common_setup.MyExperiment.DEFAULT_TABLE_ATTRIBUTES
)
exp()
| [
"[email protected]"
] | |
258b646c702736c7a992f113a68f318a0df6c2a5 | e0980f704a573894350e285f66f4cf390837238e | /.history/menus/models_20201030152822.py | c70dffeb2c5e9ce219e941ae4b37043c1fd1e98f | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.models import ClusterableModel
from modelcluster.fields import ParentalKey
from wagtail.core.models import Orderable
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, InlinePanel
class MenuItem(Orderable):
link_title = models.CharField(blank=True, max_length=50)
link_url = models.CharField(max_length=500, blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE,
)
open_in_new_tab = models.BooleanField(
default=False,
blank=True,
)
panels = [
FieldPanel('link_title'),
FieldPanel('link_url'),
PageChooserPanel('link_page'),
FieldPanel('open_in_new_tab',),
]
page = ParentalKey('Menu', related_name='menu_items')
@property
def link(self):
if self.link_page:
return self.link_page
elif self.link_url:
return self.link_url
return '#'
@property
def title(self):
        if self.link_page and not self.link_title:
            return self.link_page.title
        return self.link_title
class Menu(ClusterableModel):
title = models.CharField(max_length=100)
slug = AutoSlugField(
populate_from='title',
editable=True,
)
panels = [
FieldPanel('title'),
FieldPanel('slug'),
InlinePanel('menu_items', label='Menu Item'),
]
def __str__(self):
return self.title
| [
"[email protected]"
] | |
3e9956e9dcc4a1c4b5873fb6cc00798a7a1b8a6e | 2eb386991d9975f0f8440d90de26e950304ac42f | /TGHACK2020/mystic/mystic.py | 890845cecbe1fd91e46764c04ab3175c590b43f6 | [] | no_license | Quintec/CTFs2020 | 2816a66e8a486537c31e5ac25253840bc3a8ffe9 | bdaa327c9f0b0ee16ff95bafcaf65f0df8acd8b9 | refs/heads/master | 2022-12-19T21:39:14.129702 | 2020-10-01T16:49:06 | 2020-10-01T16:49:06 | 281,812,929 | 1 | 0 | null | 2020-10-01T16:49:08 | 2020-07-23T00:37:44 | null | UTF-8 | Python | false | false | 182 | py | with open("mystic.png", 'rb') as file:
dat = file.read()
print(dat)
print(type(dat))
dat = bytes([d^42 for d in dat])
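# Note: XOR with a fixed key byte is its own inverse, so running this same
# transform over mystic.dat would recover the original mystic.png bytes.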
with open("mystic.dat", 'wb') as file:
file.write(dat) | [
"[email protected]"
] | |
571dce4b35f1680fb3c9ae0aa7327fffecc8a440 | eac985db4df6ef7429ee6798956aad85087dc444 | /rcsfootprints/settings.py | 5809049ca8dfede2566f444d0a91519b4eaebd56 | [] | no_license | scottcoughlin2014/rcsfootprints | 10e7caafda5062ecece0cf1eba978a853e35240e | 06317fd97cd1feda745428df4c65662941caa755 | refs/heads/master | 2022-11-28T15:50:57.004215 | 2020-08-03T20:34:06 | 2020-08-03T20:34:06 | 284,801,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,713 | py | """
Django settings for rcsfootprints project.
Generated by 'django-admin startproject' using Django 3.0.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
import random
SECRET_KEY = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
secret = open(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
            raise Exception('Please create a %s file with random characters '
                            'to generate your secret key!' % SECRET_FILE)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# rcsfootprints apps
'issue.apps.IssueConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rcsfootprints.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rcsfootprints.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
1cc919de041c45de9d95e37af262c9a2f2d6e5fe | c2f85286d1e21fb803c35f6d996abc850b993e53 | /mystorage/models.py | 3b1da397f6d612372399cd856fd46f538d5bc4d8 | [] | no_license | devdw98/likelion_drf | dfeec1bf5ee153918807f99040c8c33240c4344c | 6d0171961bc93f4edd7998b7351034e0a936079d | refs/heads/master | 2020-07-29T20:38:29.041098 | 2019-10-27T07:22:53 | 2019-10-27T07:22:53 | 209,951,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | from django.db import models
from django.conf import settings
class Essay(models.Model):
    author = models.ForeignKey(settings.AUTH_USER_MODEL, default = 1, on_delete=models.CASCADE)  # deleting the referenced user also deletes everything related to it
title = models.CharField(max_length = 30)
body = models.TextField()
class Album(models.Model):  # run `pip install Pillow` so ImageField can manage image files efficiently
    author = models.ForeignKey(settings.AUTH_USER_MODEL, default = 1, on_delete=models.CASCADE)  # deleting the referenced user also deletes everything related to it
image = models.ImageField(upload_to="images")
desc = models.CharField(max_length = 100)
class Files(models.Model):
    author = models.ForeignKey(settings.AUTH_USER_MODEL, default = 1, on_delete=models.CASCADE)  # deleting the referenced user also deletes everything related to it
myfile = models.FileField(blank = False, null = False, upload_to="files")
desc = models.CharField(max_length = 100) | [
"[email protected]"
] | |
00b51cd8339ee6726731147e53432b24dc540662 | a4146004694aa8745f751fd49152b01718e43cdd | /Algorithm/test/IM/D03/미로탈출 로봇중간 단계2.py | 6085d18524e9fd1a78f28e0e385c58c842be2f6e | [] | no_license | hoyoung2176/TIL | 1f4e2110e1e21d03855889044a7df280ad9788fc | e2bca069d36455fdf8f142fa9a06fb1a39f3c99f | refs/heads/master | 2021-06-25T13:34:01.463989 | 2019-07-31T05:05:45 | 2019-07-31T05:05:45 | 163,781,026 | 0 | 0 | null | 2021-06-10T21:19:53 | 2019-01-02T01:39:56 | Python | UTF-8 | Python | false | false | 872 | py | import sys
sys.stdin = open("미로탈출 로봇중간 단계.txt")
N = int(input())
arr = [[1]*(N+2) for _ in range(N+2)]  # maze padded with a wall border
for i in range(1, N+1):
    arr[i] = [1] + list(map(int, input())) + [1]
Darr = list(map(int, input().split()))
Dno = 0  # index into the given direction sequence
dr = [0, 1, 0, -1, 0]  # directions: 1=down, 2=left, 3=up, 4=right
dc = [0, 0, -1, 0, 1]
r, c = 1, 1  # current position
cnt = 0
while True:
    # advance to the next cell
    r = r + dr[Darr[Dno]]
    c = c + dc[Darr[Dno]]
    if arr[r][c] == 0:  # open cell
        # mark it as visited and count it
        arr[r][c] = 9
        cnt += 1
    elif arr[r][c] == 1:  # wall
        # step back and rotate to the next direction (wraps after the fourth)
        r = r - dr[Darr[Dno]]
        c = c - dc[Darr[Dno]]
        Dno = (Dno + 1) % 4
    else:
        break  # stop once an already-visited cell is reached
print(cnt) | [
"[email protected]"
] | |
4c119e4d899ded17fc322cbed2aae93f00f17728 | 28b4c4676cb016e35cc7b1048097628caa113114 | /sso/relay.py | 99757032575f381faeec89c1b1124012853f5fa4 | [
"MIT"
] | permissive | shiroyuki/voila-id | 829127ef258c829f33d6f4079486829e3325f208 | da86daef255d23b73ad6f2ba67fba02201407f09 | refs/heads/master | 2016-09-06T16:56:45.724263 | 2014-08-25T05:50:43 | 2014-08-25T05:50:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | from sso.common import Controller
from sso.security import AccessMode
class Relay(Controller):
def get(self):
routing_map = self.component('routing_map')
if not self.session.get('auth'):
return self.redirect(routing_map.resolve('authentication'))
auth = self.session.get('auth')
if auth['access_mode'] == AccessMode.MASTER:
return self.redirect(routing_map.resolve('admin.profile.list'))
if not self.session.get('referer'):
return self.redirect(routing_map.resolve('user.profile', key = auth['username']))
referer = self.session.get('referer')
        raise NotImplementedError('To be implemented')
return self.redirect(routing_map.resolve('authentication')) | [
"[email protected]"
] | |
afe11d1033c0af9c586eb0ac088d3f083c15409b | 44d62a165f943ca752795be97b5921767c034f29 | /iblrig/frame2TTL.py | e20a493f101e1a7f882816f9d47bac791410a1f8 | [
"MIT"
] | permissive | magically-solutions-india/iblrig | 1d2c52d9966f3b3c9f681fde37a9926e35b62542 | 0d428591bd91d22feed5254f36a5dc9e104d4ff5 | refs/heads/master | 2020-12-20T03:41:08.808129 | 2020-01-22T10:07:55 | 2020-01-22T10:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | import logging
import struct
import numpy as np
import serial
import iblrig.alyx
import iblrig.params
log = logging.getLogger('iblrig')
class Frame2TTL(object):
def __init__(self, serial_port):
self.serial_port = serial_port
self.connected = False
self.ser = self.connect(serial_port)
self.light_threshold = 40
self.dark_threshold = 80
self.streaming = False
self.measured_black = None
self.measured_white = None
self.recomend_dark = None
self.recomend_light = None
def connect(self, serial_port) -> serial.Serial:
"""Create connection to serial_port"""
ser = serial.Serial(port=serial_port, baudrate=115200, timeout=1)
self.connected = True
return ser
def close(self) -> None:
"""Close connection to serial port"""
self.ser.close()
self.connected = False
def start_stream(self) -> None:
"""Enable streaming to USB (stream rate 100Hz)
response = int.from_bytes(self.ser.read(4), byteorder='little')"""
self.ser.write(struct.pack('cB', b'S', 1))
self.streaming = True
def stop_stream(self) -> None:
"""Disable streaming to USB"""
self.ser.write(struct.pack('cB', b'S', 0))
self.streaming = False
def read_value(self) -> int:
"""Read one value from sensor (current)"""
self.ser.write(b'V')
response = self.ser.read(4)
# print(np.frombuffer(response, dtype=np.uint32))
response = int.from_bytes(response, byteorder='little')
return response
def measure_photons(self, num_samples: int = 250) -> dict:
"""Measure <num_samples> values from the sensor and return basic stats.
Mean, Std, SEM, Nsamples
"""
import time
sample_sum = []
for i in range(num_samples):
sample_sum.append(self.read_value())
time.sleep(0.001)
out = {
'mean_value': float(np.array(sample_sum).mean()),
'max_value': float(np.array(sample_sum).max()),
'min_value': float(np.array(sample_sum).min()),
'std_value': float(np.array(sample_sum).std()),
'sem_value': float(np.array(sample_sum).std() / np.sqrt(num_samples)),
'nsamples': float(num_samples)
}
return out
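        # For example (illustrative numbers only), measure_photons(250) could
        # return something like:
        #     {'mean_value': 123.4, 'max_value': 130.0, 'min_value': 118.0,
        #      'std_value': 2.1, 'sem_value': 0.13, 'nsamples': 250.0}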
def set_thresholds(self, dark=None, light=None) -> None:
"""Set light, dark, or both thresholds for the device"""
if dark is None:
dark = self.dark_threshold
if light is None:
light = self.light_threshold
self.ser.write(b'C')
response = self.ser.read(1)
if response[0] != 218:
raise(ConnectionError)
# Device wants light threshold before dark
self.ser.write(struct.pack('<BHH', ord('T'), int(light), int(dark)))
if light != self.light_threshold:
log.info(f"Light threshold set to {light}")
if dark != self.dark_threshold:
log.info(f"Dark threshold set to {dark}")
if light == 40 and dark == 80:
log.info(f"Resetted to default values: light={light} - dark={dark}")
self.dark_threshold = dark
self.light_threshold = light
def measure_white(self):
log.info("Measuring white...")
self.measured_white = self.measure_photons(1000)
return self.measured_white
def measure_black(self):
log.info("Measuring black...")
self.measured_black = self.measure_photons(1000)
return self.measured_black
def calc_recomend_thresholds(self):
if (self.measured_black is None) or (self.measured_white is None):
log.error("No mesures exist")
return -1
self.recomend_light = self.measured_white.get('max_value')
if self.measured_black['min_value'] - self.recomend_light > 40:
self.recomend_dark = self.recomend_light + 40
else:
self.recomend_dark = round(self.recomend_light + (
(self.measured_black['min_value'] - self.recomend_light) / 3))
if self.recomend_dark - self.recomend_light < 5:
            log.error('Cannot recommend thresholds:')
log.error('Black and White measurements may be too close for accurate frame detection')
log.error(f'Light = {self.recomend_light}, Dark = {self.recomend_dark}')
return -1
else:
log.info('Recommended thresholds:')
log.info(f'Light ={self.recomend_light}, Dark = {self.recomend_dark}.')
print('Done')
return self.recomend_dark, self.recomend_light
def set_recommendations(self):
log.info(f'Sending thresholds to device...')
self.set_thresholds(dark=self.recomend_dark, light=self.recomend_light)
def suggest_thresholds(self) -> None:
input("Set pixels under Frame2TTL to white (rgb 255,255,255) and press enter >")
print(" ")
print("Measuring white...")
white_data = self.measure_photons(10000)
input("Set pixels under Frame2TTL to black (rgb 0,0,0) and press enter >")
print(" ")
print("Measuring black...")
dark_data = self.measure_photons(10000)
print(" ")
light_max = white_data.get('max_value')
dark_min = dark_data.get('min_value')
print(f"Max sensor reading for white (lower is brighter) = {light_max}.")
print(f"Min sensor reading for black = {dark_min}.")
recomend_light = light_max
if dark_min - recomend_light > 40:
recomend_dark = recomend_light + 40
else:
recomend_dark = round(
recomend_light + ((dark_min - recomend_light) / 3))
if recomend_dark - recomend_light < 5:
print('Error: Cannot recommend thresholds:',
'light and dark measurements may be too close for accurate frame detection')
else:
log.info(f"Recommended thresholds: Light = {recomend_light}, Dark = {recomend_dark}.")
log.info(f"Sending thresholds to device...")
self.recomend_dark = recomend_dark
self.recomend_light = recomend_light
self.set_thresholds(light=recomend_light, dark=recomend_dark)
print('Done')
def get_and_set_thresholds():
params = iblrig.params.load_params_file()
for k in params:
if 'F2TTL' in k and params[k] is None:
log.error(f"Missing parameter {k}, please calibrate the device.")
            raise KeyError(k)
dev = Frame2TTL(params['COM_F2TTL'])
dev.set_thresholds(dark=params['F2TTL_DARK_THRESH'], light=params['F2TTL_LIGHT_THRESH'])
log.info(f"Frame2TTL: Thresholds set.")
return 0
if __name__ == "__main__":
com_port = 'COM3'
f = Frame2TTL(com_port)
print(f.read_value())
print(f.measure_photons())
f.set_thresholds()
f.set_thresholds(light=41, dark=81)
f.set_thresholds(light=41)
f.set_thresholds(dark=81)
f.suggest_thresholds()
print('.')
| [
"[email protected]"
] | |
323314a44358a166ba601398ae2f8308055495fa | cfc46fd56c16ac9c010bcf0c1eb50da3047d1b2b | /toscametrics/general/text_entropy.py | d41f0bfbd5d377be29b2831b990d4c15a8b08cbf | [
"Apache-2.0"
] | permissive | radon-h2020/radon-tosca-metrics | d93ef5b3dc53c7863ba98a985919237fe6c4aadf | d0a10e10f2d897299a04f69290f09d5589bc039f | refs/heads/master | 2021-08-24T13:53:43.207745 | 2021-07-06T08:44:00 | 2021-07-06T08:44:00 | 242,997,596 | 3 | 0 | Apache-2.0 | 2021-03-29T13:47:46 | 2020-02-25T12:45:05 | Python | UTF-8 | Python | false | false | 811 | py | import re
from math import log2
from toscametrics import utils
from toscametrics.blueprint_metric import BlueprintMetric
def splitter(x):
return re.sub(r'\s+', ' ', str(x)).split(' ')
class TextEntropy(BlueprintMetric):
""" This class measures the blueprint's Shannon entropy for keywords frequencies """
def count(self):
words_list = utils.all_keys(self.blueprint)
words_list.extend(utils.all_values(self.blueprint))
words_list = [item for sublist in list(map(splitter, words_list)) for item in sublist]
words_set = set(words_list)
freq = {w: words_list.count(w) for w in words_set}
entropy = 0
for word in words_set:
p = freq[word] / len(words_list)
entropy -= p * log2(p)
return round(entropy, 2)
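
# Minimal sketch (not part of the original module): the same Shannon-entropy
# formula applied directly to a plain token list, using only the log2 import
# above. Expected output for this toy input: 1.5
if __name__ == '__main__':
    tokens = ['tosca', 'node', 'node', 'type']
    probs = [tokens.count(w) / len(tokens) for w in set(tokens)]
    print(round(-sum(p * log2(p) for p in probs), 2))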
| [
"[email protected]"
] | |
c5fa70baf76c52c1e7edd22a04caa0f0124d5192 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_10971.py | 1e427429e1f1fb08ee4d28ae4224b1b47e60d17e | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | # sampling random floats on a range in numpy
np.random.uniform(5,10) # A single value
np.random.uniform(5,10,[2,3]) # A 2x3 array
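# A seeded, reproducible variant (sketch; assumes numpy >= 1.17 imported as np):
# rng = np.random.default_rng(0)
# rng.uniform(5, 10, size=(2, 3))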
| [
"[email protected]"
] | |
2450814533832c1abc008339e64b4cf98eedf527 | 0206ac23a29673ee52c367b103dfe59e7733cdc1 | /src/fao_un/fao_explore_text_file.py | 8f720005daa4892cb920451ae2460557678d06ed | [] | no_license | guziy/RPN | 2304a93f9ced626ae5fc8abfcc079e33159ae56a | 71b94f4c73d4100345d29a6fbfa9fa108d8027b5 | refs/heads/master | 2021-11-27T07:18:22.705921 | 2021-11-27T00:54:03 | 2021-11-27T00:54:03 | 2,078,454 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | __author__ = 'huziy'
def main():
path = "/skynet3_rech1/huziy/Global_terrain_slopes_30s/GloSlopesCl1_30as.asc"
with open(path) as f:
for i, line in enumerate(f):
if i < 6:
print(line)
if 3000 < i < 4000:
nums = [int(s.strip()) for s in line.split()]
nums = [n for n in nums if n != 255]
if len(nums):
print(min(nums), max(nums), len(nums))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
e61562e0b288dce372a49eb34d6462236ab75a15 | 3a117858e61c87b703c694d8e1b6da61e6074851 | /src/.history/Test/HiwinRT605_Strategy_testsocket_20190626140818.py | ae6dddd6e9834397236b8fe1d7181f7c4ff0b95c | [
"MIT"
] | permissive | SamKaiYang/ROS_Socket | 4daa2c66181a76038e20161a44f1362084b6bd58 | aa131496617cec0a9c32141565faa668db738eb9 | refs/heads/master | 2020-05-25T22:02:02.468922 | 2019-07-18T09:02:35 | 2019-07-18T09:02:35 | 188,009,003 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,551 | py | #!/usr/bin/env python3
# license removed for brevity
# strategy: the robot arm cycles back and forth through four points
import threading
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
import Hiwin_RT605_Arm_Command as ArmTask
##----Arm state-----------
Arm_state_flag = 0
Strategy_flag = 0
Sent_data_flag = True
##----Arm status enum
class Arm_status(enum.IntEnum):
Idle = 0
Isbusy = 1
Error = 2
shutdown = 6
##-----------server feedback arm state----------
def Arm_state(req):
global CurrentMissionType,Strategy_flag,Arm_state_flag
Arm_state_flag = int('%s'%req.Arm_state)
    if Arm_state_flag == Arm_status.Isbusy:  # arm is busy
        Strategy_flag = False
        return(1)
    if Arm_state_flag == Arm_status.Idle:  # arm is ready
        Strategy_flag = True
        return(0)
    if Arm_state_flag == Arm_status.shutdown:  # program interrupted
Strategy_flag = 6
return(6)
##-----------server feedback Sent_flag----------
def Sent_flag(req):
global Sent_data_flag
Sent_data_flag = int('%s'%req.sent_flag)
return(1)
def arm_state_server():
#rospy.init_node(NAME)
s = rospy.Service('arm_state',arm_state, Arm_state) ##server arm state
a = rospy.Service('sent_flag',sent_flag,Sent_flag)
#rospy.spin() ## spin one
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # a bare StopIteration here would surface as RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
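# Illustrative usage of the switch recipe above (names are made up): a matched
# case "falls through" until a break, mirroring a C-style switch statement.
#
#     for case in switch(cmd):
#         if case(1):
#             do_one(); break
#         if case(2):
#             do_two(); break
#         if case():  # default
#             do_default()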
##------------class-------
class point():
def __init__(self,x,y,z,pitch,roll,yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##-------------------------strategy---------------------
action = 0
def Mission_Trigger(ItemNo):
global action,Arm_state_flag,Sent_data_flag
if Arm_state_flag == Arm_status.Idle:
# Sent_data_flag = False
        for case in switch(ItemNo): # send the command over the socket to select the arm motion
if case(0):
pos.x = 10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 1
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(1):
pos.x = 10
pos.y = 42
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 2
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(2):
pos.x = -10
pos.y = 42
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 3
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(3):
pos.x = -10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 4
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(4):
pos.x = 0
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
action = 5
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
ArmTask.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
ArmTask.strategy_client_Arm_Mode(2,1,0,10,2)#action,ra,grip,vel,both
break
if case(): # default, could also just omit condition or 'if True'
rospy.on_shutdown(myhook)
ArmTask.rospy.on_shutdown(myhook)
#action: ptp line
#ra : abs rel
    #grip : gripper
#vel speed
#both : Ctrl_Mode
##-------------strategy end ------------
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
argv = rospy.myargv()
rospy.init_node('strategy', anonymous=True)
GetInfoFlag = True #Test no data
arm_state_server()
    start_input = int(input('Press 1 to start the strategy, 3 to exit: '))  # read the start command
    start_input = 1  # forced on for this socket test, overriding the prompt
if start_input==1:
# encoding: UTF-8
        timer = threading.Timer(5, Mission_Trigger, args=(action,))  # pass the callable and its args; calling it here would run it immediately
timer.start()
##while 1:
#Mission_Trigger(action)
if start_input == 3:
pass
ArmTask.rospy.spin()
rospy.spin()
| [
"[email protected]"
] | |
c2c096ac99f6a8ef635bfe244d6f1b363cafbb99 | 25bb4e760769cc483a20f27b6312698891dce034 | /algorithms/dynamic programming/fibonacci-modified-English.py | 43e913d6860797d42644fb453e498a9e8ba33f17 | [] | no_license | rangaeeeee/codes-hackerrank | e13d22adff1ef74974e34251d9bfac6cfd36f2b0 | ce7fdf7f336c10164fd2f779d4ed3713849d7c2b | refs/heads/master | 2021-01-19T17:07:28.451983 | 2017-09-01T18:05:33 | 2017-09-01T18:05:33 | 101,049,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
t1,t2,n = map(int,input().split())
result = [None] * (n+1)
result[1] = t1
result[2] = t2
for i in range(3,n+1,1):
if result[i] == None:
result[i] = result[i-2] + result[i-1]**2
print(result[n]) | [
"[email protected]"
] | |
812f0756e1b6b47085b05cdfe5bb31706fea3a48 | d91a61442d0ba0547301f54e7f63039ab5346198 | /for/exe_5_soma_impares.py | e24ac2e7564b8fc8ec7eb560600909bf26b37fd9 | [] | no_license | pedroceciliocn/programa-o-1 | a41bf89475939ad1791320c8fa4845744921ec19 | 15f8a3dbc13f4cbbc15f2b626ccef47fe145cc3f | refs/heads/main | 2023-07-12T20:06:20.632304 | 2021-08-17T22:05:57 | 2021-08-17T22:05:57 | 377,301,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | """
Sum the odd integers between two integer
values given by the user.
"""
n_1 = int(input("De o primeiro valor do intervalo: "))
n_2 = int(input("De o segundo valor do intervalo: "))
s = 0
if n_1 > n_2:
n_1, n_2 = n_2, n_1
for i in range(n_1, n_2 + 1):
if i % 2 != 0:
s += i
print(f"+ {s}")
print("------")
print(f"s = {s}")
| [
"[email protected]"
] | |
e7a515daeaa03636535a18bbda0da4090a2ea830 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/network/v20200601/express_route_circuit.py | 727cf5612ac08490037b0bda33dc7615a0e558e1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,206 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ExpressRouteCircuitArgs', 'ExpressRouteCircuit']
@pulumi.input_type
class ExpressRouteCircuitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
authorizations: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitAuthorizationArgs']]]] = None,
bandwidth_in_gbps: Optional[pulumi.Input[float]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
circuit_provisioning_state: Optional[pulumi.Input[str]] = None,
express_route_port: Optional[pulumi.Input['SubResourceArgs']] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
global_reach_enabled: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]] = None,
service_key: Optional[pulumi.Input[str]] = None,
service_provider_notes: Optional[pulumi.Input[str]] = None,
service_provider_properties: Optional[pulumi.Input['ExpressRouteCircuitServiceProviderPropertiesArgs']] = None,
service_provider_provisioning_state: Optional[pulumi.Input[Union[str, 'ServiceProviderProvisioningState']]] = None,
sku: Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ExpressRouteCircuit resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[bool] allow_classic_operations: Allow classic operations.
:param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitAuthorizationArgs']]] authorizations: The list of authorizations.
:param pulumi.Input[float] bandwidth_in_gbps: The bandwidth of the circuit when the circuit is provisioned on an ExpressRoutePort resource.
:param pulumi.Input[str] circuit_name: The name of the circuit.
:param pulumi.Input[str] circuit_provisioning_state: The CircuitProvisioningState state of the resource.
:param pulumi.Input['SubResourceArgs'] express_route_port: The reference to the ExpressRoutePort resource when the circuit is provisioned on an ExpressRoutePort resource.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[bool] global_reach_enabled: Flag denoting global reach status.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]] peerings: The list of peerings.
:param pulumi.Input[str] service_key: The ServiceKey.
:param pulumi.Input[str] service_provider_notes: The ServiceProviderNotes.
:param pulumi.Input['ExpressRouteCircuitServiceProviderPropertiesArgs'] service_provider_properties: The ServiceProviderProperties.
:param pulumi.Input[Union[str, 'ServiceProviderProvisioningState']] service_provider_provisioning_state: The ServiceProviderProvisioningState state of the resource.
:param pulumi.Input['ExpressRouteCircuitSkuArgs'] sku: The SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if allow_classic_operations is not None:
pulumi.set(__self__, "allow_classic_operations", allow_classic_operations)
if authorizations is not None:
pulumi.set(__self__, "authorizations", authorizations)
if bandwidth_in_gbps is not None:
pulumi.set(__self__, "bandwidth_in_gbps", bandwidth_in_gbps)
if circuit_name is not None:
pulumi.set(__self__, "circuit_name", circuit_name)
if circuit_provisioning_state is not None:
pulumi.set(__self__, "circuit_provisioning_state", circuit_provisioning_state)
if express_route_port is not None:
pulumi.set(__self__, "express_route_port", express_route_port)
if gateway_manager_etag is not None:
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if global_reach_enabled is not None:
pulumi.set(__self__, "global_reach_enabled", global_reach_enabled)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if peerings is not None:
pulumi.set(__self__, "peerings", peerings)
if service_key is not None:
pulumi.set(__self__, "service_key", service_key)
if service_provider_notes is not None:
pulumi.set(__self__, "service_provider_notes", service_provider_notes)
if service_provider_properties is not None:
pulumi.set(__self__, "service_provider_properties", service_provider_properties)
if service_provider_provisioning_state is not None:
pulumi.set(__self__, "service_provider_provisioning_state", service_provider_provisioning_state)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> Optional[pulumi.Input[bool]]:
"""
Allow classic operations.
"""
return pulumi.get(self, "allow_classic_operations")
@allow_classic_operations.setter
def allow_classic_operations(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_classic_operations", value)
@property
@pulumi.getter
def authorizations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitAuthorizationArgs']]]]:
"""
The list of authorizations.
"""
return pulumi.get(self, "authorizations")
@authorizations.setter
def authorizations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitAuthorizationArgs']]]]):
pulumi.set(self, "authorizations", value)
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> Optional[pulumi.Input[float]]:
"""
The bandwidth of the circuit when the circuit is provisioned on an ExpressRoutePort resource.
"""
return pulumi.get(self, "bandwidth_in_gbps")
@bandwidth_in_gbps.setter
def bandwidth_in_gbps(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "bandwidth_in_gbps", value)
@property
@pulumi.getter(name="circuitName")
def circuit_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the circuit.
"""
return pulumi.get(self, "circuit_name")
@circuit_name.setter
def circuit_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "circuit_name", value)
@property
@pulumi.getter(name="circuitProvisioningState")
def circuit_provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The CircuitProvisioningState state of the resource.
"""
return pulumi.get(self, "circuit_provisioning_state")
@circuit_provisioning_state.setter
def circuit_provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "circuit_provisioning_state", value)
@property
@pulumi.getter(name="expressRoutePort")
def express_route_port(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The reference to the ExpressRoutePort resource when the circuit is provisioned on an ExpressRoutePort resource.
"""
return pulumi.get(self, "express_route_port")
@express_route_port.setter
def express_route_port(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "express_route_port", value)
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[pulumi.Input[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@gateway_manager_etag.setter
def gateway_manager_etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gateway_manager_etag", value)
@property
@pulumi.getter(name="globalReachEnabled")
def global_reach_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Flag denoting global reach status.
"""
return pulumi.get(self, "global_reach_enabled")
@global_reach_enabled.setter
def global_reach_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "global_reach_enabled", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def peerings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]:
"""
The list of peerings.
"""
return pulumi.get(self, "peerings")
@peerings.setter
def peerings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]):
pulumi.set(self, "peerings", value)
@property
@pulumi.getter(name="serviceKey")
def service_key(self) -> Optional[pulumi.Input[str]]:
"""
The ServiceKey.
"""
return pulumi.get(self, "service_key")
@service_key.setter
def service_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_key", value)
@property
@pulumi.getter(name="serviceProviderNotes")
def service_provider_notes(self) -> Optional[pulumi.Input[str]]:
"""
The ServiceProviderNotes.
"""
return pulumi.get(self, "service_provider_notes")
@service_provider_notes.setter
def service_provider_notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_notes", value)
@property
@pulumi.getter(name="serviceProviderProperties")
def service_provider_properties(self) -> Optional[pulumi.Input['ExpressRouteCircuitServiceProviderPropertiesArgs']]:
"""
The ServiceProviderProperties.
"""
return pulumi.get(self, "service_provider_properties")
@service_provider_properties.setter
def service_provider_properties(self, value: Optional[pulumi.Input['ExpressRouteCircuitServiceProviderPropertiesArgs']]):
pulumi.set(self, "service_provider_properties", value)
@property
@pulumi.getter(name="serviceProviderProvisioningState")
def service_provider_provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ServiceProviderProvisioningState']]]:
"""
The ServiceProviderProvisioningState state of the resource.
"""
return pulumi.get(self, "service_provider_provisioning_state")
@service_provider_provisioning_state.setter
def service_provider_provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ServiceProviderProvisioningState']]]):
pulumi.set(self, "service_provider_provisioning_state", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']]:
"""
The SKU.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ExpressRouteCircuit(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
authorizations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitAuthorizationArgs']]]]] = None,
bandwidth_in_gbps: Optional[pulumi.Input[float]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
circuit_provisioning_state: Optional[pulumi.Input[str]] = None,
express_route_port: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
global_reach_enabled: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_key: Optional[pulumi.Input[str]] = None,
service_provider_notes: Optional[pulumi.Input[str]] = None,
service_provider_properties: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitServiceProviderPropertiesArgs']]] = None,
service_provider_provisioning_state: Optional[pulumi.Input[Union[str, 'ServiceProviderProvisioningState']]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_classic_operations: Allow classic operations.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitAuthorizationArgs']]]] authorizations: The list of authorizations.
:param pulumi.Input[float] bandwidth_in_gbps: The bandwidth of the circuit when the circuit is provisioned on an ExpressRoutePort resource.
:param pulumi.Input[str] circuit_name: The name of the circuit.
:param pulumi.Input[str] circuit_provisioning_state: The CircuitProvisioningState state of the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] express_route_port: The reference to the ExpressRoutePort resource when the circuit is provisioned on an ExpressRoutePort resource.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[bool] global_reach_enabled: Flag denoting global reach status.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]] peerings: The list of peerings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_key: The ServiceKey.
:param pulumi.Input[str] service_provider_notes: The ServiceProviderNotes.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitServiceProviderPropertiesArgs']] service_provider_properties: The ServiceProviderProperties.
:param pulumi.Input[Union[str, 'ServiceProviderProvisioningState']] service_provider_provisioning_state: The ServiceProviderProvisioningState state of the resource.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']] sku: The SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
authorizations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitAuthorizationArgs']]]]] = None,
bandwidth_in_gbps: Optional[pulumi.Input[float]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
circuit_provisioning_state: Optional[pulumi.Input[str]] = None,
express_route_port: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
global_reach_enabled: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_key: Optional[pulumi.Input[str]] = None,
service_provider_notes: Optional[pulumi.Input[str]] = None,
service_provider_properties: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitServiceProviderPropertiesArgs']]] = None,
service_provider_provisioning_state: Optional[pulumi.Input[Union[str, 'ServiceProviderProvisioningState']]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitArgs.__new__(ExpressRouteCircuitArgs)
__props__.__dict__["allow_classic_operations"] = allow_classic_operations
__props__.__dict__["authorizations"] = authorizations
__props__.__dict__["bandwidth_in_gbps"] = bandwidth_in_gbps
__props__.__dict__["circuit_name"] = circuit_name
__props__.__dict__["circuit_provisioning_state"] = circuit_provisioning_state
__props__.__dict__["express_route_port"] = express_route_port
__props__.__dict__["gateway_manager_etag"] = gateway_manager_etag
__props__.__dict__["global_reach_enabled"] = global_reach_enabled
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["peerings"] = peerings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_key"] = service_key
__props__.__dict__["service_provider_notes"] = service_provider_notes
__props__.__dict__["service_provider_properties"] = service_provider_properties
__props__.__dict__["service_provider_provisioning_state"] = service_provider_provisioning_state
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["stag"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20150501preview:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20150615:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20160330:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20160601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20160901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20161201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20170301:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20170601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20170801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20170901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20171001:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20171101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuit"), 
pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20200501:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20201101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20210201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20210201:ExpressRouteCircuit"), pulumi.Alias(type_="azure-native:network/v20210301:ExpressRouteCircuit"), pulumi.Alias(type_="azure-nextgen:network/v20210301:ExpressRouteCircuit")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuit, __self__).__init__(
'azure-native:network/v20200601:ExpressRouteCircuit',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuit':
"""
Get an existing ExpressRouteCircuit resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExpressRouteCircuitArgs.__new__(ExpressRouteCircuitArgs)
__props__.__dict__["allow_classic_operations"] = None
__props__.__dict__["authorizations"] = None
__props__.__dict__["bandwidth_in_gbps"] = None
__props__.__dict__["circuit_provisioning_state"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_port"] = None
__props__.__dict__["gateway_manager_etag"] = None
__props__.__dict__["global_reach_enabled"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["service_key"] = None
__props__.__dict__["service_provider_notes"] = None
__props__.__dict__["service_provider_properties"] = None
__props__.__dict__["service_provider_provisioning_state"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["stag"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ExpressRouteCircuit(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> pulumi.Output[Optional[bool]]:
"""
Allow classic operations.
"""
return pulumi.get(self, "allow_classic_operations")
@property
@pulumi.getter
def authorizations(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitAuthorizationResponse']]]:
"""
The list of authorizations.
"""
return pulumi.get(self, "authorizations")
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> pulumi.Output[Optional[float]]:
"""
The bandwidth of the circuit when the circuit is provisioned on an ExpressRoutePort resource.
"""
return pulumi.get(self, "bandwidth_in_gbps")
@property
@pulumi.getter(name="circuitProvisioningState")
def circuit_provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The CircuitProvisioningState state of the resource.
"""
return pulumi.get(self, "circuit_provisioning_state")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRoutePort")
def express_route_port(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The reference to the ExpressRoutePort resource when the circuit is provisioned on an ExpressRoutePort resource.
"""
return pulumi.get(self, "express_route_port")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="globalReachEnabled")
def global_reach_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Flag denoting global reach status.
"""
return pulumi.get(self, "global_reach_enabled")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]]:
"""
The list of peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the express route circuit resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceKey")
def service_key(self) -> pulumi.Output[Optional[str]]:
"""
The ServiceKey.
"""
return pulumi.get(self, "service_key")
@property
@pulumi.getter(name="serviceProviderNotes")
def service_provider_notes(self) -> pulumi.Output[Optional[str]]:
"""
The ServiceProviderNotes.
"""
return pulumi.get(self, "service_provider_notes")
@property
@pulumi.getter(name="serviceProviderProperties")
def service_provider_properties(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitServiceProviderPropertiesResponse']]:
"""
The ServiceProviderProperties.
"""
return pulumi.get(self, "service_provider_properties")
@property
@pulumi.getter(name="serviceProviderProvisioningState")
def service_provider_provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The ServiceProviderProvisioningState state of the resource.
"""
return pulumi.get(self, "service_provider_provisioning_state")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitSkuResponse']]:
"""
The SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def stag(self) -> pulumi.Output[int]:
"""
The identifier of the circuit traffic. Outer tag for QinQ encapsulation.
"""
return pulumi.get(self, "stag")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
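# A hypothetical usage sketch (the resource group, location and SKU values
# below are illustrative only, not taken from this module):
#   from pulumi_azure_native import network
#   circuit = network.v20200601.ExpressRouteCircuit(
#       "exampleCircuit",
#       resource_group_name="rg1",
#       location="westus",
#       sku=network.v20200601.ExpressRouteCircuitSkuArgs(
#           name="Standard_MeteredData", tier="Standard", family="MeteredData"))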
| [
"[email protected]"
] | |
fb3de3e1e992cf59a0db77e1e77140ba3f9e8072 | 64dfafd940d77129b38865ccac869aadffb868ff | /natural_satmass_plots.py | 0a3c6e64ef4ce8f400172a6e12b45c5a341759a2 | [] | no_license | RaymondSimons/foggie_local | 242d4ef5fa814d44747b2bc38825433ade73de9f | f10bc56ef90b274d08fc81c8d02ddd9b653dfd19 | refs/heads/master | 2021-07-07T18:03:27.996681 | 2020-07-30T16:34:40 | 2020-07-30T16:34:40 | 147,853,943 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,178 | py | import glob
from glob import glob
from astropy.io import fits
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt
from numpy import arange, array, where
plt.rcParams['text.usetex'] = True
plt.ioff()
plt.close('all')
DD_to_t = np.load('/Users/rsimons/Dropbox/rcs_foggie/outputs/DD_time.npy')[()]
#DDs = arange(44, 800)
DDs = arange(44, 800)
sat_ns = arange(0, 12)
clrs = ['blue', 'navy', 'darkblue', 'royalblue', 'red']
ls = ['-', '-', ':', '--','-.']
lw = [2,2,2,2,2]
alp = [0.8, 0.8, 0.8, 0.8, 0.8]
#for s, sim in enumerate(array(['forced', 'natural(v1)', 'natural(v2)', 'natural(v3)', 'natural(v4)'])):
for sat_n in sat_ns:
    print(sat_n)
fig, axes = plt.subplots(2,2, figsize = (10, 10))
fig2, ax2 = plt.subplots(1,1, figsize = (12, 6))
for s, sim in enumerate(array(['natural(v1)', 'natural(v2)', 'natural(v3)', 'natural(v4)'])):
ts = []
ms = []
mg = []
sf = []
dm = []
R_90 = []
#if sim == 'forced': DDs_use = arange(400, 1000)
#elif sim == 'natural(v1)': DDs_use = arange(44, 800)
#else: DDs_use = DDs
DDs_use = DDs
for DD in DDs_use:
if sim == 'forced': simname = 'nref11n_nref10f'
if sim == 'natural(v1)':
simname = 'natural'
dirname = 'natural'
ind_use = 50
if sim == 'natural(v2)':
dirname = 'natural_v2'
simname = 'nref11n_v2_selfshield_z15'
ind_use = 0
if sim == 'natural(v3)':
dirname = 'natural_v3'
simname = 'nref11n_v3_selfshield_z15'
ind_use = 0
if sim == 'natural(v4)':
dirname = 'natural_v4'
simname = 'nref11n_v4_selfshield_z15'
ind_use = 0
#fl = glob('/Users/rsimons/Dropbox/rcs_foggie/cenmass/%s_DD%.4i_mass.fits'%(simname, DD))
fl_name = '/Users/rsimons/Dropbox/rcs_foggie/satmass/%s/%s_DD%.4i_mass_sat%.2i.fits'%(dirname, simname, DD, sat_n)
fl = glob(fl_name)
if len(fl) > 0:
try:
a = fits.open(fl[0])
gd = where(DD_to_t[0] == DD)
t = DD_to_t[2][gd][0]
frac_mass = a['STARS_MASS'].data[:]/a['STARS_MASS'].data[-1]
ts.append(t)
ms.append(a['STARS_MASS'].data[ind_use])
mg.append(a['GAS_TOT'].data[ind_use])
sf.append(a['STARS_YOUNGMASS'].data[ind_use]/2.e7)
dm.append(a['DARK_MATTER'].data[ind_use])
#f = interpolate.interp1d(frac_mass, a['DISTANCE'].data)
#R_90.append(f(0.9))
                except Exception:
pass
axes[0,0].plot(ts, ms, label = sim, color = clrs[s], linestyle = ls[s], linewidth = lw[s], alpha = alp[s])
axes[0,1].plot(ts, dm, label = sim, color = clrs[s], linestyle = ls[s], linewidth = lw[s], alpha = alp[s])
axes[1,0].plot(ts, mg, label = sim, color = clrs[s], linestyle = ls[s], linewidth = lw[s], alpha = alp[s])
#axes[1,1].plot(ts, R_90, label = sim, color = clrs[s], linestyle = ls[s], linewidth = lw[s], alpha = alp[s])
ax2.plot(ts, sf, label = sim, color = clrs[s], linestyle = ls[s], linewidth = 1, alpha = alp[s])
axes[0,0].legend(loc = 2)
ax2.legend(loc = 2)
fs = 12
    axes[0,0].set_ylabel(r'M$_*$ (M$_{\odot}$)', fontsize = fs)
    axes[0,1].set_ylabel(r'M$_{DM}$ (M$_{\odot}$)', fontsize = fs)
    axes[1,0].set_ylabel(r'M$_{g}$ (M$_{\odot}$)', fontsize = fs)
    axes[1,1].set_ylabel(r'r$_{*,90}$ (kpc)', fontsize = fs)
    ax2.set_ylabel(r'star formation rate (M$_{\odot}$ yr$^{-1}$)', fontsize = fs)
for ax in axes.ravel(): ax.set_xlim(1, 5.5)
ax2.set_xlim(1, 5.5)
axes[1,1].axis('off')
for ax in axes.ravel(): ax.set_xlabel('time (Gyr)', fontsize = fs)
ax2.set_xlabel('time (Gyr)', fontsize = fs)
fig.savefig('/Users/rsimons/Dropbox/rcs_foggie/figures/butterfly_sats/%i_mass.png'%sat_n, dpi = 300)
fig2.savefig('/Users/rsimons/Dropbox/rcs_foggie/figures/butterfly_sats/%i_SFR.png'%sat_n, dpi = 300)
plt.close('all')
| [
"[email protected]"
] | |
bd2be70e948d6957ceca507b2d58feba34736b39 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-network/generated_samples/hub_route_table_delete.py | 6e2f314f2351a185a63bd8d45de74648f9983c8e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,554 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python hub_route_table_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
client.hub_route_tables.begin_delete(
resource_group_name="rg1",
virtual_hub_name="virtualHub1",
route_table_name="hubRouteTable1",
).result()
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2023-04-01/examples/HubRouteTableDelete.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
70fe8e8b64018de3046f650a5b1487419d30a178 | af259acdd0acd341370c9d5386c444da6a7a28a6 | /Deep-Learning-in-Python/04-Fine-tuning-keras-models/02-Changing-optimization-parameters.py | de78e0ef25b6108c14e0a39d883bc0b1c62905bf | [] | no_license | pace-noge/datacamp | fcd544f6478040660f7149b1a37bfd957eef9747 | eeffb8af233e7304c0f122a48e6b4f78ee7c650e | refs/heads/master | 2020-07-04T12:41:29.635167 | 2019-09-17T10:11:39 | 2019-09-17T10:11:39 | 202,289,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | """
Changing optimization parameters
It's time to get your hands dirty with optimization. You'll now try optimizing a model at a very low learning rate, a very high learning rate, and a "just right" learning rate. You'll want to look at the results after running this exercise, remembering that a low value for the loss function is good.
For these exercises, we've pre-loaded the predictors and target values from your previous classification models (predicting who would survive on the Titanic). You'll want the optimization to start from scratch every time you change the learning rate, to give a fair comparison of how each learning rate did in your results. So we have created a function get_new_model() that creates an unoptimized model to optimize.
INSTRUCTION
-----------
Import SGD from keras.optimizers.
Create a list of learning rates to try optimizing with called lr_to_test. The learning rates in it should be .000001, 0.01, and 1.
Using a for loop to iterate over lr_to_test:
Use the get_new_model() function to build a new, unoptimized model.
Create an optimizer called my_optimizer using the SGD() constructor with keyword argument lr=lr.
Compile your model. Set the optimizer parameter to be the SGD object you created above, and because this is a classification problem, use 'categorical_crossentropy' for the loss parameter.
Fit your model using the predictors and target.
"""
# Import the SGD optimizer
from keras.optimizers import SGD
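# get_new_model(), predictors and target are pre-loaded in the exercise
# environment. A minimal sketch of what get_new_model() might look like
# (the layer sizes here are an assumption, not necessarily the course's exact model):
from keras.models import Sequential
from keras.layers import Dense
def get_new_model():
    model = Sequential()
    model.add(Dense(100, activation='relu', input_shape=(predictors.shape[1],)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    return model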
# Create list of learning rates: lr_to_test
lr_to_test = [.000001, 0.01, 1]
# Loop over learning rates
for lr in lr_to_test:
print('\n\nTesting model with learning rate: %f\n'%lr )
# Build new model to test, unaffected by previous models
model = get_new_model()
# Create SGD optimizer with specified learning rate: my_optimizer
my_optimizer = SGD(lr=lr)
# Compile the model
model.compile(optimizer=my_optimizer, loss='categorical_crossentropy')
# Fit the model
model.fit(predictors, target)
| [
"[email protected]"
] | |
5264e111233945faf45b9aefd6d120ef0d823acf | 8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac | /PySide/QtXml/QDomDocumentFragment.py | 05e5e380013867b6c5bff79ce5d82cf2508b4f84 | [
"Apache-2.0"
] | permissive | sonictk/python-skeletons | be09526bf490856bb644fed6bf4e801194089f0d | 49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d | refs/heads/master | 2020-04-06T04:38:01.918589 | 2016-06-09T20:37:43 | 2016-06-09T20:37:43 | 56,334,503 | 0 | 0 | null | 2016-04-15T16:30:42 | 2016-04-15T16:30:42 | null | UTF-8 | Python | false | false | 801 | py | # encoding: utf-8
# module PySide.QtXml
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtXml.so
# by generator 1.138
# no doc
# no imports
from QDomNode import QDomNode
class QDomDocumentFragment(QDomNode):
# no doc
def nodeType(self, *args, **kwargs): # real signature unknown
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *more): # real signature unknown; restored from __doc__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
__new__ = None
| [
"[email protected]"
] | |
cba3267fc1a634b25b0ca18b959b737bd7ad0cde | 9f1db0cce0aebc6c14c7e3c4adfa424bbc20c968 | /project/lit/migrations/0008_auto_20151112_0935.py | e03309c46b32757228973dba1d3bbb8e0d673561 | [] | no_license | JoshAddington/hawc | 6b8f9efb0dec2aeb972a66a636dfc7493502003b | 28cb8a2690ba934134c4dd4660d51e573cd6b895 | refs/heads/master | 2020-12-25T17:16:05.746517 | 2016-06-10T18:03:32 | 2016-06-10T18:03:32 | 44,217,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import utils.models
class Migration(migrations.Migration):
dependencies = [
('lit', '0007_auto_20151103_0925'),
]
operations = [
migrations.AlterField(
model_name='reference',
name='full_text_url',
field=utils.models.CustomURLField(help_text=b'Link to full-text publication (may require increased access privileges, only reviewers and team-members)', blank=True),
),
]
| [
"[email protected]"
] | |
69a804c507365deb078ecc69332d2ffa90e580e1 | fac4c2fa64e6a22d0a80eec7b65c93d7a6236b7f | /original-modules/text-to-text-transfer-transformer-master/t5/models/gin/objectives/span.gin | 2da19b4e5ebab684ee90690c65fafe40978d8775 | [
"Apache-2.0"
] | permissive | zouning68/nlp-transfer-learning | ec2b9e91f4b3bb9d77bf88dd78282f6ff5aaa4fd | e9b1544b55905ceb2235471f036abc1d7c4160db | refs/heads/master | 2023-04-13T18:25:22.206475 | 2020-01-15T02:36:19 | 2020-01-15T02:36:19 | 228,514,893 | 2 | 0 | Apache-2.0 | 2023-03-24T23:36:35 | 2019-12-17T02:21:15 | Python | UTF-8 | Python | false | false | 1,801 | gin | # -*-Python-*-
# Random pattern of noise and non-noise spans.
include 'objectives/denoise.gin'
preprocessors.unsupervised.preprocessors = [
@preprocessors.select_random_chunk,
@preprocessors.reduce_concat_tokens,
@preprocessors.split_tokens,
@preprocessors.denoise,
]
inputs_length = 512
noise_density = 0.15
preprocessors.denoise.noise_density = %noise_density
mean_noise_span_length = 3.0
preprocessors.denoise.inputs_fn = @preprocessors.noise_span_to_unique_sentinel
preprocessors.denoise.targets_fn = @preprocessors.nonnoise_span_to_unique_sentinel
preprocessors.denoise.noise_mask_fn = @preprocessors.random_spans_noise_mask
preprocessors.random_spans_noise_mask.mean_noise_span_length = %mean_noise_span_length
# Based on this combination of noise_mask_fn, inputs_fn, and targets_fn, we
# compute the exact split length and targets length so that the resulting
# training examples fit perfectly without any padding.
#
# These settings still work (but leave some padding) if the inputs_fn or
# targets_fn is switched to drop instead of replacing spans by sentinels.
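# Worked example for the defaults above (a sketch; the counts assume one
# sentinel per noise span plus one EOS token on both inputs and targets):
#   tokens_length = 568 -> num_noise_tokens = round(568 * 0.15) = 85
#   num_noise_spans = round(85 / 3.0) = 28
#   encoder inputs  = (568 - 85) + 28 + 1 = 512
#   decoder targets = 85 + 28 + 1 = 114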
# Compute the split length based on the other hyperparameters
preprocessors.split_tokens.max_tokens_per_segment = @preprocessors.random_spans_tokens_length()
targets_length = @preprocessors.random_spans_targets_length()
preprocessors.random_spans_helper.inputs_length = %inputs_length
preprocessors.random_spans_helper.noise_density = %noise_density
preprocessors.random_spans_helper.mean_noise_span_length = %mean_noise_span_length
preprocessors.random_spans_helper.extra_tokens_per_span_inputs = 1
preprocessors.random_spans_helper.extra_tokens_per_span_targets = 1
utils.run.sequence_length = {"inputs": %inputs_length, "targets": %targets_length}
sentencepiece_vocabulary.SentencePieceVocabulary.extra_ids = 100
| [
"[email protected]"
] | |
530054b5f5bfcf224797ff7cb03ce8898da54858 | 6c5a2649bd7dce505e108d5ceb862d9a3912ca8b | /score_of_data.py | dc556bef64979e96be25a36395d5a45b68c9bbf1 | [] | no_license | maidao/machine_learning_Intermediate | 680295ba8b9282312cd499151e57ef03e1a55283 | 7cb5a05b4bdd30f0555faa3705d7ab20793d61b5 | refs/heads/master | 2020-06-18T07:33:47.152520 | 2019-07-17T10:03:00 | 2019-07-17T10:03:00 | 196,215,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=10, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)
| [
"[email protected]"
] | |
2497cfe49693743af6f390ab5ee75b6bd96ebf9e | 48894a44b01b055c9d88eb93f4472fa1da72441b | /run.py | ffab80d383c6be37ca0c1c76c9b9eb84500f98b7 | [] | no_license | Biking0/ambari_monitor | 4f349846f5be9042854427979e844934db3881c0 | 137cd8d7b051253a616c6b106722cac539616610 | refs/heads/master | 2020-08-10T22:39:44.331360 | 2020-03-19T05:41:22 | 2020-03-19T05:41:22 | 214,435,780 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
#********************************************************************************
# ** File name:    run.py
# ** Description:  entry point of the ambari monitoring project; starts all monitors
# ** Input tables:
# ** Output tables:
# ** Author:       hyn
# ** Created:      20191020
# ** Change log:
# ** Modified:
# *******************************************************************************
# ** Invocation:   nohup python run.py >> nohup.out &
# *******************************************************************************
import os
import time
import config
import service_monitor
import solr_monitor
# entry point
if __name__=='__main__':
while True:
        # 1. monitor the various services
service_monitor_object = service_monitor.ServiceMonitor()
service_monitor_object.request_data()
        # 2. monitor Solr
solr_monitor_object = solr_monitor.SolrMonitor()
solr_monitor_object.request_data()
        # 3. monitor Kafka consumption
        # 4. monitor Kafka logs (monitoring already in place)
print('sleep 900s')
time.sleep(config.sleep_time)
#time.sleep(3)
| [
"[email protected]"
] | |
87cddce92b5071b9d47617bca4b11461da55b66a | 046207f434966462fff55f634ba5a450d2208534 | /CodeUp/1259_0186.py | d3941c4ae56ec159bd0e541f26d3f46a954997cd | [] | no_license | sungsikyang92/pythonStudy | e293e1ac8af443809f840ccee7052a8f57480b70 | 26522b5e232ccd9ab25c52122d254aa7249a8fdf | refs/heads/master | 2023-07-04T16:58:40.318976 | 2021-08-04T02:00:27 | 2021-08-04T02:00:27 | 365,398,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | n = int(input())
total = 0  # 'total' instead of 'sum' to avoid shadowing the built-in
for x in range(1, n + 1):
    if x % 2 == 0:
        total += x
print(total)
| [
"[email protected]"
] | |
45a941542c6eb500c823b5d445f76412a2149b8a | 1208ac3718420c4a118ab6b777d99980b85f952a | /112.py | aabd7878a6fe754ceb9cd8e4a711f5e799a5cd90 | [] | no_license | deimelperez/150_Py_challenges | 6ab9aea77c9c117b682790bfe36fb5e280cb8afc | b58f55312e7abf30cb7cb6d68b249bb5dcd3c862 | refs/heads/master | 2023-03-13T02:30:15.095467 | 2021-03-04T19:02:11 | 2021-03-04T19:02:11 | 344,579,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import csv
file = open('111 Books.csv', 'a')
title = input('Enter title: ')
author = input('Enter author: ')
date = input('Enter date: ')
newRecord = title + ',' + author + ',' + date + '\n'
file.write(newRecord)
file.close()
| [
"[email protected]"
] | |
69a692f69279f5075583c5453a69f1f6df3bec6c | 079989df1765293d3536b98f841630246bb77a85 | /information/models.py | 69782d9228cdbcd29e08eba464dd17a09dce24be | [] | no_license | R-Mielamud/ClasssixSite | fb74cf71acc55fd32b7be0b4365ff3b2099d5d7c | 0d3bc58b4e13ddc29f0a7f2025a6c813d828433f | refs/heads/master | 2020-12-27T08:10:52.040513 | 2020-07-09T19:19:15 | 2020-07-09T19:19:15 | 237,826,755 | 1 | 0 | null | 2020-02-10T13:18:41 | 2020-02-02T19:47:34 | HTML | UTF-8 | Python | false | false | 723 | py | from django.db.models import *
from diary.models import Subject
class TeacherData(Model):
full_name = CharField(max_length=100, default="")
subject = ForeignKey(Subject, on_delete=CASCADE, related_name="teacher_data_sets")
def __str__(self):
return "{} | {}".format(self.full_name, self.subject)
class Meta:
verbose_name_plural = "Teacher data sets"
class ScheduleSubject(Model):
day_number = IntegerField(default=1)
day_index = IntegerField(default=1)
subject = CharField(default="", max_length=200)
cabinet = CharField(default="31", max_length=10)
def __str__(self):
return "{} {} {} {}".format(self.day_number, self.day_index, self.subject, self.cabinet)
| [
"[email protected]"
] | |
1a1b25fc8c24554f7190b90d529bd53fdea7f86e | f3e9cabfa31638f3de80f5b39794a3fc2a8edc7e | /src/py3.x/ml/16.RecommenderSystems/RS-usercf.py | 97d0e54c7c09effb2a1f102b859a7782462a2ccd | [] | no_license | Michael-24/ML-master | 21ef050c32b8a156f6346456aa686f7bdb8f10e1 | bb4ff4252db9f4657f395b76d4bbe20d9edef29a | refs/heads/master | 2023-03-18T06:34:48.472023 | 2020-08-07T04:37:45 | 2020-08-07T04:37:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,223 | py | #!/usr/bin/python
# coding:utf8
'''
Created on 2015-06-22
Update on 2019-05-16
Author: Lockvictor/片刻
Collaborative filtering source code for the book "Recommender Systems in Practice" (《推荐系统实践》)
'''
from __future__ import print_function
import sys
import math
import random
from operator import itemgetter
print(__doc__)
# make the random number sequence reproducible
random.seed(0)
class UserBasedCF():
''' TopN recommendation - UserBasedCF '''
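    # Overview of the algorithm: build an inverted movie -> users table, count
    # how many movies every pair of users co-rated, normalise those counts into
    # cosine similarities, then recommend the movies that the K most similar
    # users rated and the target user has not yet seen.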
def __init__(self):
self.trainset = {}
self.testset = {}
        # n_sim_user: use the top-20 most similar users; n_rec_movie: recommend the top-10 movies
self.n_sim_user = 20
self.n_rec_movie = 10
        # user_sim_mat: user-user similarity matrix; movie_popular: number of ratings per movie; movie_count: total number of movies
self.user_sim_mat = {}
self.movie_popular = {}
self.movie_count = 0
print('similar user number = %d' % self.n_sim_user, file=sys.stderr)
print('recommended movie number = %d' % self.n_rec_movie, file=sys.stderr)
@staticmethod
def loadfile(filename):
"""loadfile(加载文件,返回一个生成器)
Args:
filename 文件名
Returns:
line 行数据,去空格
"""
fp = open(filename, 'r')
for i, line in enumerate(fp):
yield line.strip('\r\n')
if i > 0 and i % 100000 == 0:
print('loading %s(%s)' % (filename, i), file=sys.stderr)
fp.close()
print('load %s success' % filename, file=sys.stderr)
def generate_dataset(self, filename, pivot=0.7):
"""loadfile(加载文件,将数据集按照7:3 进行随机拆分)
Args:
filename 文件名
pivot 拆分比例
"""
trainset_len = 0
testset_len = 0
for line in self.loadfile(filename):
            # user ID, movie ID, rating, timestamp
# user, movie, rating, timestamp = line.split('::')
user, movie, rating, _ = line.split('\t')
            # compare a random number with pivot to decide whether the record goes to train or test
if (random.random() < pivot):
# dict.setdefault(key, default=None)
                # key -- the key to look up
                # default -- the value stored when the key is missing
self.trainset.setdefault(user, {})
self.trainset[user][movie] = int(rating)
trainset_len += 1
else:
self.testset.setdefault(user, {})
self.testset[user][movie] = int(rating)
testset_len += 1
        print('split the data set into train and test sets successfully', file=sys.stderr)
print('train set = %s' % trainset_len, file=sys.stderr)
print('test set = %s' % testset_len, file=sys.stderr)
def calc_user_sim(self):
"""calc_user_sim(计算用户之间的相似度)"""
# build inverse table for item-users
# key=movieID, value=list of userIDs who have seen this movie
print('building movie-users inverse table...', file=sys.stderr)
movie2users = dict()
        # for each movie, collect the set of users who rated it,
        # and count how many times each movie appears across all users
for user, movies in self.trainset.items():
for movie in movies:
# inverse table for item-users
if movie not in movie2users:
movie2users[movie] = set()
movie2users[movie].add(user)
# count item popularity at the same time
if movie not in self.movie_popular:
self.movie_popular[movie] = 0
self.movie_popular[movie] += 1
print('build movie-users inverse table success', file=sys.stderr)
# save the total movie number, which will be used in evaluation
self.movie_count = len(movie2users)
print('total movie number = %d' % self.movie_count, file=sys.stderr)
usersim_mat = self.user_sim_mat
        # count how many movies each pair of users has rated in common
print('building user co-rated movies matrix...', file=sys.stderr)
for movie, users in movie2users.items():
for u in users:
for v in users:
if u == v:
continue
usersim_mat.setdefault(u, {})
usersim_mat[u].setdefault(v, 0)
usersim_mat[u][v] += 1
print('build user co-rated movies matrix success', file=sys.stderr)
# calculate similarity matrix
print('calculating user similarity matrix...', file=sys.stderr)
simfactor_count = 0
PRINT_STEP = 2000000
for u, related_users in usersim_mat.items():
            for v, count in related_users.items():
                # cosine similarity: w_uv = |N(u) & N(v)| / sqrt(|N(u)| * |N(v)|)
usersim_mat[u][v] = count / math.sqrt(
len(self.trainset[u]) * len(self.trainset[v]))
simfactor_count += 1
                # print progress
if simfactor_count % PRINT_STEP == 0:
print('calculating user similarity factor(%d)' % simfactor_count, file=sys.stderr)
print('calculate user similarity matrix(similarity factor) success', file=sys.stderr)
print('Total similarity factor number = %d' % simfactor_count, file=sys.stderr)
# @profile
def recommend(self, user):
"""recommend(找出top K的用户,所看过的电影,对电影进行相似度sum的排序,取出top N的电影数)
Args:
user 用户
Returns:
rec_movie 电影推荐列表,按照相似度从大到小的排序
"""
''' Find K similar users and recommend N movies. '''
K = self.n_sim_user
N = self.n_rec_movie
rank = dict()
watched_movies = self.trainset[user]
        # take the K most similar users
        # v = similar user, wuv = similarity weight; the K users with the largest wuv are kept
        # profiling note: 50.4% of the runtime is spent in the sorted() call below (line 160 of the original source)
for v, wuv in sorted(
self.user_sim_mat[user].items(), key=itemgetter(1),
reverse=True)[0:K]:
            for movie, rating in self.trainset[v].items():
if movie in watched_movies:
continue
# predict the user's "interest" for each movie
rank.setdefault(movie, 0)
rank[movie] += wuv * rating
# return the N best movies
"""
wuv
precision=0.3766 recall=0.0759 coverage=0.3183 popularity=6.9194
wuv * rating
precision=0.3865 recall=0.0779 coverage=0.2681 popularity=7.0116
"""
return sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]
def evaluate(self):
''' return precision, recall, coverage and popularity '''
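        # Metric definitions used below: precision = hits / recommendations made;
        # recall = hits / test-set size; coverage = distinct recommended movies /
        # total movies; popularity = mean log(1 + rating count) of recommended
        # movies (lower popularity means more novel recommendations).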
print('Evaluation start...', file=sys.stderr)
        # number of recommendations made per user
        N = self.n_rec_movie
        # variables for precision and recall
        # hit: recommended movies that also appear in the test set;
        # rec_count: total recommendations made; test_count: total test-set movies
        hit = 0
        rec_count = 0
        test_count = 0
        # variables for coverage
        all_rec_movies = set()
        # variables for popularity
        popular_sum = 0
        # enumerate yields an indexed sequence, giving the index and the value at the same time
        # reference: http://blog.csdn.net/churximi/article/details/51648388
for i, user in enumerate(self.trainset):
if i > 0 and i % 500 == 0:
print('recommended for %d users' % i, file=sys.stderr)
test_movies = self.testset.get(user, {})
rec_movies = self.recommend(user)
            # compare the recommended movies (movie, w) against the test set
for movie, _ in rec_movies:
if movie in test_movies:
hit += 1
all_rec_movies.add(movie)
                # accumulate the log of each recommended movie's rating count
popular_sum += math.log(1 + self.movie_popular[movie])
rec_count += N
test_count += len(test_movies)
precision = hit / (1.0 * rec_count)
recall = hit / (1.0 * test_count)
coverage = len(all_rec_movies) / (1.0 * self.movie_count)
popularity = popular_sum / (1.0 * rec_count)
print('precision=%.4f \t recall=%.4f \t coverage=%.4f \t popularity=%.4f' % (
precision, recall, coverage, popularity), file=sys.stderr)
if __name__ == '__main__':
# ratingfile = 'data/16.RecommenderSystems/ml-1m/ratings.dat'
ratingfile = 'data/16.RecommenderSystems/ml-100k/u.data'
    # create the user-based CF object
    usercf = UserBasedCF()
    # split the data 7:3 into training and test sets, stored in usercf.trainset / usercf.testset
    usercf.generate_dataset(ratingfile, pivot=0.7)
    # compute the user-user similarity matrix
    usercf.calc_user_sim()
    # evaluate the recommendation quality
usercf.evaluate()
| [
"[email protected]"
] | |
e00636abdfb80b92e41cf30784c894caed7f6519 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/battle_control/controllers/team_bases_ctrl.py | 4d0149d8b224752e45e0be7a44f2f435f4a1e495 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 12,531 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/team_bases_ctrl.py
from collections import defaultdict
import BattleReplay
import BigWorld
import SoundGroups
from helpers import dependency
from skeletons.gui.battle_session import IBattleSessionProvider
from constants import TEAMS_IN_ARENA
from debug_utils import LOG_CURRENT_EXCEPTION
from gui.battle_control.arena_info.interfaces import ITeamsBasesController
from gui.battle_control.battle_constants import BATTLE_CTRL_ID
from gui.battle_control.view_components import ViewComponentsController
from PlayerEvents import g_playerEvents
_BASE_CAPTURE_SOUND_NAME_ENEMY = 'base_capture_2'
_BASE_CAPTURE_SOUND_NAME_ALLY = 'base_capture_1'
_AVAILABLE_TEAMS_NUMBERS = range(1, TEAMS_IN_ARENA.MAX_TEAMS + 1)
_UPDATE_POINTS_DELAY = 1.0
_ENEMY_OFFSET_DISABLED_BY_GAMEPLAY = ('assault',
'assault2',
'domination',
'domination30x30',
'epic')
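# Client-side team base IDs pack the base index into the high bits and the
# team number into the low 6 bits: clientID = (baseID << 6) + team.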
def makeClientTeamBaseID(team, baseID):
if baseID is None:
baseID = 0
return (int(baseID) << 6) + team
def parseClientTeamBaseID(clientID):
team = clientID & 63
return (team, clientID >> 6)
class ITeamBasesListener(object):
def setOffsetForEnemyPoints(self):
pass
def addCapturingTeamBase(self, clientID, playerTeam, points, rate, timeLeft, invadersCnt, capturingStopped):
pass
def addCapturedTeamBase(self, clientID, playerTeam, timeLeft, invadersCnt):
pass
def updateTeamBasePoints(self, clientID, points, rate, timeLeft, invadersCnt):
pass
def stopTeamBaseCapturing(self, clientID, points):
pass
def blockTeamBaseCapturing(self, clientID, points):
pass
def setTeamBaseCaptured(self, clientID, playerTeam):
pass
def removeTeamBase(self, clientID):
pass
def removeTeamsBases(self):
pass
def setNoBaseCapturing(self):
pass
class BattleTeamsBasesController(ITeamsBasesController, ViewComponentsController):
sessionProvider = dependency.descriptor(IBattleSessionProvider)
__slots__ = ('__battleCtx', '__arenaVisitor', '__clientIDs', '__points', '__sounds', '__callbackIDs', '__snap', '__captured')
def __init__(self):
super(BattleTeamsBasesController, self).__init__()
self.__battleCtx = None
self.__arenaVisitor = None
self.__clientIDs = set()
self.__points = {}
self.__captured = set()
self.__sounds = {}
self.__callbackIDs = {}
self.__snap = defaultdict(tuple)
return
def getControllerID(self):
return BATTLE_CTRL_ID.TEAM_BASES
def startControl(self, battleCtx, arenaVisitor):
self.__battleCtx = battleCtx
self.__arenaVisitor = arenaVisitor
feedback = self.sessionProvider.shared.feedback
if feedback is not None:
feedback.onRoundFinished += self.__onRoundFinished
g_playerEvents.onTeamChanged += self.__onTeamChanged
g_playerEvents.onRoundFinished += self.__onRoundFinished
return
def stopControl(self):
if self._viewComponents:
self.clearViewComponents()
self.__battleCtx = None
self.__arenaVisitor = None
self.__clearUpdateCallbacks()
self.__stopCaptureSounds()
self.__clientIDs.clear()
self.__points.clear()
self.__sounds.clear()
self.__snap.clear()
feedback = self.sessionProvider.shared.feedback
if feedback is not None:
feedback.onRoundFinished -= self.__onRoundFinished
g_playerEvents.onTeamChanged -= self.__onTeamChanged
g_playerEvents.onRoundFinished -= self.__onRoundFinished
return
def setViewComponents(self, *components):
super(BattleTeamsBasesController, self).setViewComponents(*components)
if not self._viewComponents:
return
name = self.__arenaVisitor.type.getGamePlayName()
if name and name not in _ENEMY_OFFSET_DISABLED_BY_GAMEPLAY:
for viewCmp in self._viewComponents:
viewCmp.setOffsetForEnemyPoints()
playerTeam = self.__battleCtx.getArenaDP().getNumberOfTeam()
isCapturing = False
for clientID, (points, timeLeft, invadersCnt, stopped) in self.__points.iteritems():
if clientID in self.__captured:
for viewCmp in self._viewComponents:
isCapturing = True
viewCmp.addCapturedTeamBase(clientID, playerTeam, timeLeft, invadersCnt)
if points and not BigWorld.player().isObserver():
for viewCmp in self._viewComponents:
isCapturing = True
viewCmp.addCapturingTeamBase(clientID, playerTeam, points, self._getProgressRate(), timeLeft, invadersCnt, stopped)
if not isCapturing:
for viewCmp in self._viewComponents:
viewCmp.setNoBaseCapturing()
def getTeamBasePoints(self, clientID):
points = 0
if clientID in self.__points:
points, _, _, _ = self.__points[clientID]
return points
def isTeamBaseCaptured(self, clientID):
return clientID in self.__captured
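    # Invoked when the arena reports fresh capture data for a base: depending
    # on the new point total and invader count, this adds, updates, blocks or
    # removes the capture bar for that base and starts or stops the matching
    # capture sound.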
def invalidateTeamBasePoints(self, baseTeam, baseID, points, timeLeft, invadersCnt, capturingStopped):
if baseTeam not in _AVAILABLE_TEAMS_NUMBERS:
return
clientID = makeClientTeamBaseID(baseTeam, baseID)
arenaDP = self.__battleCtx.getArenaDP()
playerTeam = arenaDP.getNumberOfTeam()
isEnemyBase = arenaDP.isEnemyTeam(baseTeam)
self.__points[clientID] = (points,
timeLeft,
invadersCnt,
capturingStopped)
if self._teamBaseLeft(points, invadersCnt):
if clientID in self.__clientIDs:
if not invadersCnt:
self.__clearUpdateCallback(clientID)
self.__clientIDs.remove(clientID)
for viewCmp in self._viewComponents:
viewCmp.stopTeamBaseCapturing(clientID, points)
if not invadersCnt:
viewCmp.removeTeamBase(clientID)
if not self.__hasBaseID(baseTeam) or isEnemyBase:
self.__stopCaptureSound(baseTeam)
else:
if clientID in self.__clientIDs:
if capturingStopped:
for viewCmp in self._viewComponents:
viewCmp.blockTeamBaseCapturing(clientID, points)
else:
self.__clientIDs.add(clientID)
self._addCapturingTeamBase(clientID, playerTeam, points, timeLeft, invadersCnt, capturingStopped)
self.__addUpdateCallback(clientID)
if not capturingStopped:
self.__playCaptureSound(playerTeam, baseTeam)
elif not self.__hasBaseID(baseTeam, exclude=clientID) or isEnemyBase:
self.__stopCaptureSound(baseTeam)
def invalidateTeamBaseCaptured(self, baseTeam, baseID):
if baseTeam not in _AVAILABLE_TEAMS_NUMBERS:
return
clientID = makeClientTeamBaseID(baseTeam, baseID)
playerTeam = self.__battleCtx.getArenaDP().getNumberOfTeam()
self.__captured.add(clientID)
if clientID in self.__clientIDs:
for viewCmp in self._viewComponents:
viewCmp.setTeamBaseCaptured(clientID, playerTeam)
else:
self.__clientIDs.add(clientID)
timeLeft = invadersCnt = 0
if clientID in self.__points:
_, timeLeft, invadersCnt, _ = self.__points[clientID]
for viewCmp in self._viewComponents:
viewCmp.addCapturedTeamBase(clientID, playerTeam, timeLeft, invadersCnt)
self.__stopCaptureSound(baseTeam)
def removeTeamsBases(self):
if not BattleReplay.isPlaying():
for viewCmp in self._viewComponents:
viewCmp.removeTeamsBases()
self.__stopCaptureSounds()
def clearViewComponents(self):
while self.__clientIDs:
clientID = self.__clientIDs.pop()
for viewCmp in self._viewComponents:
viewCmp.removeTeamBase(clientID)
def _teamBaseLeft(self, points, invadersCnt):
return not points
def _removeBarEntry(self, clientID, baseTeam):
self.__clientIDs.remove(clientID)
self.__stopCaptureSound(baseTeam)
def _containsClientID(self, clientID):
return clientID in self.__clientIDs
def _getPoints(self, clientID):
return self.__points[clientID]
def _getSnapDictForClientID(self, clientID):
return self.__snap[clientID]
def _setSnapForClientID(self, clientID, points, rate, timeLeft):
self.__snap[clientID] = (points, rate, timeLeft)
def _addCapturingTeamBase(self, clientID, playerTeam, points, timeLeft, invadersCnt, capturingStopped):
for viewCmp in self._viewComponents:
viewCmp.addCapturingTeamBase(clientID, playerTeam, points, self._getProgressRate(), timeLeft, invadersCnt, capturingStopped)
def __onTeamChanged(self, teamID):
for clientID in self.__clientIDs:
self.__clearUpdateCallback(clientID)
self.__stopCaptureSound(clientID)
for viewCmp in self._viewComponents:
viewCmp.removeTeamBase(clientID)
self.__clientIDs.clear()
def _getProgressRate(self):
pass
def __hasBaseID(self, team, exclude=-1):
return len([ i for i in self.__clientIDs if i & team != 0 and i != exclude ]) > 0
def __playCaptureSound(self, playerTeam, baseTeam):
if baseTeam not in self.__sounds:
if playerTeam ^ baseTeam:
soundID = _BASE_CAPTURE_SOUND_NAME_ENEMY
else:
soundID = _BASE_CAPTURE_SOUND_NAME_ALLY
try:
sound = self.__sounds.get(baseTeam, None)
if sound is not None:
sound.stop()
sound = SoundGroups.g_instance.getSound2D(soundID)
sound.play()
self.__sounds[baseTeam] = sound
except Exception:
LOG_CURRENT_EXCEPTION()
return
def __stopCaptureSound(self, team):
sound = self.__sounds.pop(team, None)
if sound is not None:
try:
sound.stop()
except Exception:
LOG_CURRENT_EXCEPTION()
return
def __stopCaptureSounds(self):
teams = self.__sounds.keys()
for team in teams:
self.__stopCaptureSound(team)
def _updatePoints(self, clientID):
if clientID not in self.__clientIDs:
return
points, timeLeft, invadersCnt, stopped = self.__points[clientID]
if stopped:
return
rate = self._getProgressRate()
if self._viewComponents and self.__snap[clientID] != (points, rate, timeLeft) and points > 0:
self.__snap[clientID] = (points, rate, timeLeft)
for viewCmp in self._viewComponents:
viewCmp.updateTeamBasePoints(clientID, points, rate, timeLeft, invadersCnt)
def __tickToUpdatePoints(self, clientID):
self.__callbackIDs.pop(clientID, None)
self._updatePoints(clientID)
self.__addUpdateCallback(clientID)
return
def __addUpdateCallback(self, clientID):
self.__callbackIDs[clientID] = BigWorld.callback(_UPDATE_POINTS_DELAY, lambda : self.__tickToUpdatePoints(clientID))
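    # __addUpdateCallback and __tickToUpdatePoints form a self-rescheduling
    # loop: each tick pops its own callback ID, pushes the latest points to the
    # view components, then re-registers itself, so capture progress refreshes
    # roughly once per _UPDATE_POINTS_DELAY second until __clearUpdateCallback
    # cancels the chain.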
def __clearUpdateCallback(self, clientID):
callbackID = self.__callbackIDs.pop(clientID, None)
if callbackID is not None:
BigWorld.cancelCallback(callbackID)
self.__snap.pop(clientID, None)
return
def __clearUpdateCallbacks(self):
for _, callbackID in self.__callbackIDs.items():
BigWorld.cancelCallback(callbackID)
self.__callbackIDs.clear()
def __onRoundFinished(self, *args):
self.removeTeamsBases()
class BattleTeamsBasesPlayer(BattleTeamsBasesController):
def _getProgressRate(self):
rate = BattleReplay.g_replayCtrl.playbackSpeed
if rate is None:
rate = super(BattleTeamsBasesPlayer, self)._getProgressRate()
return rate
def createTeamsBasesCtrl(setup):
if setup.isReplayPlaying:
ctrl = BattleTeamsBasesPlayer()
else:
ctrl = BattleTeamsBasesController()
return ctrl
| [
"[email protected]"
] | |
3810a9e948c34313e813a1fe7c302f115e7b368c | 0a28bcde2499e6a41e16d88ed62cd2e80a5b464d | /hb_quant/huobi/service/account/get_account_balance_by_subuid.py | 29532076b68ed85bd1db6979439b5886c05dab41 | [
"MIT"
] | permissive | wenli135/Binance-volatility-trading-bot | 2cfe66007294b13a89b16d1622d50ce1615f1d66 | 75a03ad61df0e95492128fb6f1f419d4dc256ab3 | refs/heads/main | 2023-06-13T06:40:43.855256 | 2021-07-01T02:03:25 | 2021-07-01T02:03:25 | 373,853,320 | 0 | 0 | MIT | 2021-06-04T13:38:26 | 2021-06-04T13:38:26 | null | UTF-8 | Python | false | false | 696 | py | from huobi.connection.restapi_sync_client import RestApiSyncClient
from huobi.constant.system import HttpMethod
from huobi.model.account import *
class GetAccountBalanceBySubUidService:
def __init__(self, params):
self.params = params
def request(self, **kwargs):
sub_uid = self.params["sub-uid"]
def get_channel():
path = "/v1/account/accounts/{}"
return path.format(sub_uid)
def parse(dict_data):
data_list = dict_data.get("data", [])
return AccountBalance.json_parse_list(data_list)
return RestApiSyncClient(**kwargs).request_process(HttpMethod.GET_SIGN, get_channel(), self.params, parse)
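# Usage sketch (illustrative only -- the sub-uid value and the keyword
# arguments forwarded to RestApiSyncClient, such as API credentials, are
# assumptions rather than anything defined in this file):
#   service = GetAccountBalanceBySubUidService({"sub-uid": 12345})
#   balances = service.request(api_key="...", secret_key="...")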
| [
"[email protected]"
] | |
29cff6d8c3117c518a9ab74b682916b64efc225e | 9b0bdebe81e558d3851609687e4ccd70ad026c7f | /数据结构/栈/04.合法的出栈顺序.py | 1f404b5a1970e989acf41c5634f48a5b42087105 | [] | no_license | lizenghui1121/DS_algorithms | 645cdad007ccbbfa82cc5ca9e3fc7f543644ab21 | 9690efcfe70663670691de02962fb534161bfc8d | refs/heads/master | 2022-12-13T22:45:23.108838 | 2020-09-07T13:40:17 | 2020-09-07T13:40:17 | 275,062,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | """
Given the number sequence 1..n pushed onto a stack in order, where each number may be popped immediately after it is pushed or stay on the stack for a while, determine whether a given sequence is a legal pop order.
@Author: Li Zenghui
@Date: 2020-03-21 21:19
"""
from queue_and_stack import MyStack
def check_is_valid_order(arr):
s = MyStack()
for i in range(1, len(arr)+1):
s.push(i)
while not s.is_empty() and arr[0] == s.top():
s.pop()
arr.pop(0)
if not s.is_empty():
return False
return True
# Use a plain Python list directly as the stack
def check_is_valid_order_2(arr):
s = []
for i in range(1, len(arr)+1):
s.append(i)
while s and arr[0] == s[-1]:
s.pop()
arr.pop(0)
if s:
return False
return True
if __name__ == '__main__':
print(check_is_valid_order([3, 2, 5, 4, 1]))
print(check_is_valid_order([3, 1, 2, 4, 5]))
print(check_is_valid_order_2([3, 2, 5, 4, 1]))
print(check_is_valid_order_2([3, 1, 2, 4, 5]))
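    # Expected output: True, False, True, False. [3, 2, 5, 4, 1] is a legal pop
    # order (3 and 2 pop early, then 5, 4, 1), while [3, 1, 2, 4, 5] is not:
    # once 3 has been popped, 2 sits above 1 on the stack and must come out first.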
| [
"[email protected]"
] | |
11a6f509dac11d1628a4239ad6e3882798927964 | 1839a3881de40db86a2c1e50086ccd6562ed221e | /UnetVgg11/network/textnet.py | cf1ac08f05461b500ccb74444b31ded0a846858b | [] | no_license | weijiawu/Yibao-cup_competition | 491c3e63ee1398c97339e95d5299f2aa2ea8af32 | 84369b038d8c3b3b2e9bfeae824518e49586612e | refs/heads/master | 2020-05-07T12:09:22.987473 | 2019-04-10T06:41:16 | 2019-04-10T06:41:16 | 180,490,552 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,052 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torchvision.models
import torch.nn.functional as F
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class UnetVgg11(nn.Module):
def __init__(self, n_classes=3, num_filters=64, v=1):
super(UnetVgg11, self).__init__()
print('UnetVgg11 version={}'.format(v))
print('base num_filters={}'.format(num_filters))
self.pool = nn.MaxPool2d(2, 2)
self.encoder = torchvision.models.vgg11(pretrained=True).features
self.relu = self.encoder[1]
self.conv1 = self.encoder[0]
self.conv2 = self.encoder[3]
self.conv3s = self.encoder[6]
self.conv3 = self.encoder[8]
self.conv4s = self.encoder[11]
self.conv4 = self.encoder[13]
self.conv5s = self.encoder[16]
self.conv5 = self.encoder[18]
self.center = DecoderBlock_5(num_filters * 8, num_filters * 16, num_filters * 8)
self.dec5 = DecoderBlock(num_filters * 16, num_filters * 8, num_filters * 8)
self.dec4 = DecoderBlock(num_filters * 16, num_filters * 8, num_filters * 4)
self.dec3 = DecoderBlock(num_filters * 8, num_filters * 4, num_filters * 2)
self.dec2 = DecoderBlock(num_filters * 4, num_filters * 2, num_filters)
self.dec1 = ConvRelu(num_filters * 2, num_filters)
self.final = nn.Conv2d(num_filters, n_classes, kernel_size=1)
def forward(self, x):
conv1 = self.relu(self.conv1(x))
conv2 = self.relu(self.conv2(self.pool(conv1)))
conv3s = self.relu(self.conv3s(self.pool(conv2)))
conv3 = self.relu(self.conv3(conv3s))
conv4s = self.relu(self.conv4s(self.pool(conv3)))
conv4 = self.relu(self.conv4(conv4s))
conv5s = self.relu(self.conv5s(self.pool(conv4)))
conv5 = self.relu(self.conv5(conv5s))
center = self.center(self.pool(conv5))
# print 'dec5.in_channels', self.dec5.in_channels
# print center.size(), conv5.size()
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(torch.cat([dec2, conv1], 1))
return self.final(dec1)
class ConvRelu(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvRelu, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
# nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.block(x)
class DoubleConvRelu(nn.Module):
def __init__(self, in_channels, out_channels):
super(DoubleConvRelu, self).__init__()
self.block = nn.Sequential(
ConvRelu(in_channels, out_channels),
ConvRelu(out_channels, out_channels),
)
def forward(self, x):
return self.block(x)
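# Spatial-size note: with kernel_size=4, stride=2, padding=1, output_padding=1,
# the transposed convolution in DecoderBlock_5 maps an n x n map to 2n+1 x 2n+1,
# turning the 12 x 12 map left after five poolings of a 400 x 400 input back
# into the 25 x 25 size of conv5 so the skip connection can be concatenated.
# DecoderBlock (kernel_size=3) maps n to 2n, the usual doubling.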
class DecoderBlock_5(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
super(DecoderBlock_5, self).__init__()
self.in_channels = in_channels
if is_deconv:
self.block = nn.Sequential(
ConvRelu(in_channels, middle_channels),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,
padding=1, output_padding=1),
nn.ReLU(inplace=True)
# nn.Upsample(scale_factor=2)
)
else:
self.block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
ConvRelu(in_channels, middle_channels),
ConvRelu(middle_channels, out_channels),
)
def forward(self, x):
return self.block(x)
class DecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
super(DecoderBlock, self).__init__()
self.in_channels = in_channels
if is_deconv:
self.block = nn.Sequential(
ConvRelu(in_channels, middle_channels),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=3, stride=2,
padding=1, output_padding=1),
nn.ReLU(inplace=True)
# nn.Upsample(scale_factor=2)
)
else:
self.block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='bilinear'),
ConvRelu(in_channels, middle_channels),
ConvRelu(middle_channels, out_channels),
)
def forward(self, x):
return self.block(x)
if __name__ == '__main__':
import torch
input = torch.randn((1, 3, 400, 400))
print(input.dtype)
net = torch.nn.DataParallel(UnetVgg11(n_classes=3)).cuda()
print(net(input.cuda()).shape)
print(net(input.cuda())[:,0].view(-1).shape)
pass | [
"[email protected]"
] | |
165ee5fd66aeb6ac69a6639f029b320d336eb6d4 | a596d773e011ca0f4e75c725a24bb263b67b16d1 | /MINDIVNO.PY | 8d937c6f7dca64910cd6f2ccb8a096b2c985fe19 | [] | no_license | shamiliraghul/python | 47b1ca1b4be40d89b014755c51b8da9787b3ffff | 207b6a3d6222d95af9c74b6f91a94f8f6115b3d5 | refs/heads/master | 2020-05-23T02:04:20.900868 | 2019-07-19T07:10:05 | 2019-07-19T07:10:05 | 186,596,277 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | n=int(input())
# Print the smallest divisor i of n for which n // i is odd.
for i in range(1, n + 1):
    if n % i == 0 and (n // i) % 2 != 0:
        print(i)
        break
| [
"[email protected]"
] | |
016861683eb38bdf6dc98d015f3a661609b4eb1b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_07_01_preview/models/_models_py3.py | 48be276791f964e4836563a515cd28181ea518b3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 122,736 | py | # coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class CertificateBodyDescription(_serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(_serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"etag": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "CertificateProperties"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"etag": {"key": "etag", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, properties: Optional["_models.CertificateProperties"] = None, **kwargs: Any) -> None:
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateProperties
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(_serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateDescription]
"""
_attribute_map = {
"value": {"key": "value", "type": "[CertificateDescription]"},
}
def __init__(self, *, value: Optional[List["_models.CertificateDescription"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateDescription]
"""
super().__init__(**kwargs)
self.value = value
class CertificateProperties(_serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
"subject": {"readonly": True},
"expiry": {"readonly": True},
"thumbprint": {"readonly": True},
"is_verified": {"readonly": True},
"created": {"readonly": True},
"updated": {"readonly": True},
}
_attribute_map = {
"subject": {"key": "subject", "type": "str"},
"expiry": {"key": "expiry", "type": "rfc-1123"},
"thumbprint": {"key": "thumbprint", "type": "str"},
"is_verified": {"key": "isVerified", "type": "bool"},
"created": {"key": "created", "type": "rfc-1123"},
"updated": {"key": "updated", "type": "rfc-1123"},
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword certificate: The certificate content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = certificate
class CertificatePropertiesWithNonce(_serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the
Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
"subject": {"readonly": True},
"expiry": {"readonly": True},
"thumbprint": {"readonly": True},
"is_verified": {"readonly": True},
"created": {"readonly": True},
"updated": {"readonly": True},
"verification_code": {"readonly": True},
"certificate": {"readonly": True},
}
_attribute_map = {
"subject": {"key": "subject", "type": "str"},
"expiry": {"key": "expiry", "type": "rfc-1123"},
"thumbprint": {"key": "thumbprint", "type": "str"},
"is_verified": {"key": "isVerified", "type": "bool"},
"created": {"key": "created", "type": "rfc-1123"},
"updated": {"key": "updated", "type": "rfc-1123"},
"verification_code": {"key": "verificationCode", "type": "str"},
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(_serialization.Model):
"""The JSON-serialized leaf certificate.
:ivar certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:vartype certificate: str
"""
_attribute_map = {
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.certificate = certificate
class CertificateWithNonceDescription(_serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:vartype properties:
~azure.mgmt.iothub.v2019_07_01_preview.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"etag": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "CertificatePropertiesWithNonce"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"etag": {"key": "etag", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, properties: Optional["_models.CertificatePropertiesWithNonce"] = None, **kwargs: Any) -> None:
"""
:keyword properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:paramtype properties:
~azure.mgmt.iothub.v2019_07_01_preview.models.CertificatePropertiesWithNonce
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(_serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:ivar max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
:ivar default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype default_ttl_as_iso8601: ~datetime.timedelta
:ivar feedback: The properties of the feedback queue for cloud-to-device messages.
:vartype feedback: ~azure.mgmt.iothub.v2019_07_01_preview.models.FeedbackProperties
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
"default_ttl_as_iso8601": {"key": "defaultTtlAsIso8601", "type": "duration"},
"feedback": {"key": "feedback", "type": "FeedbackProperties"},
}
def __init__(
self,
*,
max_delivery_count: Optional[int] = None,
default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
feedback: Optional["_models.FeedbackProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
:keyword default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype default_ttl_as_iso8601: ~datetime.timedelta
:keyword feedback: The properties of the feedback queue for cloud-to-device messages.
:paramtype feedback: ~azure.mgmt.iothub.v2019_07_01_preview.models.FeedbackProperties
"""
super().__init__(**kwargs)
self.max_delivery_count = max_delivery_count
self.default_ttl_as_iso8601 = default_ttl_as_iso8601
self.feedback = feedback
class EndpointHealthData(_serialization.Model):
"""The health data for an endpoint.
:ivar endpoint_id: Id of the endpoint.
:vartype endpoint_id: str
:ivar health_status: Health statuses have following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Known values are: "unknown", "healthy", "unhealthy", and
"dead".
:vartype health_status: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthStatus
"""
_attribute_map = {
"endpoint_id": {"key": "endpointId", "type": "str"},
"health_status": {"key": "healthStatus", "type": "str"},
}
def __init__(
self,
*,
endpoint_id: Optional[str] = None,
health_status: Optional[Union[str, "_models.EndpointHealthStatus"]] = None,
**kwargs: Any
) -> None:
"""
:keyword endpoint_id: Id of the endpoint.
:paramtype endpoint_id: str
:keyword health_status: Health statuses have following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the endpoint
is not accepting messages, after IoT Hub retried sending messages for the retrial period. See
IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status
shows that the IoT Hub has not established a connection with the endpoint. No messages have
been delivered to or rejected from this endpoint. Known values are: "unknown", "healthy",
"unhealthy", and "dead".
:paramtype health_status: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthStatus
"""
super().__init__(**kwargs)
self.endpoint_id = endpoint_id
self.health_status = health_status
class EndpointHealthDataListResult(_serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: JSON-serialized array of Endpoint health data.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[EndpointHealthData]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.EndpointHealthData"]] = None, **kwargs: Any) -> None:
"""
:keyword value: JSON-serialized array of Endpoint health data.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthData]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class EnrichmentProperties(_serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar key: The key or name for the enrichment property. Required.
:vartype key: str
:ivar value: The value for the enrichment property. Required.
:vartype value: str
:ivar endpoint_names: The list of endpoints for which the enrichment is applied to the message.
Required.
:vartype endpoint_names: list[str]
"""
_validation = {
"key": {"required": True},
"value": {"required": True},
"endpoint_names": {"required": True, "min_items": 1},
}
_attribute_map = {
"key": {"key": "key", "type": "str"},
"value": {"key": "value", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
}
def __init__(self, *, key: str, value: str, endpoint_names: List[str], **kwargs: Any) -> None:
"""
:keyword key: The key or name for the enrichment property. Required.
:paramtype key: str
:keyword value: The value for the enrichment property. Required.
:paramtype value: str
:keyword endpoint_names: The list of endpoints for which the enrichment is applied to the
message. Required.
:paramtype endpoint_names: list[str]
"""
super().__init__(**kwargs)
self.key = key
self.value = value
self.endpoint_names = endpoint_names
class ErrorDetails(_serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
"code": {"readonly": True},
"http_status_code": {"readonly": True},
"message": {"readonly": True},
"details": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"http_status_code": {"key": "httpStatusCode", "type": "str"},
"message": {"key": "message", "type": "str"},
"details": {"key": "details", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(_serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The tags.
:vartype properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"etag": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "{str}"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"etag": {"key": "etag", "type": "str"},
}
def __init__(self, *, properties: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword properties: The tags.
:paramtype properties: dict[str, str]
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(_serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of consumer groups objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[EventHubConsumerGroupInfo]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.EventHubConsumerGroupInfo"]] = None, **kwargs: Any) -> None:
"""
:keyword value: List of consumer groups objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubConsumerGroupInfo]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class EventHubProperties(_serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype retention_time_in_days: int
:ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
"partition_ids": {"readonly": True},
"path": {"readonly": True},
"endpoint": {"readonly": True},
}
_attribute_map = {
"retention_time_in_days": {"key": "retentionTimeInDays", "type": "int"},
"partition_count": {"key": "partitionCount", "type": "int"},
"partition_ids": {"key": "partitionIds", "type": "[str]"},
"path": {"key": "path", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
}
def __init__(
self, *, retention_time_in_days: Optional[int] = None, partition_count: Optional[int] = None, **kwargs: Any
) -> None:
"""
:keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype retention_time_in_days: int
:keyword partition_count: The number of partitions for receiving device-to-cloud messages in
the Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype partition_count: int
"""
super().__init__(**kwargs)
self.retention_time_in_days = retention_time_in_days
self.partition_count = partition_count
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(_serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar export_blob_container_uri: The export blob container URI. Required.
:vartype export_blob_container_uri: str
:ivar exclude_keys: The value indicating whether keys should be excluded during export.
Required.
:vartype exclude_keys: bool
"""
_validation = {
"export_blob_container_uri": {"required": True},
"exclude_keys": {"required": True},
}
_attribute_map = {
"export_blob_container_uri": {"key": "exportBlobContainerUri", "type": "str"},
"exclude_keys": {"key": "excludeKeys", "type": "bool"},
}
def __init__(self, *, export_blob_container_uri: str, exclude_keys: bool, **kwargs: Any) -> None:
"""
:keyword export_blob_container_uri: The export blob container URI. Required.
:paramtype export_blob_container_uri: str
:keyword exclude_keys: The value indicating whether keys should be excluded during export.
Required.
:paramtype exclude_keys: bool
"""
super().__init__(**kwargs)
self.export_blob_container_uri = export_blob_container_uri
self.exclude_keys = exclude_keys
class FailoverInput(_serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Region the hub will be failed over to. Required.
:vartype failover_region: str
"""
_validation = {
"failover_region": {"required": True},
}
_attribute_map = {
"failover_region": {"key": "failoverRegion", "type": "str"},
}
def __init__(self, *, failover_region: str, **kwargs: Any) -> None:
"""
:keyword failover_region: Region the hub will be failed over to. Required.
:paramtype failover_region: str
"""
super().__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(_serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to
the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:vartype name: str
    :ivar source: The source to which the routing rule is to be applied. For example,
DeviceMessages. Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: The list of endpoints to which the messages that satisfy the condition
     are routed. Currently only 1 endpoint is allowed. Required.
:vartype endpoint_names: list[str]
:ivar is_enabled: Used to specify whether the fallback route is enabled. Required.
:vartype is_enabled: bool
"""
_validation = {
"source": {"required": True},
"endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
"is_enabled": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"source": {"key": "source", "type": "str"},
"condition": {"key": "condition", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
"is_enabled": {"key": "isEnabled", "type": "bool"},
}
def __init__(
self,
*,
source: Union[str, "_models.RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:paramtype name: str
    :keyword source: The source to which the routing rule is to be applied. For example,
DeviceMessages. Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: The list of endpoints to which the messages that satisfy the condition
     are routed. Currently only 1 endpoint is allowed. Required.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Used to specify whether the fallback route is enabled. Required.
:paramtype is_enabled: bool
"""
super().__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
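# Minimal construction sketch (the values are illustrative, not taken from this
# file): FallbackRouteProperties(source="DeviceMessages",
# endpoint_names=["events"], is_enabled=True) routes any message no other route
# matched to the built-in events endpoint.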
class FeedbackProperties(_serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
"ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs: Any
) -> None:
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super().__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class ImportDevicesRequest(_serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: The input blob container URI. Required.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: The output blob container URI. Required.
:vartype output_blob_container_uri: str
"""
_validation = {
"input_blob_container_uri": {"required": True},
"output_blob_container_uri": {"required": True},
}
_attribute_map = {
"input_blob_container_uri": {"key": "inputBlobContainerUri", "type": "str"},
"output_blob_container_uri": {"key": "outputBlobContainerUri", "type": "str"},
}
def __init__(self, *, input_blob_container_uri: str, output_blob_container_uri: str, **kwargs: Any) -> None:
"""
:keyword input_blob_container_uri: The input blob container URI. Required.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: The output blob container URI. Required.
:paramtype output_blob_container_uri: str
"""
super().__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
class IotHubCapacity(_serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: int
:ivar maximum: The maximum number of units.
:vartype maximum: int
:ivar default: The default number of units.
:vartype default: int
:ivar scale_type: The type of the scaling enabled. Known values are: "Automatic", "Manual", and
"None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubScaleType
"""
_validation = {
"minimum": {"readonly": True, "maximum": 1, "minimum": 1},
"maximum": {"readonly": True},
"default": {"readonly": True},
"scale_type": {"readonly": True},
}
_attribute_map = {
"minimum": {"key": "minimum", "type": "int"},
"maximum": {"key": "maximum", "type": "int"},
"default": {"key": "default", "type": "int"},
"scale_type": {"key": "scaleType", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(_serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: The resource location. Required.
:vartype location: str
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword location: The resource location. Required.
:paramtype location: str
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: The resource location. Required.
:vartype location: str
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
:ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:vartype etag: str
:ivar properties: IotHub properties.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubProperties
:ivar sku: IotHub SKU info. Required.
:vartype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
"type": {"readonly": True},
"location": {"required": True},
"sku": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"etag": {"key": "etag", "type": "str"},
"properties": {"key": "properties", "type": "IotHubProperties"},
"sku": {"key": "sku", "type": "IotHubSkuInfo"},
}
def __init__(
self,
*,
location: str,
sku: "_models.IotHubSkuInfo",
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["_models.IotHubProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword location: The resource location. Required.
:paramtype location: str
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
:keyword etag: The Etag field is *not* required. If it is provided in the response body, it
must also be provided as a header per the normal ETag convention.
:paramtype etag: str
:keyword properties: IotHub properties.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubProperties
:keyword sku: IotHub SKU info. Required.
:paramtype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
"""
super().__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.sku = sku
class IotHubDescriptionListResult(_serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubDescription objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubDescription]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubDescription"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The array of IotHubDescription objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubDescription]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubLocationDescription(_serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:ivar location: Azure Geo Regions.
:vartype location: str
:ivar role: Specific Role assigned to this location. Known values are: "primary" and
"secondary".
:vartype role: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubReplicaRoleType
"""
_attribute_map = {
"location": {"key": "location", "type": "str"},
"role": {"key": "role", "type": "str"},
}
def __init__(
self,
*,
location: Optional[str] = None,
role: Optional[Union[str, "_models.IotHubReplicaRoleType"]] = None,
**kwargs: Any
) -> None:
"""
:keyword location: Azure Geo Regions.
:paramtype location: str
:keyword role: Specific Role assigned to this location. Known values are: "primary" and
"secondary".
:paramtype role: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubReplicaRoleType
"""
super().__init__(**kwargs)
self.location = location
self.role = role
class IotHubNameAvailabilityInfo(_serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Known values are: "Invalid" and "AlreadyExists".
:vartype reason: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubNameUnavailabilityReason
:ivar message: The detailed reason message.
:vartype message: str
"""
_validation = {
"name_available": {"readonly": True},
"reason": {"readonly": True},
}
_attribute_map = {
"name_available": {"key": "nameAvailable", "type": "bool"},
"reason": {"key": "reason", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(self, *, message: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword message: The detailed reason message.
:paramtype message: str
"""
super().__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = message
class IotHubProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:vartype authorization_policies:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar ip_filter_rules: The IP filter rules.
:vartype ip_filter_rules: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterRule]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible keys
to this dictionary is events. This key has to be present in the dictionary while making create
or update calls for the IoT hub.
:vartype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubProperties]
:ivar routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:vartype routing: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingProperties
:ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True,
causes an error to be thrown.
:vartype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.StorageEndpointProperties]
:ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:vartype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.MessagingEndpointProperties]
:ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
:vartype enable_file_upload_notifications: bool
:ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
:vartype cloud_to_device: ~azure.mgmt.iothub.v2019_07_01_preview.models.CloudToDeviceProperties
:ivar comments: IoT hub comments.
:vartype comments: str
:ivar device_streams: The device streams properties of iothub.
:vartype device_streams:
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubPropertiesDeviceStreams
:ivar features: The capabilities and features enabled for the IoT hub. Known values are: "None"
and "DeviceManagement".
:vartype features: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.Capabilities
:ivar locations: Primary and secondary location for iot hub.
:vartype locations:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubLocationDescription]
"""
_validation = {
"provisioning_state": {"readonly": True},
"state": {"readonly": True},
"host_name": {"readonly": True},
"locations": {"readonly": True},
}
_attribute_map = {
"authorization_policies": {"key": "authorizationPolicies", "type": "[SharedAccessSignatureAuthorizationRule]"},
"ip_filter_rules": {"key": "ipFilterRules", "type": "[IpFilterRule]"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
"state": {"key": "state", "type": "str"},
"host_name": {"key": "hostName", "type": "str"},
"event_hub_endpoints": {"key": "eventHubEndpoints", "type": "{EventHubProperties}"},
"routing": {"key": "routing", "type": "RoutingProperties"},
"storage_endpoints": {"key": "storageEndpoints", "type": "{StorageEndpointProperties}"},
"messaging_endpoints": {"key": "messagingEndpoints", "type": "{MessagingEndpointProperties}"},
"enable_file_upload_notifications": {"key": "enableFileUploadNotifications", "type": "bool"},
"cloud_to_device": {"key": "cloudToDevice", "type": "CloudToDeviceProperties"},
"comments": {"key": "comments", "type": "str"},
"device_streams": {"key": "deviceStreams", "type": "IotHubPropertiesDeviceStreams"},
"features": {"key": "features", "type": "str"},
"locations": {"key": "locations", "type": "[IotHubLocationDescription]"},
}
def __init__(
self,
*,
authorization_policies: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None,
ip_filter_rules: Optional[List["_models.IpFilterRule"]] = None,
event_hub_endpoints: Optional[Dict[str, "_models.EventHubProperties"]] = None,
routing: Optional["_models.RoutingProperties"] = None,
storage_endpoints: Optional[Dict[str, "_models.StorageEndpointProperties"]] = None,
messaging_endpoints: Optional[Dict[str, "_models.MessagingEndpointProperties"]] = None,
enable_file_upload_notifications: Optional[bool] = None,
cloud_to_device: Optional["_models.CloudToDeviceProperties"] = None,
comments: Optional[str] = None,
device_streams: Optional["_models.IotHubPropertiesDeviceStreams"] = None,
features: Optional[Union[str, "_models.Capabilities"]] = None,
**kwargs: Any
) -> None:
"""
:keyword authorization_policies: The shared access policies you can use to secure a connection
to the IoT hub.
:paramtype authorization_policies:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:keyword ip_filter_rules: The IP filter rules.
:paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterRule]
        :keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The only
         possible key to this dictionary is events. This key has to be present in the dictionary
         when making create or update calls for the IoT hub.
:paramtype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubProperties]
:keyword routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:paramtype routing: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingProperties
        :keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
         Currently you can configure only one Azure Storage account, and it MUST have its key as
         $default. Specifying more than one storage account causes an error to be thrown, as does
         not specifying a value for this property when the enableFileUploadNotifications property
         is set to True.
:paramtype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.StorageEndpointProperties]
:keyword messaging_endpoints: The messaging endpoint properties for the file upload
notification queue.
:paramtype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.MessagingEndpointProperties]
:keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
:paramtype enable_file_upload_notifications: bool
:keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
:paramtype cloud_to_device:
~azure.mgmt.iothub.v2019_07_01_preview.models.CloudToDeviceProperties
:keyword comments: IoT hub comments.
:paramtype comments: str
:keyword device_streams: The device streams properties of iothub.
:paramtype device_streams:
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubPropertiesDeviceStreams
:keyword features: The capabilities and features enabled for the IoT hub. Known values are:
"None" and "DeviceManagement".
:paramtype features: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.Capabilities
"""
super().__init__(**kwargs)
self.authorization_policies = authorization_policies
self.ip_filter_rules = ip_filter_rules
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = event_hub_endpoints
self.routing = routing
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.device_streams = device_streams
self.features = features
self.locations = None
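

# Hedged usage sketch, not part of the generated model surface: builds an
# IotHubProperties payload that satisfies the documented constraints -- the
# event_hub_endpoints dictionary carries the mandatory "events" key, and the
# single storage endpoint is keyed as "$default". The connection string and
# container name are hypothetical placeholders, and the EventHubProperties
# keyword names are assumed from that model's definition earlier in this file.
def _example_iot_hub_properties() -> "IotHubProperties":
    return IotHubProperties(
        event_hub_endpoints={
            # "events" must be present when making create or update calls.
            "events": EventHubProperties(retention_time_in_days=1, partition_count=4)
        },
        storage_endpoints={
            # Only one storage account is allowed, and its key MUST be "$default".
            "$default": StorageEndpointProperties(
                connection_string="DefaultEndpointsProtocol=https;...",  # placeholder
                container_name="fileupload",
            )
        },
        enable_file_upload_notifications=True,
    )
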
class IotHubPropertiesDeviceStreams(_serialization.Model):
"""The device streams properties of iothub.
:ivar streaming_endpoints: List of Device Streams Endpoints.
:vartype streaming_endpoints: list[str]
"""
_attribute_map = {
"streaming_endpoints": {"key": "streamingEndpoints", "type": "[str]"},
}
def __init__(self, *, streaming_endpoints: Optional[List[str]] = None, **kwargs: Any) -> None:
"""
:keyword streaming_endpoints: List of Device Streams Endpoints.
:paramtype streaming_endpoints: list[str]
"""
super().__init__(**kwargs)
self.streaming_endpoints = streaming_endpoints
class IotHubQuotaMetricInfo(_serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: int
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: int
"""
_validation = {
"name": {"readonly": True},
"current_value": {"readonly": True},
"max_value": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"current_value": {"key": "currentValue", "type": "int"},
"max_value": {"key": "maxValue", "type": "int"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(_serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of quota metrics objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubQuotaMetricInfo]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubQuotaMetricInfo"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The array of quota metrics objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubQuotaMetricInfo]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuDescription(_serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar sku: The type of the resource. Required.
:vartype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
:ivar capacity: IotHub capacity. Required.
:vartype capacity: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubCapacity
"""
_validation = {
"resource_type": {"readonly": True},
"sku": {"required": True},
"capacity": {"required": True},
}
_attribute_map = {
"resource_type": {"key": "resourceType", "type": "str"},
"sku": {"key": "sku", "type": "IotHubSkuInfo"},
"capacity": {"key": "capacity", "type": "IotHubCapacity"},
}
def __init__(self, *, sku: "_models.IotHubSkuInfo", capacity: "_models.IotHubCapacity", **kwargs: Any) -> None:
"""
:keyword sku: The type of the resource. Required.
:paramtype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
:keyword capacity: IotHub capacity. Required.
:paramtype capacity: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubCapacity
"""
super().__init__(**kwargs)
self.resource_type = None
self.sku = sku
self.capacity = capacity
class IotHubSkuDescriptionListResult(_serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubSkuDescription.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubSkuDescription]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubSkuDescription"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The array of IotHubSkuDescription.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuDescription]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuInfo(_serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", "S3", "B1",
"B2", and "B3".
:vartype name: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Known values are: "Free", "Standard", and
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuTier
:ivar capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:vartype capacity: int
"""
_validation = {
"name": {"required": True},
"tier": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"tier": {"key": "tier", "type": "str"},
"capacity": {"key": "capacity", "type": "int"},
}
def __init__(self, *, name: Union[str, "_models.IotHubSku"], capacity: Optional[int] = None, **kwargs: Any) -> None:
"""
:keyword name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", "S3", "B1",
"B2", and "B3".
:paramtype name: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSku
:keyword capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:paramtype capacity: int
"""
super().__init__(**kwargs)
self.name = name
self.tier = None
self.capacity = capacity
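

# Hedged usage sketch: a standard-tier SKU selection. The tier field is
# read-only and populated by the server, so only name and capacity are set.
def _example_iot_hub_sku_info() -> "IotHubSkuInfo":
    # Two S1 units; unit limits are covered by the Azure subscription
    # service-limits page linked in the docstring above.
    return IotHubSkuInfo(name="S1", capacity=2)
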
class IpFilterRule(_serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: The name of the IP filter rule. Required.
:vartype filter_name: str
:ivar action: The desired action for requests captured by this rule. Required. Known values
are: "Accept" and "Reject".
:vartype action: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterActionType
:ivar ip_mask: A string that contains the IP address range in CIDR notation for the rule.
Required.
:vartype ip_mask: str
"""
_validation = {
"filter_name": {"required": True},
"action": {"required": True},
"ip_mask": {"required": True},
}
_attribute_map = {
"filter_name": {"key": "filterName", "type": "str"},
"action": {"key": "action", "type": "str"},
"ip_mask": {"key": "ipMask", "type": "str"},
}
def __init__(
self, *, filter_name: str, action: Union[str, "_models.IpFilterActionType"], ip_mask: str, **kwargs: Any
) -> None:
"""
:keyword filter_name: The name of the IP filter rule. Required.
:paramtype filter_name: str
:keyword action: The desired action for requests captured by this rule. Required. Known values
are: "Accept" and "Reject".
:paramtype action: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterActionType
:keyword ip_mask: A string that contains the IP address range in CIDR notation for the rule.
Required.
:paramtype ip_mask: str
"""
super().__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
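

# Hedged usage sketch: an IP filter rule that accepts traffic from a /24
# range. As the docstring notes, ip_mask must be given in CIDR notation; the
# block below is the TEST-NET-3 documentation range, safe for examples.
def _example_ip_filter_rule() -> "IpFilterRule":
    return IpFilterRule(
        filter_name="allow-office",
        action="Accept",
        ip_mask="203.0.113.0/24",
    )
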
class JobResponse(_serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Known values are: "unknown", "export", "import", "backup",
"readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration", "rebootDevice",
"factoryResetDevice", and "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.JobType
:ivar status: The status of the job. Known values are: "unknown", "enqueued", "running",
"completed", "failed", and "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.JobStatus
    :ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
"job_id": {"readonly": True},
"start_time_utc": {"readonly": True},
"end_time_utc": {"readonly": True},
"type": {"readonly": True},
"status": {"readonly": True},
"failure_reason": {"readonly": True},
"status_message": {"readonly": True},
"parent_job_id": {"readonly": True},
}
_attribute_map = {
"job_id": {"key": "jobId", "type": "str"},
"start_time_utc": {"key": "startTimeUtc", "type": "rfc-1123"},
"end_time_utc": {"key": "endTimeUtc", "type": "rfc-1123"},
"type": {"key": "type", "type": "str"},
"status": {"key": "status", "type": "str"},
"failure_reason": {"key": "failureReason", "type": "str"},
"status_message": {"key": "statusMessage", "type": "str"},
"parent_job_id": {"key": "parentJobId", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(_serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of JobResponse objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[JobResponse]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.JobResponse"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The array of JobResponse objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.JobResponse]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class MatchedRoute(_serialization.Model):
"""Routes that matched.
:ivar properties: Properties of routes that matched.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
"""
_attribute_map = {
"properties": {"key": "properties", "type": "RouteProperties"},
}
def __init__(self, *, properties: Optional["_models.RouteProperties"] = None, **kwargs: Any) -> None:
"""
:keyword properties: Properties of routes that matched.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
"""
super().__init__(**kwargs)
self.properties = properties
class MessagingEndpointProperties(_serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:ivar lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype max_delivery_count: int
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
"ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs: Any
) -> None:
"""
:keyword lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message.
See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype max_delivery_count: int
"""
super().__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
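

# Hedged usage sketch: settings for the file-upload notification queue. The
# validation above caps max_delivery_count at 100 (minimum 1), and the
# timedelta values serialize as ISO 8601 durations.
def _example_messaging_endpoint_properties() -> "MessagingEndpointProperties":
    return MessagingEndpointProperties(
        lock_duration_as_iso8601=datetime.timedelta(minutes=1),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,  # must stay within [1, 100]
    )
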
class Name(_serialization.Model):
"""Name of Iot Hub type.
:ivar value: IotHub type.
:vartype value: str
:ivar localized_value: Localized value of name.
:vartype localized_value: str
"""
_attribute_map = {
"value": {"key": "value", "type": "str"},
"localized_value": {"key": "localizedValue", "type": "str"},
}
def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword value: IotHub type.
:paramtype value: str
:keyword localized_value: Localized value of name.
:paramtype localized_value: str
"""
super().__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class Operation(_serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~azure.mgmt.iothub.v2019_07_01_preview.models.OperationDisplay
"""
_validation = {
"name": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display": {"key": "display", "type": "OperationDisplay"},
}
def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs: Any) -> None:
"""
:keyword display: The object that represents the operation.
:paramtype display: ~azure.mgmt.iothub.v2019_07_01_preview.models.OperationDisplay
"""
super().__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(_serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
"provider": {"readonly": True},
"resource": {"readonly": True},
"operation": {"readonly": True},
"description": {"readonly": True},
}
_attribute_map = {
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(_serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the IoT hub to check. Required.
:vartype name: str
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
}
def __init__(self, *, name: str, **kwargs: Any) -> None:
"""
:keyword name: The name of the IoT hub to check. Required.
:paramtype name: str
"""
super().__init__(**kwargs)
self.name = name
class OperationListResult(_serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL
link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
"value": {"readonly": True},
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[Operation]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(_serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: int
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: int
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: int
"""
_validation = {
"total_device_count": {"readonly": True},
"enabled_device_count": {"readonly": True},
"disabled_device_count": {"readonly": True},
}
_attribute_map = {
"total_device_count": {"key": "totalDeviceCount", "type": "int"},
"enabled_device_count": {"key": "enabledDeviceCount", "type": "int"},
"disabled_device_count": {"key": "disabledDeviceCount", "type": "int"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(_serialization.Model):
"""Compilation error when evaluating route.
:ivar message: Route error message.
:vartype message: str
:ivar severity: Severity of the route error. Known values are: "error" and "warning".
:vartype severity: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorSeverity
:ivar location: Location where the route error happened.
:vartype location: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorRange
"""
_attribute_map = {
"message": {"key": "message", "type": "str"},
"severity": {"key": "severity", "type": "str"},
"location": {"key": "location", "type": "RouteErrorRange"},
}
def __init__(
self,
*,
message: Optional[str] = None,
severity: Optional[Union[str, "_models.RouteErrorSeverity"]] = None,
location: Optional["_models.RouteErrorRange"] = None,
**kwargs: Any
) -> None:
"""
:keyword message: Route error message.
:paramtype message: str
:keyword severity: Severity of the route error. Known values are: "error" and "warning".
:paramtype severity: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorSeverity
:keyword location: Location where the route error happened.
:paramtype location: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorRange
"""
super().__init__(**kwargs)
self.message = message
self.severity = severity
self.location = location
class RouteErrorPosition(_serialization.Model):
"""Position where the route error happened.
:ivar line: Line where the route error happened.
:vartype line: int
:ivar column: Column where the route error happened.
:vartype column: int
"""
_attribute_map = {
"line": {"key": "line", "type": "int"},
"column": {"key": "column", "type": "int"},
}
def __init__(self, *, line: Optional[int] = None, column: Optional[int] = None, **kwargs: Any) -> None:
"""
:keyword line: Line where the route error happened.
:paramtype line: int
:keyword column: Column where the route error happened.
:paramtype column: int
"""
super().__init__(**kwargs)
self.line = line
self.column = column
class RouteErrorRange(_serialization.Model):
"""Range of route errors.
:ivar start: Start where the route error happened.
:vartype start: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
:ivar end: End where the route error happened.
:vartype end: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
"""
_attribute_map = {
"start": {"key": "start", "type": "RouteErrorPosition"},
"end": {"key": "end", "type": "RouteErrorPosition"},
}
def __init__(
self,
*,
start: Optional["_models.RouteErrorPosition"] = None,
end: Optional["_models.RouteErrorPosition"] = None,
**kwargs: Any
) -> None:
"""
:keyword start: Start where the route error happened.
:paramtype start: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
:keyword end: End where the route error happened.
:paramtype end: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
"""
super().__init__(**kwargs)
self.start = start
self.end = end
class RouteProperties(_serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique. Required.
:vartype name: str
:ivar source: The source that the routing rule is to be applied to, such as DeviceMessages.
Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: The list of endpoints to which messages that satisfy the condition are
routed. Currently only one endpoint is allowed. Required.
:vartype endpoint_names: list[str]
:ivar is_enabled: Used to specify whether a route is enabled. Required.
:vartype is_enabled: bool
"""
_validation = {
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
"source": {"required": True},
"endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
"is_enabled": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"source": {"key": "source", "type": "str"},
"condition": {"key": "condition", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
"is_enabled": {"key": "isEnabled", "type": "bool"},
}
def __init__(
self,
*,
name: str,
source: Union[str, "_models.RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
condition: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
Required.
:paramtype name: str
:keyword source: The source that the routing rule is to be applied to, such as DeviceMessages.
Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword condition: The condition that is evaluated to apply the routing rule. If no condition
is provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: The list of endpoints to which messages that satisfy the condition are
routed. Currently only one endpoint is allowed. Required.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Used to specify whether a route is enabled. Required.
:paramtype is_enabled: bool
"""
super().__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
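

# Hedged usage sketch: a routing rule that sends all device telemetry to a
# single custom endpoint. The name matches the ^[A-Za-z0-9-._]{1,64}$ pattern
# enforced above, and endpoint_names carries exactly one entry as required.
# "telemetry-eventhub" is a hypothetical endpoint name.
def _example_route_properties() -> "RouteProperties":
    return RouteProperties(
        name="DeviceTelemetryRoute",
        source="DeviceMessages",
        condition="true",  # an omitted condition would default to true anyway
        endpoint_names=["telemetry-eventhub"],
        is_enabled=True,
    )
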
class RoutingEndpoints(_serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on
the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for
paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_queues:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusQueueEndpointProperties]
:ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_topics:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusTopicEndpointProperties]
:ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:vartype event_hubs:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEventHubProperties]
:ivar storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:vartype storage_containers:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
"service_bus_queues": {"key": "serviceBusQueues", "type": "[RoutingServiceBusQueueEndpointProperties]"},
"service_bus_topics": {"key": "serviceBusTopics", "type": "[RoutingServiceBusTopicEndpointProperties]"},
"event_hubs": {"key": "eventHubs", "type": "[RoutingEventHubProperties]"},
"storage_containers": {"key": "storageContainers", "type": "[RoutingStorageContainerProperties]"},
}
def __init__(
self,
*,
service_bus_queues: Optional[List["_models.RoutingServiceBusQueueEndpointProperties"]] = None,
service_bus_topics: Optional[List["_models.RoutingServiceBusTopicEndpointProperties"]] = None,
event_hubs: Optional[List["_models.RoutingEventHubProperties"]] = None,
storage_containers: Optional[List["_models.RoutingStorageContainerProperties"]] = None,
**kwargs: Any
) -> None:
"""
:keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:paramtype service_bus_queues:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusQueueEndpointProperties]
:keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
the messages to, based on the routing rules.
:paramtype service_bus_topics:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusTopicEndpointProperties]
:keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:paramtype event_hubs:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEventHubProperties]
:keyword storage_containers: The list of storage container endpoints that IoT hub routes
messages to, based on the routing rules.
:paramtype storage_containers:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerProperties]
"""
super().__init__(**kwargs)
self.service_bus_queues = service_bus_queues
self.service_bus_topics = service_bus_topics
self.event_hubs = event_hubs
self.storage_containers = storage_containers
class RoutingEventHubProperties(_serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the event hub endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
     characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the event hub endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the event hub endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword connection_string: The connection string of the event hub endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
         characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the event hub endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the event hub endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingMessage(_serialization.Model):
"""Routing message.
:ivar body: Body of routing message.
:vartype body: str
:ivar app_properties: App properties.
:vartype app_properties: dict[str, str]
:ivar system_properties: System properties.
:vartype system_properties: dict[str, str]
"""
_attribute_map = {
"body": {"key": "body", "type": "str"},
"app_properties": {"key": "appProperties", "type": "{str}"},
"system_properties": {"key": "systemProperties", "type": "{str}"},
}
def __init__(
self,
*,
body: Optional[str] = None,
app_properties: Optional[Dict[str, str]] = None,
system_properties: Optional[Dict[str, str]] = None,
**kwargs: Any
) -> None:
"""
:keyword body: Body of routing message.
:paramtype body: str
:keyword app_properties: App properties.
:paramtype app_properties: dict[str, str]
:keyword system_properties: System properties.
:paramtype system_properties: dict[str, str]
"""
super().__init__(**kwargs)
self.body = body
self.app_properties = app_properties
self.system_properties = system_properties
class RoutingProperties(_serialization.Model):
"""The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:ivar endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:vartype endpoints: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEndpoints
:ivar routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:vartype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties]
:ivar fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:vartype fallback_route: ~azure.mgmt.iothub.v2019_07_01_preview.models.FallbackRouteProperties
:ivar enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/iotmsgenrich.
:vartype enrichments: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EnrichmentProperties]
"""
_attribute_map = {
"endpoints": {"key": "endpoints", "type": "RoutingEndpoints"},
"routes": {"key": "routes", "type": "[RouteProperties]"},
"fallback_route": {"key": "fallbackRoute", "type": "FallbackRouteProperties"},
"enrichments": {"key": "enrichments", "type": "[EnrichmentProperties]"},
}
def __init__(
self,
*,
endpoints: Optional["_models.RoutingEndpoints"] = None,
routes: Optional[List["_models.RouteProperties"]] = None,
fallback_route: Optional["_models.FallbackRouteProperties"] = None,
enrichments: Optional[List["_models.EnrichmentProperties"]] = None,
**kwargs: Any
) -> None:
"""
:keyword endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:paramtype endpoints: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEndpoints
:keyword routes: The list of user-provided routing rules that the IoT hub uses to route
messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid
hubs and a maximum of 5 routing rules are allowed for free hubs.
:paramtype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties]
:keyword fallback_route: The properties of the route that is used as a fall-back route when
none of the conditions specified in the 'routes' section are met. This is an optional
parameter. When this property is not set, the messages which do not meet any of the conditions
specified in the 'routes' section get routed to the built-in eventhub endpoint.
:paramtype fallback_route:
~azure.mgmt.iothub.v2019_07_01_preview.models.FallbackRouteProperties
:keyword enrichments: The list of user-provided enrichments that the IoT hub applies to
messages to be delivered to built-in and custom endpoints. See: https://aka.ms/iotmsgenrich.
:paramtype enrichments:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.EnrichmentProperties]
"""
super().__init__(**kwargs)
self.endpoints = endpoints
self.routes = routes
self.fallback_route = fallback_route
self.enrichments = enrichments
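

# Hedged usage sketch: composes the routing section of an IoT hub from models
# defined in this file. Only endpoints and routes are supplied; with no
# fallback_route set, unmatched messages go to the built-in events endpoint,
# as the docstring above explains. The connection string is a placeholder.
def _example_routing_properties() -> "RoutingProperties":
    return RoutingProperties(
        endpoints=RoutingEndpoints(
            event_hubs=[
                RoutingEventHubProperties(
                    connection_string="Endpoint=sb://...",  # placeholder
                    name="telemetry-eventhub",
                )
            ]
        ),
        routes=[_example_route_properties()],
    )
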
class RoutingServiceBusQueueEndpointProperties(_serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the service bus queue endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
     characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual queue name. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus queue endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus queue endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword connection_string: The connection string of the service bus queue endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
         characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual queue name. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus queue endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus queue endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(_serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the service bus topic endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
     characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual topic name.
Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus topic endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus topic endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword connection_string: The connection string of the service bus topic endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
         characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual topic name.
Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus topic endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus topic endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingStorageContainerProperties(_serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the storage account. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
     characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the storage account.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the storage account.
:vartype resource_group: str
:ivar container_name: The name of storage container in the storage account. Required.
:vartype container_name: str
:ivar file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:vartype file_name_format: str
:ivar batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:vartype batch_frequency_in_seconds: int
:ivar max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:vartype max_chunk_size_in_bytes: int
:ivar encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Known values are: "Avro",
"AvroDeflate", and "JSON".
:vartype encoding: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
"container_name": {"required": True},
"batch_frequency_in_seconds": {"maximum": 720, "minimum": 60},
"max_chunk_size_in_bytes": {"maximum": 524288000, "minimum": 10485760},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
"file_name_format": {"key": "fileNameFormat", "type": "str"},
"batch_frequency_in_seconds": {"key": "batchFrequencyInSeconds", "type": "int"},
"max_chunk_size_in_bytes": {"key": "maxChunkSizeInBytes", "type": "int"},
"encoding": {"key": "encoding", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
container_name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
file_name_format: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
max_chunk_size_in_bytes: Optional[int] = None,
encoding: Optional[Union[str, "_models.RoutingStorageContainerPropertiesEncoding"]] = None,
**kwargs: Any
) -> None:
"""
:keyword connection_string: The connection string of the storage account. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
         characters, periods, underscores, hyphens, and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the storage account.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the storage account.
:paramtype resource_group: str
:keyword container_name: The name of storage container in the storage account. Required.
:paramtype container_name: str
:keyword file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:paramtype file_name_format: str
:keyword batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:paramtype batch_frequency_in_seconds: int
:keyword max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage.
Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:paramtype max_chunk_size_in_bytes: int
:keyword encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Known values are: "Avro",
"AvroDeflate", and "JSON".
:paramtype encoding: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerPropertiesEncoding
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
self.container_name = container_name
self.file_name_format = file_name_format
self.batch_frequency_in_seconds = batch_frequency_in_seconds
self.max_chunk_size_in_bytes = max_chunk_size_in_bytes
self.encoding = encoding
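

# Hedged usage sketch: a storage-container endpoint tuned within the
# validated ranges above (batch frequency 60-720 s, chunk size 10-500 MB).
# The connection string and container name are placeholders.
def _example_routing_storage_container() -> "RoutingStorageContainerProperties":
    return RoutingStorageContainerProperties(
        connection_string="DefaultEndpointsProtocol=https;...",  # placeholder
        name="cold-storage",
        container_name="telemetry-archive",
        batch_frequency_in_seconds=300,  # the documented default
        max_chunk_size_in_bytes=104857600,  # 100 MB, inside the allowed range
        encoding="Avro",
    )
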
class RoutingTwin(_serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:ivar tags: Twin Tags.
:vartype tags: JSON
:ivar properties:
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwinProperties
"""
_attribute_map = {
"tags": {"key": "tags", "type": "object"},
"properties": {"key": "properties", "type": "RoutingTwinProperties"},
}
def __init__(
self,
*,
tags: Optional[JSON] = None,
properties: Optional["_models.RoutingTwinProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword tags: Twin Tags.
:paramtype tags: JSON
:keyword properties:
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwinProperties
"""
super().__init__(**kwargs)
self.tags = tags
self.properties = properties
class RoutingTwinProperties(_serialization.Model):
"""RoutingTwinProperties.
:ivar desired: Twin desired properties.
:vartype desired: JSON
    :ivar reported: Twin reported properties.
:vartype reported: JSON
"""
_attribute_map = {
"desired": {"key": "desired", "type": "object"},
"reported": {"key": "reported", "type": "object"},
}
def __init__(self, *, desired: Optional[JSON] = None, reported: Optional[JSON] = None, **kwargs: Any) -> None:
"""
:keyword desired: Twin desired properties.
:paramtype desired: JSON
        :keyword reported: Twin reported properties.
:paramtype reported: JSON
"""
super().__init__(**kwargs)
self.desired = desired
self.reported = reported
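

# Hedged usage sketch: a twin reference used as an optional input when testing
# routes. The tag and property payloads are arbitrary JSON-like dictionaries
# with made-up keys.
def _example_routing_twin() -> "RoutingTwin":
    return RoutingTwin(
        tags={"deploymentRing": "canary"},
        properties=RoutingTwinProperties(
            desired={"telemetryInterval": 30},
            reported={"telemetryInterval": 30},
        ),
    )
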
class SharedAccessSignatureAuthorizationRule(_serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:ivar key_name: The name of the shared access policy. Required.
:vartype key_name: str
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
:ivar rights: The permissions assigned to the shared access policy. Required. Known values are:
"RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
and "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:vartype rights: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.AccessRights
"""
_validation = {
"key_name": {"required": True},
"rights": {"required": True},
}
_attribute_map = {
"key_name": {"key": "keyName", "type": "str"},
"primary_key": {"key": "primaryKey", "type": "str"},
"secondary_key": {"key": "secondaryKey", "type": "str"},
"rights": {"key": "rights", "type": "str"},
}
def __init__(
self,
*,
key_name: str,
rights: Union[str, "_models.AccessRights"],
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword key_name: The name of the shared access policy. Required.
:paramtype key_name: str
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
:keyword rights: The permissions assigned to the shared access policy. Required. Known values
are: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
and "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:paramtype rights: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.AccessRights
"""
super().__init__(**kwargs)
self.key_name = key_name
self.primary_key = primary_key
self.secondary_key = secondary_key
self.rights = rights
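

# Hedged usage sketch: a shared access policy granting registry-read and
# service-connect rights. Keys are normally generated by the service; the
# values below are placeholders, not real secrets.
def _example_sas_rule() -> "SharedAccessSignatureAuthorizationRule":
    return SharedAccessSignatureAuthorizationRule(
        key_name="service-reader",
        rights="RegistryRead, ServiceConnect",  # one of the documented combinations
        primary_key="<primary-key-placeholder>",
        secondary_key="<secondary-key-placeholder>",
    )
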
class SharedAccessSignatureAuthorizationRuleListResult(_serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shared access policies.
:vartype value:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[SharedAccessSignatureAuthorizationRule]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self, *, value: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None, **kwargs: Any
) -> None:
"""
:keyword value: The list of shared access policies.
:paramtype value:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class StorageEndpointProperties(_serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:vartype sas_ttl_as_iso8601: ~datetime.timedelta
:ivar connection_string: The connection string for the Azure Storage account to which files are
uploaded. Required.
:vartype connection_string: str
:ivar container_name: The name of the root container where you upload files. The container need
not exist but should be creatable using the connectionString specified. Required.
:vartype container_name: str
"""
_validation = {
"connection_string": {"required": True},
"container_name": {"required": True},
}
_attribute_map = {
"sas_ttl_as_iso8601": {"key": "sasTtlAsIso8601", "type": "duration"},
"connection_string": {"key": "connectionString", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
container_name: str,
sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
**kwargs: Any
) -> None:
"""
:keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:paramtype sas_ttl_as_iso8601: ~datetime.timedelta
:keyword connection_string: The connection string for the Azure Storage account to which files
are uploaded. Required.
:paramtype connection_string: str
:keyword container_name: The name of the root container where you upload files. The container
need not exist but should be creatable using the connectionString specified. Required.
:paramtype container_name: str
"""
super().__init__(**kwargs)
self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
self.connection_string = connection_string
self.container_name = container_name
class TagsResource(_serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an
IoT Hub instance.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.tags = tags
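
# A minimal sketch: a PATCH-style payload that updates only the tags of an
# existing IoT Hub resource.
#
#   tags_update = TagsResource(tags={"environment": "dev", "team": "iot"})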
class TestAllRoutesInput(_serialization.Model):
"""Input for testing all routes.
:ivar routing_source: Routing source. Known values are: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and
"DigitalTwinChangeEvents".
:vartype routing_source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
_attribute_map = {
"routing_source": {"key": "routingSource", "type": "str"},
"message": {"key": "message", "type": "RoutingMessage"},
"twin": {"key": "twin", "type": "RoutingTwin"},
}
def __init__(
self,
*,
routing_source: Optional[Union[str, "_models.RoutingSource"]] = None,
message: Optional["_models.RoutingMessage"] = None,
twin: Optional["_models.RoutingTwin"] = None,
**kwargs: Any
) -> None:
"""
:keyword routing_source: Routing source. Known values are: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and
"DigitalTwinChangeEvents".
:paramtype routing_source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
super().__init__(**kwargs)
self.routing_source = routing_source
self.message = message
self.twin = twin
class TestAllRoutesResult(_serialization.Model):
"""Result of testing all routes.
:ivar routes: JSON-serialized array of matched routes.
:vartype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.MatchedRoute]
"""
_attribute_map = {
"routes": {"key": "routes", "type": "[MatchedRoute]"},
}
def __init__(self, *, routes: Optional[List["_models.MatchedRoute"]] = None, **kwargs: Any) -> None:
"""
:keyword routes: JSON-serialized array of matched routes.
:paramtype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.MatchedRoute]
"""
super().__init__(**kwargs)
self.routes = routes
class TestRouteInput(_serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:ivar route: Route properties. Required.
:vartype route: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
_validation = {
"route": {"required": True},
}
_attribute_map = {
"message": {"key": "message", "type": "RoutingMessage"},
"route": {"key": "route", "type": "RouteProperties"},
"twin": {"key": "twin", "type": "RoutingTwin"},
}
def __init__(
self,
*,
route: "_models.RouteProperties",
message: Optional["_models.RoutingMessage"] = None,
twin: Optional["_models.RoutingTwin"] = None,
**kwargs: Any
) -> None:
"""
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:keyword route: Route properties. Required.
:paramtype route: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
super().__init__(**kwargs)
self.message = message
self.route = route
self.twin = twin
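
# A minimal sketch, assuming the RouteProperties model defined elsewhere in
# this module accepts name/source/endpoint_names/is_enabled (as in the public
# azure-mgmt-iothub models): test one telemetry route against the built-in
# "events" endpoint.
#
#   route = RouteProperties(
#       name="telemetry-route",
#       source="DeviceMessages",
#       endpoint_names=["events"],
#       is_enabled=True,
#   )
#   test_input = TestRouteInput(route=route)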
class TestRouteResult(_serialization.Model):
"""Result of testing one route.
:ivar result: Result of testing route. Known values are: "undefined", "false", and "true".
:vartype result: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.TestResultStatus
:ivar details: Detailed result of testing route.
:vartype details: ~azure.mgmt.iothub.v2019_07_01_preview.models.TestRouteResultDetails
"""
_attribute_map = {
"result": {"key": "result", "type": "str"},
"details": {"key": "details", "type": "TestRouteResultDetails"},
}
def __init__(
self,
*,
result: Optional[Union[str, "_models.TestResultStatus"]] = None,
details: Optional["_models.TestRouteResultDetails"] = None,
**kwargs: Any
) -> None:
"""
:keyword result: Result of testing route. Known values are: "undefined", "false", and "true".
:paramtype result: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.TestResultStatus
:keyword details: Detailed result of testing route.
:paramtype details: ~azure.mgmt.iothub.v2019_07_01_preview.models.TestRouteResultDetails
"""
super().__init__(**kwargs)
self.result = result
self.details = details
class TestRouteResultDetails(_serialization.Model):
"""Detailed result of testing a route.
:ivar compilation_errors: JSON-serialized list of route compilation errors.
:vartype compilation_errors:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteCompilationError]
"""
_attribute_map = {
"compilation_errors": {"key": "compilationErrors", "type": "[RouteCompilationError]"},
}
def __init__(
self, *, compilation_errors: Optional[List["_models.RouteCompilationError"]] = None, **kwargs: Any
) -> None:
"""
:keyword compilation_errors: JSON-serialized list of route compilation errors.
:paramtype compilation_errors:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteCompilationError]
"""
super().__init__(**kwargs)
self.compilation_errors = compilation_errors
class UserSubscriptionQuota(_serialization.Model):
"""User subscription quota response.
:ivar id: IotHub type id.
:vartype id: str
:ivar type: Response type.
:vartype type: str
:ivar unit: Unit of IotHub type.
:vartype unit: str
:ivar current_value: Current number of IotHub type.
:vartype current_value: int
:ivar limit: Numerical limit on IotHub type.
:vartype limit: int
:ivar name: IotHub type.
:vartype name: ~azure.mgmt.iothub.v2019_07_01_preview.models.Name
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"type": {"key": "type", "type": "str"},
"unit": {"key": "unit", "type": "str"},
"current_value": {"key": "currentValue", "type": "int"},
"limit": {"key": "limit", "type": "int"},
"name": {"key": "name", "type": "Name"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
type: Optional[str] = None,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["_models.Name"] = None,
**kwargs: Any
) -> None:
"""
:keyword id: IotHub type id.
:paramtype id: str
:keyword type: Response type.
:paramtype type: str
:keyword unit: Unit of IotHub type.
:paramtype unit: str
:keyword current_value: Current number of IotHub type.
:paramtype current_value: int
:keyword limit: Numerical limit on IotHub type.
:paramtype limit: int
:keyword name: IotHub type.
:paramtype name: ~azure.mgmt.iothub.v2019_07_01_preview.models.Name
"""
super().__init__(**kwargs)
self.id = id
self.type = type
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UserSubscriptionQuotaListResult(_serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[UserSubscriptionQuota]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.UserSubscriptionQuota"]] = None, **kwargs: Any) -> None:
"""
:keyword value:
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.UserSubscriptionQuota]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None


# --- file: /dataPlatform/go_platform/api/LibScootersOwners.py (repo: petercheng168/My-account-automation) ---
import os
import sys
__dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(__dirname, '../api'))
from _init_ import _init_
class LibScootersOwners(object):
def __init__(self):
self.init = _init_()
    def scooters_owners_get(self, request_data_type=None, owner_ids=None, scooter_ids=None, scooter_plates=None,
                            scooter_vins=None, scooter_guids=None, scooter_vin_for_transfer=None,
                            scooter_plate_for_transfer=None, profile_id_for_transfer=None, account=None,
                            offset=None, limit=None):
""" get scooter's owner
Examples:
| ${resp} = | Scooters Owners Get | data |
"""
self.init.authHeader(account)
data = {
"op_code": "get",
"get_data": {
"request_data_type": request_data_type,
"owner_ids": owner_ids,
"scooter_ids": scooter_ids,
"scooter_plates": scooter_plates,
"scooter_vins": scooter_vins,
"scooter_guids": scooter_guids,
"scooter_vin_for_transfer": scooter_vin_for_transfer,
"scooter_plate_for_transfer": scooter_plate_for_transfer,
"profile_id_for_transfer": profile_id_for_transfer,
"pagination_criteria": {
"offset": offset,
"limit": limit
}
}
}
resp = self.init.request('post', "/scooters/owners", json=data)
        return resp
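
# A minimal usage sketch (the request_data_type value and plate below are
# hypothetical placeholders, not documented constants):
#
#   api = LibScootersOwners()
#   resp = api.scooters_owners_get(request_data_type="scooter",
#                                  scooter_plates=["AAA-0001"],
#                                  offset=0, limit=10)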


# --- file: /detectron2/engine/hooks.py (repo: Samleo8/detectron2, license: Apache-2.0) ---
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
import itertools
import logging
import os
import tempfile
import time
from collections import Counter
import torch
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
import detectron2.utils.comm as comm
from detectron2.evaluation.testing import flatten_results_dict
from detectron2.utils.events import EventStorage, EventWriter
from .train_loop import HookBase
__all__ = [
"CallbackHook",
"IterationTimer",
"PeriodicWriter",
"PeriodicCheckpointer",
"LRScheduler",
"AutogradProfiler",
"EvalHook",
"PreciseBN",
]
"""
Implement some common hooks.
"""
class CallbackHook(HookBase):
"""
Create a hook using callback functions provided by the user.
"""
def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
"""
Each argument is a function that takes one argument: the trainer.
"""
self._before_train = before_train
self._before_step = before_step
self._after_step = after_step
self._after_train = after_train
def before_train(self):
if self._before_train:
self._before_train(self.trainer)
def after_train(self):
if self._after_train:
self._after_train(self.trainer)
# The functions may be closures that hold reference to the trainer
# Therefore, delete them to avoid circular reference.
del self._before_train, self._after_train
del self._before_step, self._after_step
def before_step(self):
if self._before_step:
self._before_step(self.trainer)
def after_step(self):
if self._after_step:
self._after_step(self.trainer)
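
# A minimal sketch, assuming a trainer exposing ``register_hooks`` as in
# detectron2's TrainerBase: log the iteration counter after every step.
#
#   hook = CallbackHook(after_step=lambda trainer: print("iter", trainer.iter))
#   trainer.register_hooks([hook])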
class IterationTimer(HookBase):
"""
Track the time spent for each iteration (each run_step call in the trainer).
Print a summary in the end of training.
This hook uses the time between the call to its :meth:`before_step`
and :meth:`after_step` methods.
Under the convention that :meth:`before_step` of all hooks should only
    take a negligible amount of time, the :class:`IterationTimer` hook should be
placed at the beginning of the list of hooks to obtain accurate timing.
"""
def __init__(self, warmup_iter=3):
"""
Args:
warmup_iter (int): the number of iterations at the beginning to exclude
from timing.
"""
self._warmup_iter = warmup_iter
self._step_timer = Timer()
self._start_time = time.perf_counter()
self._total_timer = Timer()
def before_train(self):
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
def after_train(self):
logger = logging.getLogger(__name__)
total_time = time.perf_counter() - self._start_time
total_time_minus_hooks = self._total_timer.seconds()
hook_time = total_time - total_time_minus_hooks
num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
if num_iter > 0 and total_time_minus_hooks > 0:
# Speed is meaningful only after warmup
# NOTE this format is parsed by grep in some scripts
logger.info(
"Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
num_iter,
str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
total_time_minus_hooks / num_iter,
)
)
logger.info(
"Total training time: {} ({} on hooks)".format(
str(datetime.timedelta(seconds=int(total_time))),
str(datetime.timedelta(seconds=int(hook_time))),
)
)
def before_step(self):
self._step_timer.reset()
self._total_timer.resume()
def after_step(self):
# +1 because we're in after_step
iter_done = self.trainer.iter - self.trainer.start_iter + 1
if iter_done >= self._warmup_iter:
sec = self._step_timer.seconds()
self.trainer.storage.put_scalars(time=sec)
else:
self._start_time = time.perf_counter()
self._total_timer.reset()
self._total_timer.pause()
class PeriodicWriter(HookBase):
"""
Write events to EventStorage (by calling ``writer.write()``) periodically.
It is executed every ``period`` iterations and after the last iteration.
Note that ``period`` does not affect how data is smoothed by each writer.
"""
def __init__(self, writers, period=20):
"""
Args:
writers (list[EventWriter]): a list of EventWriter objects
period (int):
"""
self._writers = writers
for w in writers:
assert isinstance(w, EventWriter), w
self._period = period
def after_step(self):
if (self.trainer.iter + 1) % self._period == 0 or (
self.trainer.iter == self.trainer.max_iter - 1
):
for writer in self._writers:
writer.write()
def after_train(self):
for writer in self._writers:
writer.close()
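
# A minimal sketch, assuming CommonMetricPrinter from detectron2.utils.events
# and a trainer with ``register_hooks``: print the tracked metrics to stdout
# every 20 iterations.
#
#   trainer.register_hooks(
#       [PeriodicWriter([CommonMetricPrinter(trainer.max_iter)], period=20)]
#   )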
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
"""
Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
Note that when used as a hook,
it is unable to save additional data other than what's defined
by the given `checkpointer`.
It is executed every ``period`` iterations and after the last iteration.
"""
def before_train(self):
self.max_iter = self.trainer.max_iter
def after_step(self):
# No way to use **kwargs
self.step(self.trainer.iter)
class LRScheduler(HookBase):
"""
A hook which executes a torch builtin LR scheduler and summarizes the LR.
It is executed after every iteration.
"""
def __init__(self, optimizer, scheduler):
"""
Args:
optimizer (torch.optim.Optimizer):
scheduler (torch.optim._LRScheduler)
"""
self._optimizer = optimizer
self._scheduler = scheduler
# NOTE: some heuristics on what LR to summarize
# summarize the param group with most parameters
largest_group = max(len(g["params"]) for g in optimizer.param_groups)
if largest_group == 1:
# If all groups have one parameter,
# then find the most common initial LR, and use it for summary
lr_count = Counter([g["lr"] for g in optimizer.param_groups])
lr = lr_count.most_common()[0][0]
for i, g in enumerate(optimizer.param_groups):
if g["lr"] == lr:
self._best_param_group_id = i
break
else:
for i, g in enumerate(optimizer.param_groups):
if len(g["params"]) == largest_group:
self._best_param_group_id = i
break
def after_step(self):
lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
self._scheduler.step()
class AutogradProfiler(HookBase):
"""
A hook which runs `torch.autograd.profiler.profile`.
Examples:
.. code-block:: python
hooks.AutogradProfiler(
lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR
)
The above example will run the profiler for iteration 10~20 and dump
results to ``OUTPUT_DIR``. We did not profile the first few iterations
because they are typically slower than the rest.
The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
Note:
When used together with NCCL on older version of GPUs,
autograd profiler may cause deadlock because it unnecessarily allocates
memory on every device it sees. The memory management calls, if
interleaved with NCCL calls, lead to deadlock on GPUs that do not
support `cudaLaunchCooperativeKernelMultiDevice`.
"""
def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
"""
Args:
enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
and returns whether to enable the profiler.
It will be called once every step, and can be used to select which steps to profile.
output_dir (str): the output directory to dump tracing files.
use_cuda (bool): same as in `torch.autograd.profiler.profile`.
"""
self._enable_predicate = enable_predicate
self._use_cuda = use_cuda
self._output_dir = output_dir
def before_step(self):
if self._enable_predicate(self.trainer):
self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
self._profiler.__enter__()
else:
self._profiler = None
def after_step(self):
if self._profiler is None:
return
self._profiler.__exit__(None, None, None)
PathManager.mkdirs(self._output_dir)
out_file = os.path.join(
self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
)
if "://" not in out_file:
self._profiler.export_chrome_trace(out_file)
else:
# Support non-posix filesystems
with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
tmp_file = os.path.join(d, "tmp.json")
self._profiler.export_chrome_trace(tmp_file)
with open(tmp_file) as f:
content = f.read()
with PathManager.open(out_file, "w") as f:
f.write(content)
class EvalHook(HookBase):
"""
Run an evaluation function periodically, and at the end of training.
It is executed every ``eval_period`` iterations and after the last iteration.
"""
def __init__(self, eval_period, eval_function):
"""
Args:
eval_period (int): the period to run `eval_function`.
eval_function (callable): a function which takes no arguments, and
returns a nested dict of evaluation metrics.
Note:
This hook must be enabled in all or none workers.
If you would like only certain workers to perform evaluation,
give other workers a no-op function (`eval_function=lambda: None`).
"""
self._period = eval_period
self._func = eval_function
def _do_eval(self):
results = self._func()
if results:
assert isinstance(
results, dict
), "Eval function must return a dict. Got {} instead.".format(results)
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
try:
v = float(v)
except Exception:
raise ValueError(
"[EvalHook] eval_function should return a nested dict of float. "
"Got '{}: {}' instead.".format(k, v)
)
self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
# Evaluation may take different time among workers.
# A barrier make them start the next iteration together.
comm.synchronize()
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self._do_eval()
def after_train(self):
# func is likely a closure that holds reference to the trainer
# therefore we clean it to avoid circular reference in the end
del self._func
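
# A minimal sketch: run a validation function every 1000 iterations. The
# ``do_validation`` helper is hypothetical; it must return a nested dict of
# floats, per the docstring above.
#
#   def do_validation():
#       return {"segm": {"AP": 0.42}}
#
#   trainer.register_hooks([EvalHook(1000, do_validation)])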
class PreciseBN(HookBase):
"""
The standard implementation of BatchNorm uses EMA in inference, which is
sometimes suboptimal.
This class computes the true average of statistics rather than the moving average,
    and puts the true averages into every BN layer in the given model.
It is executed every ``period`` iterations and after the last iteration.
"""
def __init__(self, period, model, data_loader, num_iter):
"""
Args:
period (int): the period this hook is run, or 0 to not run during training.
    The hook will always run at the end of training.
model (nn.Module): a module whose all BN layers in training mode will be
updated by precise BN.
Note that user is responsible for ensuring the BN layers to be
updated are in training mode when this hook is triggered.
data_loader (iterable): it will produce data to be run by `model(data)`.
num_iter (int): number of iterations used to compute the precise
statistics.
"""
self._logger = logging.getLogger(__name__)
if len(get_bn_modules(model)) == 0:
self._logger.info(
"PreciseBN is disabled because model does not contain BN layers in training mode."
)
self._disabled = True
return
self._model = model
self._data_loader = data_loader
self._num_iter = num_iter
self._period = period
self._disabled = False
self._data_iter = None
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self.update_stats()
def update_stats(self):
"""
Update the model with precise statistics. Users can manually call this method.
"""
if self._disabled:
return
if self._data_iter is None:
self._data_iter = iter(self._data_loader)
def data_loader():
for num_iter in itertools.count(1):
if num_iter % 100 == 0:
self._logger.info(
"Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
)
# This way we can reuse the same iterator
yield next(self._data_iter)
with EventStorage(): # capture events in a new storage to discard them
self._logger.info(
"Running precise-BN for {} iterations... ".format(self._num_iter)
+ "Note that this could produce different statistics every time."
)
update_bn_stats(self._model, data_loader(), self._num_iter)
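
# A minimal sketch: with period=0 the hook still fires once after the final
# iteration (see after_step above), so this recomputes BN statistics over 200
# batches at the end of training only. ``model`` and ``data_loader`` are
# assumed to come from the caller.
#
#   trainer.register_hooks(
#       [PreciseBN(period=0, model=model, data_loader=data_loader, num_iter=200)]
#   )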


# --- file: /ex40c1.py (repo: Amo95/Learning-coding-with-Python3) ---
class MyStuff(object):
"""docstring for ClassName"""
def __init__(self):
self.tangerine = "I love coding in python"
print(self.tangerine)
def apple(self):
print("Coding OOP is amazing")
def main():
thing = MyStuff()
thing.apple()
if __name__ == '__main__':
main()
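
# Running this script prints, in order:
#   I love coding in python
#   Coding OOP is amazing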


# --- file: /Graphs/LX-2/molecule_otsu = False/BioImageXD-1.0/ITK/lib/InsightToolkit/WrapITK/lib/itkCropImageFilterPython.py (repo: giacomo21/Image-analysis) ---
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.40
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _itkCropImageFilterPython.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_itkCropImageFilterPython', [dirname(__file__)])
except ImportError:
import _itkCropImageFilterPython
return _itkCropImageFilterPython
if fp is not None:
try:
_mod = imp.load_module('_itkCropImageFilterPython', fp, pathname, description)
finally:
fp.close()
return _mod
_itkCropImageFilterPython = swig_import_helper()
del swig_import_helper
else:
import _itkCropImageFilterPython
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import itkSizePython
import pyBasePython
import itkExtractImageFilterPython
import ITKCommonBasePython
import itkEventObjectsPython
import ITKRegionsPython
import itkIndexPython
import itkOffsetPython
import itkImageToImageFilterBPython
import itkImagePython
import itkFixedArrayPython
import itkCovariantVectorPython
import vnl_vectorPython
import vcl_complexPython
import vnl_matrixPython
import itkVectorPython
import vnl_vector_refPython
import itkPointPython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkRGBAPixelPython
import itkSymmetricSecondRankTensorPython
import itkRGBPixelPython
import itkImageSourcePython
import itkVectorImagePython
import itkVariableLengthVectorPython
import itkImageToImageFilterAPython
def itkCropImageFilterICF3ICF3_New():
return itkCropImageFilterICF3ICF3.New()
def itkCropImageFilterICF2ICF2_New():
return itkCropImageFilterICF2ICF2.New()
def itkCropImageFilterID3ID3_New():
return itkCropImageFilterID3ID3.New()
def itkCropImageFilterID2ID2_New():
return itkCropImageFilterID2ID2.New()
def itkCropImageFilterIF3IF3_New():
return itkCropImageFilterIF3IF3.New()
def itkCropImageFilterIF2IF2_New():
return itkCropImageFilterIF2IF2.New()
def itkCropImageFilterIUS3IUS3_New():
return itkCropImageFilterIUS3IUS3.New()
def itkCropImageFilterIUS2IUS2_New():
return itkCropImageFilterIUS2IUS2.New()
def itkCropImageFilterIUL3IUL3_New():
return itkCropImageFilterIUL3IUL3.New()
def itkCropImageFilterIUL2IUL2_New():
return itkCropImageFilterIUL2IUL2.New()
def itkCropImageFilterIUC3IUC3_New():
return itkCropImageFilterIUC3IUC3.New()
def itkCropImageFilterIUC2IUC2_New():
return itkCropImageFilterIUC2IUC2.New()
def itkCropImageFilterICVF33ICVF33_New():
return itkCropImageFilterICVF33ICVF33.New()
def itkCropImageFilterICVF22ICVF22_New():
return itkCropImageFilterICVF22ICVF22.New()
def itkCropImageFilterIVF33IVF33_New():
return itkCropImageFilterIVF33IVF33.New()
def itkCropImageFilterIVF22IVF22_New():
return itkCropImageFilterIVF22IVF22.New()
def itkCropImageFilterIRGBAUS3IRGBAUS3_New():
return itkCropImageFilterIRGBAUS3IRGBAUS3.New()
def itkCropImageFilterIRGBAUS2IRGBAUS2_New():
return itkCropImageFilterIRGBAUS2IRGBAUS2.New()
def itkCropImageFilterIRGBUS3IRGBUS3_New():
return itkCropImageFilterIRGBUS3IRGBUS3.New()
def itkCropImageFilterIRGBUS2IRGBUS2_New():
return itkCropImageFilterIRGBUS2IRGBUS2.New()
class itkCropImageFilterICF2ICF2(itkExtractImageFilterPython.itkExtractImageFilterICF2ICF2):
"""Proxy of C++ itkCropImageFilterICF2ICF2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterICF2ICF2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterICF2ICF2"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterICF2ICF2"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterICF2ICF2
Create a new object of the class itkCropImageFilterICF2ICF2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterICF2ICF2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterICF2ICF2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterICF2ICF2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterICF2ICF2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetUpperBoundaryCropSize,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetUpperBoundaryCropSize,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetLowerBoundaryCropSize,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetLowerBoundaryCropSize,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_SetBoundaryCropSize,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF2ICF2_GetPointer,None,itkCropImageFilterICF2ICF2)
itkCropImageFilterICF2ICF2_swigregister = _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_swigregister
itkCropImageFilterICF2ICF2_swigregister(itkCropImageFilterICF2ICF2)
def itkCropImageFilterICF2ICF2___New_orig__():
"""itkCropImageFilterICF2ICF2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2___New_orig__()
def itkCropImageFilterICF2ICF2_cast(*args):
"""itkCropImageFilterICF2ICF2_cast(itkLightObject obj) -> itkCropImageFilterICF2ICF2"""
return _itkCropImageFilterPython.itkCropImageFilterICF2ICF2_cast(*args)
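
# A minimal WrapITK usage sketch (``reader`` is a hypothetical upstream filter
# producing a complex-float 2D image; passing plain sequences for itkSize2 is
# assumed to be accepted by the SWIG typemaps):
#
#   crop = itkCropImageFilterICF2ICF2.New(reader)
#   crop.SetLowerBoundaryCropSize([10, 10])  # pixels removed at the low end of each axis
#   crop.SetUpperBoundaryCropSize([5, 5])    # pixels removed at the high end of each axis
#   crop.Update()
#   cropped = crop.GetOutput()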
class itkCropImageFilterICF3ICF3(itkExtractImageFilterPython.itkExtractImageFilterICF3ICF3):
"""Proxy of C++ itkCropImageFilterICF3ICF3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterICF3ICF3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterICF3ICF3"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterICF3ICF3"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterICF3ICF3
Create a new object of the class itkCropImageFilterICF3ICF3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterICF3ICF3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterICF3ICF3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterICF3ICF3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterICF3ICF3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetUpperBoundaryCropSize,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetUpperBoundaryCropSize,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetLowerBoundaryCropSize,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetLowerBoundaryCropSize,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_SetBoundaryCropSize,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICF3ICF3_GetPointer,None,itkCropImageFilterICF3ICF3)
itkCropImageFilterICF3ICF3_swigregister = _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_swigregister
itkCropImageFilterICF3ICF3_swigregister(itkCropImageFilterICF3ICF3)
def itkCropImageFilterICF3ICF3___New_orig__():
"""itkCropImageFilterICF3ICF3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3___New_orig__()
def itkCropImageFilterICF3ICF3_cast(*args):
"""itkCropImageFilterICF3ICF3_cast(itkLightObject obj) -> itkCropImageFilterICF3ICF3"""
return _itkCropImageFilterPython.itkCropImageFilterICF3ICF3_cast(*args)
class itkCropImageFilterICVF22ICVF22(itkExtractImageFilterPython.itkExtractImageFilterICVF22ICVF22):
"""Proxy of C++ itkCropImageFilterICVF22ICVF22 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterICVF22ICVF22
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterICVF22ICVF22"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterICVF22ICVF22"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterICVF22ICVF22
Create a new object of the class itkCropImageFilterICVF22ICVF22 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterICVF22ICVF22.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterICVF22ICVF22.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterICVF22ICVF22.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterICVF22ICVF22.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetUpperBoundaryCropSize,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetUpperBoundaryCropSize,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetLowerBoundaryCropSize,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetLowerBoundaryCropSize,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_SetBoundaryCropSize,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_GetPointer,None,itkCropImageFilterICVF22ICVF22)
itkCropImageFilterICVF22ICVF22_swigregister = _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_swigregister
itkCropImageFilterICVF22ICVF22_swigregister(itkCropImageFilterICVF22ICVF22)
def itkCropImageFilterICVF22ICVF22___New_orig__():
"""itkCropImageFilterICVF22ICVF22___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22___New_orig__()
def itkCropImageFilterICVF22ICVF22_cast(*args):
"""itkCropImageFilterICVF22ICVF22_cast(itkLightObject obj) -> itkCropImageFilterICVF22ICVF22"""
return _itkCropImageFilterPython.itkCropImageFilterICVF22ICVF22_cast(*args)
class itkCropImageFilterICVF33ICVF33(itkExtractImageFilterPython.itkExtractImageFilterICVF33ICVF33):
"""Proxy of C++ itkCropImageFilterICVF33ICVF33 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterICVF33ICVF33
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterICVF33ICVF33"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterICVF33ICVF33"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterICVF33ICVF33
Create a new object of the class itkCropImageFilterICVF33ICVF33 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterICVF33ICVF33.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterICVF33ICVF33.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterICVF33ICVF33.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterICVF33ICVF33.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetUpperBoundaryCropSize,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetUpperBoundaryCropSize,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetLowerBoundaryCropSize,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetLowerBoundaryCropSize,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_SetBoundaryCropSize,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_GetPointer,None,itkCropImageFilterICVF33ICVF33)
itkCropImageFilterICVF33ICVF33_swigregister = _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_swigregister
itkCropImageFilterICVF33ICVF33_swigregister(itkCropImageFilterICVF33ICVF33)
def itkCropImageFilterICVF33ICVF33___New_orig__():
"""itkCropImageFilterICVF33ICVF33___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33___New_orig__()
def itkCropImageFilterICVF33ICVF33_cast(*args):
"""itkCropImageFilterICVF33ICVF33_cast(itkLightObject obj) -> itkCropImageFilterICVF33ICVF33"""
return _itkCropImageFilterPython.itkCropImageFilterICVF33ICVF33_cast(*args)
class itkCropImageFilterID2ID2(itkExtractImageFilterPython.itkExtractImageFilterID2ID2):
"""Proxy of C++ itkCropImageFilterID2ID2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterID2ID2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterID2ID2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterID2ID2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterID2ID2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterID2ID2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterID2ID2"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterID2ID2"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterID2ID2
Create a new object of the class itkCropImageFilterID2ID2 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterID2ID2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterID2ID2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterID2ID2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterID2ID2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_SetUpperBoundaryCropSize,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_GetUpperBoundaryCropSize,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_SetLowerBoundaryCropSize,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_GetLowerBoundaryCropSize,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_SetBoundaryCropSize,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID2ID2_GetPointer,None,itkCropImageFilterID2ID2)
itkCropImageFilterID2ID2_swigregister = _itkCropImageFilterPython.itkCropImageFilterID2ID2_swigregister
itkCropImageFilterID2ID2_swigregister(itkCropImageFilterID2ID2)
def itkCropImageFilterID2ID2___New_orig__():
"""itkCropImageFilterID2ID2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2___New_orig__()
def itkCropImageFilterID2ID2_cast(*args):
"""itkCropImageFilterID2ID2_cast(itkLightObject obj) -> itkCropImageFilterID2ID2"""
return _itkCropImageFilterPython.itkCropImageFilterID2ID2_cast(*args)
class itkCropImageFilterID3ID3(itkExtractImageFilterPython.itkExtractImageFilterID3ID3):
"""Proxy of C++ itkCropImageFilterID3ID3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterID3ID3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterID3ID3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterID3ID3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterID3ID3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterID3ID3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterID3ID3"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterID3ID3"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterID3ID3
Create a new object of the class itkCropImageFilterID3ID3 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterID3ID3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterID3ID3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterID3ID3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterID3ID3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_SetUpperBoundaryCropSize,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_GetUpperBoundaryCropSize,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_SetLowerBoundaryCropSize,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_GetLowerBoundaryCropSize,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_SetBoundaryCropSize,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterID3ID3_GetPointer,None,itkCropImageFilterID3ID3)
itkCropImageFilterID3ID3_swigregister = _itkCropImageFilterPython.itkCropImageFilterID3ID3_swigregister
itkCropImageFilterID3ID3_swigregister(itkCropImageFilterID3ID3)
def itkCropImageFilterID3ID3___New_orig__():
"""itkCropImageFilterID3ID3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3___New_orig__()
def itkCropImageFilterID3ID3_cast(*args):
"""itkCropImageFilterID3ID3_cast(itkLightObject obj) -> itkCropImageFilterID3ID3"""
return _itkCropImageFilterPython.itkCropImageFilterID3ID3_cast(*args)
class itkCropImageFilterIF2IF2(itkExtractImageFilterPython.itkExtractImageFilterIF2IF2):
"""Proxy of C++ itkCropImageFilterIF2IF2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIF2IF2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIF2IF2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIF2IF2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIF2IF2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIF2IF2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIF2IF2"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIF2IF2"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIF2IF2
      Create a new object of the class itkCropImageFilterIF2IF2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIF2IF2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIF2IF2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIF2IF2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIF2IF2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetUpperBoundaryCropSize,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetUpperBoundaryCropSize,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetLowerBoundaryCropSize,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetLowerBoundaryCropSize,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_SetBoundaryCropSize,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF2IF2_GetPointer,None,itkCropImageFilterIF2IF2)
itkCropImageFilterIF2IF2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIF2IF2_swigregister
itkCropImageFilterIF2IF2_swigregister(itkCropImageFilterIF2IF2)
def itkCropImageFilterIF2IF2___New_orig__():
"""itkCropImageFilterIF2IF2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2___New_orig__()
def itkCropImageFilterIF2IF2_cast(*args):
"""itkCropImageFilterIF2IF2_cast(itkLightObject obj) -> itkCropImageFilterIF2IF2"""
return _itkCropImageFilterPython.itkCropImageFilterIF2IF2_cast(*args)
class itkCropImageFilterIF3IF3(itkExtractImageFilterPython.itkExtractImageFilterIF3IF3):
"""Proxy of C++ itkCropImageFilterIF3IF3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIF3IF3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIF3IF3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIF3IF3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIF3IF3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIF3IF3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIF3IF3"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIF3IF3"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIF3IF3
      Create a new object of the class itkCropImageFilterIF3IF3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIF3IF3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIF3IF3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIF3IF3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIF3IF3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetUpperBoundaryCropSize,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetUpperBoundaryCropSize,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetLowerBoundaryCropSize,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetLowerBoundaryCropSize,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_SetBoundaryCropSize,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIF3IF3_GetPointer,None,itkCropImageFilterIF3IF3)
itkCropImageFilterIF3IF3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIF3IF3_swigregister
itkCropImageFilterIF3IF3_swigregister(itkCropImageFilterIF3IF3)
def itkCropImageFilterIF3IF3___New_orig__():
"""itkCropImageFilterIF3IF3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3___New_orig__()
def itkCropImageFilterIF3IF3_cast(*args):
"""itkCropImageFilterIF3IF3_cast(itkLightObject obj) -> itkCropImageFilterIF3IF3"""
return _itkCropImageFilterPython.itkCropImageFilterIF3IF3_cast(*args)
class itkCropImageFilterIRGBAUS2IRGBAUS2(itkExtractImageFilterPython.itkExtractImageFilterIRGBAUS2IRGBAUS2):
"""Proxy of C++ itkCropImageFilterIRGBAUS2IRGBAUS2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIRGBAUS2IRGBAUS2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIRGBAUS2IRGBAUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIRGBAUS2IRGBAUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIRGBAUS2IRGBAUS2
      Create a new object of the class itkCropImageFilterIRGBAUS2IRGBAUS2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIRGBAUS2IRGBAUS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIRGBAUS2IRGBAUS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIRGBAUS2IRGBAUS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIRGBAUS2IRGBAUS2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetUpperBoundaryCropSize,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetUpperBoundaryCropSize,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetLowerBoundaryCropSize,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetLowerBoundaryCropSize,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_SetBoundaryCropSize,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_GetPointer,None,itkCropImageFilterIRGBAUS2IRGBAUS2)
itkCropImageFilterIRGBAUS2IRGBAUS2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_swigregister
itkCropImageFilterIRGBAUS2IRGBAUS2_swigregister(itkCropImageFilterIRGBAUS2IRGBAUS2)
def itkCropImageFilterIRGBAUS2IRGBAUS2___New_orig__():
"""itkCropImageFilterIRGBAUS2IRGBAUS2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2___New_orig__()
def itkCropImageFilterIRGBAUS2IRGBAUS2_cast(*args):
"""itkCropImageFilterIRGBAUS2IRGBAUS2_cast(itkLightObject obj) -> itkCropImageFilterIRGBAUS2IRGBAUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS2IRGBAUS2_cast(*args)
class itkCropImageFilterIRGBAUS3IRGBAUS3(itkExtractImageFilterPython.itkExtractImageFilterIRGBAUS3IRGBAUS3):
"""Proxy of C++ itkCropImageFilterIRGBAUS3IRGBAUS3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIRGBAUS3IRGBAUS3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIRGBAUS3IRGBAUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIRGBAUS3IRGBAUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIRGBAUS3IRGBAUS3
      Create a new object of the class itkCropImageFilterIRGBAUS3IRGBAUS3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIRGBAUS3IRGBAUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIRGBAUS3IRGBAUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIRGBAUS3IRGBAUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIRGBAUS3IRGBAUS3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetUpperBoundaryCropSize,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetUpperBoundaryCropSize,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetLowerBoundaryCropSize,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetLowerBoundaryCropSize,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_SetBoundaryCropSize,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_GetPointer,None,itkCropImageFilterIRGBAUS3IRGBAUS3)
itkCropImageFilterIRGBAUS3IRGBAUS3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_swigregister
itkCropImageFilterIRGBAUS3IRGBAUS3_swigregister(itkCropImageFilterIRGBAUS3IRGBAUS3)
def itkCropImageFilterIRGBAUS3IRGBAUS3___New_orig__():
"""itkCropImageFilterIRGBAUS3IRGBAUS3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3___New_orig__()
def itkCropImageFilterIRGBAUS3IRGBAUS3_cast(*args):
"""itkCropImageFilterIRGBAUS3IRGBAUS3_cast(itkLightObject obj) -> itkCropImageFilterIRGBAUS3IRGBAUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBAUS3IRGBAUS3_cast(*args)
class itkCropImageFilterIRGBUS2IRGBUS2(itkExtractImageFilterPython.itkExtractImageFilterIRGBUS2IRGBUS2):
"""Proxy of C++ itkCropImageFilterIRGBUS2IRGBUS2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIRGBUS2IRGBUS2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIRGBUS2IRGBUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIRGBUS2IRGBUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIRGBUS2IRGBUS2
      Create a new object of the class itkCropImageFilterIRGBUS2IRGBUS2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIRGBUS2IRGBUS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIRGBUS2IRGBUS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIRGBUS2IRGBUS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIRGBUS2IRGBUS2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetUpperBoundaryCropSize,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetUpperBoundaryCropSize,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetLowerBoundaryCropSize,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetLowerBoundaryCropSize,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_SetBoundaryCropSize,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_GetPointer,None,itkCropImageFilterIRGBUS2IRGBUS2)
itkCropImageFilterIRGBUS2IRGBUS2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_swigregister
itkCropImageFilterIRGBUS2IRGBUS2_swigregister(itkCropImageFilterIRGBUS2IRGBUS2)
def itkCropImageFilterIRGBUS2IRGBUS2___New_orig__():
"""itkCropImageFilterIRGBUS2IRGBUS2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2___New_orig__()
def itkCropImageFilterIRGBUS2IRGBUS2_cast(*args):
"""itkCropImageFilterIRGBUS2IRGBUS2_cast(itkLightObject obj) -> itkCropImageFilterIRGBUS2IRGBUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS2IRGBUS2_cast(*args)
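
# --- Illustrative cast sketch (hand-written, not SWIG-generated) ---
# The module-level *_cast helpers recover a typed proxy from a generic
# itkLightObject, e.g. one returned by an API that erases the concrete type.
# Behaviour on a mismatched object depends on the wrapping, so the result is
# treated here as possibly null; this is a sketch, not a contract.
def _sketchDowncastIRGBUS2(light_object):
    """Sketch: downcast an itkLightObject to the 2D RGB unsigned-short crop filter."""
    crop = itkCropImageFilterIRGBUS2IRGBUS2_cast(light_object)
    if crop is None:
        raise TypeError('object is not an itkCropImageFilterIRGBUS2IRGBUS2')
    return crop
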
class itkCropImageFilterIRGBUS3IRGBUS3(itkExtractImageFilterPython.itkExtractImageFilterIRGBUS3IRGBUS3):
"""Proxy of C++ itkCropImageFilterIRGBUS3IRGBUS3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIRGBUS3IRGBUS3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIRGBUS3IRGBUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIRGBUS3IRGBUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIRGBUS3IRGBUS3
      Create a new object of the class itkCropImageFilterIRGBUS3IRGBUS3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIRGBUS3IRGBUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIRGBUS3IRGBUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIRGBUS3IRGBUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIRGBUS3IRGBUS3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetUpperBoundaryCropSize,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetUpperBoundaryCropSize,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetLowerBoundaryCropSize,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetLowerBoundaryCropSize,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_SetBoundaryCropSize,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_GetPointer,None,itkCropImageFilterIRGBUS3IRGBUS3)
itkCropImageFilterIRGBUS3IRGBUS3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_swigregister
itkCropImageFilterIRGBUS3IRGBUS3_swigregister(itkCropImageFilterIRGBUS3IRGBUS3)
def itkCropImageFilterIRGBUS3IRGBUS3___New_orig__():
"""itkCropImageFilterIRGBUS3IRGBUS3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3___New_orig__()
def itkCropImageFilterIRGBUS3IRGBUS3_cast(*args):
"""itkCropImageFilterIRGBUS3IRGBUS3_cast(itkLightObject obj) -> itkCropImageFilterIRGBUS3IRGBUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIRGBUS3IRGBUS3_cast(*args)
class itkCropImageFilterIUC2IUC2(itkExtractImageFilterPython.itkExtractImageFilterIUC2IUC2):
"""Proxy of C++ itkCropImageFilterIUC2IUC2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUC2IUC2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUC2IUC2"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUC2IUC2"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUC2IUC2
      Create a new object of the class itkCropImageFilterIUC2IUC2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUC2IUC2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUC2IUC2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUC2IUC2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUC2IUC2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetUpperBoundaryCropSize,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetUpperBoundaryCropSize,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetLowerBoundaryCropSize,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetLowerBoundaryCropSize,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_SetBoundaryCropSize,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_GetPointer,None,itkCropImageFilterIUC2IUC2)
itkCropImageFilterIUC2IUC2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_swigregister
itkCropImageFilterIUC2IUC2_swigregister(itkCropImageFilterIUC2IUC2)
def itkCropImageFilterIUC2IUC2___New_orig__():
"""itkCropImageFilterIUC2IUC2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2___New_orig__()
def itkCropImageFilterIUC2IUC2_cast(*args):
"""itkCropImageFilterIUC2IUC2_cast(itkLightObject obj) -> itkCropImageFilterIUC2IUC2"""
return _itkCropImageFilterPython.itkCropImageFilterIUC2IUC2_cast(*args)
class itkCropImageFilterIUC3IUC3(itkExtractImageFilterPython.itkExtractImageFilterIUC3IUC3):
"""Proxy of C++ itkCropImageFilterIUC3IUC3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUC3IUC3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUC3IUC3"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUC3IUC3"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUC3IUC3
      Create a new object of the class itkCropImageFilterIUC3IUC3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUC3IUC3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUC3IUC3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUC3IUC3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUC3IUC3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetUpperBoundaryCropSize,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetUpperBoundaryCropSize,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetLowerBoundaryCropSize,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetLowerBoundaryCropSize,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_SetBoundaryCropSize,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_GetPointer,None,itkCropImageFilterIUC3IUC3)
itkCropImageFilterIUC3IUC3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_swigregister
itkCropImageFilterIUC3IUC3_swigregister(itkCropImageFilterIUC3IUC3)
def itkCropImageFilterIUC3IUC3___New_orig__():
"""itkCropImageFilterIUC3IUC3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3___New_orig__()
def itkCropImageFilterIUC3IUC3_cast(*args):
"""itkCropImageFilterIUC3IUC3_cast(itkLightObject obj) -> itkCropImageFilterIUC3IUC3"""
return _itkCropImageFilterPython.itkCropImageFilterIUC3IUC3_cast(*args)
class itkCropImageFilterIUL2IUL2(itkExtractImageFilterPython.itkExtractImageFilterIUL2IUL2):
"""Proxy of C++ itkCropImageFilterIUL2IUL2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUL2IUL2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUL2IUL2"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUL2IUL2"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUL2IUL2
      Create a new object of the class itkCropImageFilterIUL2IUL2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUL2IUL2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUL2IUL2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUL2IUL2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUL2IUL2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetUpperBoundaryCropSize,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetUpperBoundaryCropSize,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetLowerBoundaryCropSize,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetLowerBoundaryCropSize,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_SetBoundaryCropSize,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_GetPointer,None,itkCropImageFilterIUL2IUL2)
itkCropImageFilterIUL2IUL2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_swigregister
itkCropImageFilterIUL2IUL2_swigregister(itkCropImageFilterIUL2IUL2)
def itkCropImageFilterIUL2IUL2___New_orig__():
"""itkCropImageFilterIUL2IUL2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2___New_orig__()
def itkCropImageFilterIUL2IUL2_cast(*args):
"""itkCropImageFilterIUL2IUL2_cast(itkLightObject obj) -> itkCropImageFilterIUL2IUL2"""
return _itkCropImageFilterPython.itkCropImageFilterIUL2IUL2_cast(*args)
class itkCropImageFilterIUL3IUL3(itkExtractImageFilterPython.itkExtractImageFilterIUL3IUL3):
"""Proxy of C++ itkCropImageFilterIUL3IUL3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUL3IUL3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUL3IUL3"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUL3IUL3"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUL3IUL3
      Create a new object of the class itkCropImageFilterIUL3IUL3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUL3IUL3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUL3IUL3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUL3IUL3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUL3IUL3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetUpperBoundaryCropSize,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetUpperBoundaryCropSize,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetLowerBoundaryCropSize,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetLowerBoundaryCropSize,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_SetBoundaryCropSize,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_GetPointer,None,itkCropImageFilterIUL3IUL3)
itkCropImageFilterIUL3IUL3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_swigregister
itkCropImageFilterIUL3IUL3_swigregister(itkCropImageFilterIUL3IUL3)
def itkCropImageFilterIUL3IUL3___New_orig__():
"""itkCropImageFilterIUL3IUL3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3___New_orig__()
def itkCropImageFilterIUL3IUL3_cast(*args):
"""itkCropImageFilterIUL3IUL3_cast(itkLightObject obj) -> itkCropImageFilterIUL3IUL3"""
return _itkCropImageFilterPython.itkCropImageFilterIUL3IUL3_cast(*args)
class itkCropImageFilterIUS2IUS2(itkExtractImageFilterPython.itkExtractImageFilterIUS2IUS2):
"""Proxy of C++ itkCropImageFilterIUS2IUS2 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUS2IUS2
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUS2IUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUS2IUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUS2IUS2
      Create a new object of the class itkCropImageFilterIUS2IUS2 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUS2IUS2.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUS2IUS2.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUS2IUS2.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUS2IUS2.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetUpperBoundaryCropSize,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetUpperBoundaryCropSize,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetLowerBoundaryCropSize,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetLowerBoundaryCropSize,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_SetBoundaryCropSize,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_GetPointer,None,itkCropImageFilterIUS2IUS2)
itkCropImageFilterIUS2IUS2_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_swigregister
itkCropImageFilterIUS2IUS2_swigregister(itkCropImageFilterIUS2IUS2)
def itkCropImageFilterIUS2IUS2___New_orig__():
"""itkCropImageFilterIUS2IUS2___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2___New_orig__()
def itkCropImageFilterIUS2IUS2_cast(*args):
"""itkCropImageFilterIUS2IUS2_cast(itkLightObject obj) -> itkCropImageFilterIUS2IUS2"""
return _itkCropImageFilterPython.itkCropImageFilterIUS2IUS2_cast(*args)
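
# --- Illustrative named-parameter sketch (hand-written, not SWIG-generated) ---
# Per the New() docstring above, keyword arguments are forwarded to the
# matching Set* methods, so the two forms below should be equivalent.
# `itk.Size` availability in this build is an assumption.
def _sketchNamedParamsIUS2IUS2(reader):
    """Sketch: configure the 2D unsigned-short crop filter via keywords."""
    import itk
    size = itk.Size[2]()
    size.Fill(5)
    # One call wires the input and sets both crop sizes...
    crop = itkCropImageFilterIUS2IUS2.New(reader, UpperBoundaryCropSize=size,
                                          LowerBoundaryCropSize=size)
    # ...equivalent (most of the time) to the explicit form:
    # crop = itkCropImageFilterIUS2IUS2.New()
    # crop.SetInput(0, reader.GetOutput())
    # crop.SetUpperBoundaryCropSize(size)
    # crop.SetLowerBoundaryCropSize(size)
    # Note: SetBoundaryCropSize(size) sets both bounds in one call.
    return crop
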
class itkCropImageFilterIUS3IUS3(itkExtractImageFilterPython.itkExtractImageFilterIUS3IUS3):
"""Proxy of C++ itkCropImageFilterIUS3IUS3 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIUS3IUS3
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIUS3IUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIUS3IUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIUS3IUS3
      Create a new object of the class itkCropImageFilterIUS3IUS3 and set its input and parameters if any
      named or non-named arguments are passed to this method.
      New() tries to assign all the non-named parameters to the inputs of the new object: the
      first non-named parameter becomes the first input, and so on.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIUS3IUS3.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIUS3IUS3.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIUS3IUS3.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIUS3IUS3.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetUpperBoundaryCropSize,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetUpperBoundaryCropSize,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetLowerBoundaryCropSize,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetLowerBoundaryCropSize,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_SetBoundaryCropSize,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_GetPointer,None,itkCropImageFilterIUS3IUS3)
itkCropImageFilterIUS3IUS3_swigregister = _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_swigregister
itkCropImageFilterIUS3IUS3_swigregister(itkCropImageFilterIUS3IUS3)
def itkCropImageFilterIUS3IUS3___New_orig__():
"""itkCropImageFilterIUS3IUS3___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3___New_orig__()
def itkCropImageFilterIUS3IUS3_cast(*args):
"""itkCropImageFilterIUS3IUS3_cast(itkLightObject obj) -> itkCropImageFilterIUS3IUS3"""
return _itkCropImageFilterPython.itkCropImageFilterIUS3IUS3_cast(*args)
class itkCropImageFilterIVF22IVF22(itkExtractImageFilterPython.itkExtractImageFilterIVF22IVF22):
"""Proxy of C++ itkCropImageFilterIVF22IVF22 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize2 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize2"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize2 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIVF22IVF22
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIVF22IVF22"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIVF22IVF22"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIVF22IVF22
Create a new object of the class itkCropImageFilterIVF22IVF22 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIVF22IVF22.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIVF22IVF22.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIVF22IVF22.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIVF22IVF22.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetUpperBoundaryCropSize,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetUpperBoundaryCropSize,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetLowerBoundaryCropSize,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetLowerBoundaryCropSize,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_SetBoundaryCropSize,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_GetPointer,None,itkCropImageFilterIVF22IVF22)
itkCropImageFilterIVF22IVF22_swigregister = _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_swigregister
itkCropImageFilterIVF22IVF22_swigregister(itkCropImageFilterIVF22IVF22)
def itkCropImageFilterIVF22IVF22___New_orig__():
"""itkCropImageFilterIVF22IVF22___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22___New_orig__()
def itkCropImageFilterIVF22IVF22_cast(*args):
"""itkCropImageFilterIVF22IVF22_cast(itkLightObject obj) -> itkCropImageFilterIVF22IVF22"""
return _itkCropImageFilterPython.itkCropImageFilterIVF22IVF22_cast(*args)
class itkCropImageFilterIVF33IVF33(itkExtractImageFilterPython.itkExtractImageFilterIVF33IVF33):
"""Proxy of C++ itkCropImageFilterIVF33IVF33 class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
InputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_InputImageDimension
OutputImageDimension = _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_OutputImageDimension
InputConvertibleToOutputCheck = _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_InputConvertibleToOutputCheck
SameDimensionCheck = _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SameDimensionCheck
def __New_orig__():
"""__New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33___New_orig__()
__New_orig__ = staticmethod(__New_orig__)
def SetUpperBoundaryCropSize(self, *args):
"""SetUpperBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetUpperBoundaryCropSize(self, *args)
def GetUpperBoundaryCropSize(self):
"""GetUpperBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetUpperBoundaryCropSize(self)
def SetLowerBoundaryCropSize(self, *args):
"""SetLowerBoundaryCropSize(self, itkSize3 _arg)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetLowerBoundaryCropSize(self, *args)
def GetLowerBoundaryCropSize(self):
"""GetLowerBoundaryCropSize(self) -> itkSize3"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetLowerBoundaryCropSize(self)
def SetBoundaryCropSize(self, *args):
"""SetBoundaryCropSize(self, itkSize3 s)"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetBoundaryCropSize(self, *args)
__swig_destroy__ = _itkCropImageFilterPython.delete_itkCropImageFilterIVF33IVF33
def cast(*args):
"""cast(itkLightObject obj) -> itkCropImageFilterIVF33IVF33"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_cast(*args)
cast = staticmethod(cast)
def GetPointer(self):
"""GetPointer(self) -> itkCropImageFilterIVF33IVF33"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetPointer(self)
def New(*args, **kargs):
"""New() -> itkCropImageFilterIVF33IVF33
Create a new object of the class itkCropImageFilterIVF33IVF33 and set the input and the parameters if some
named or non-named arguments are passed to that method.
New() tries to assign all the non named parameters to the input of the new objects - the
first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name prefixed by 'Set'.
Ex:
itkCropImageFilterIVF33IVF33.New( reader, Threshold=10 )
is (most of the time) equivalent to:
obj = itkCropImageFilterIVF33IVF33.New()
obj.SetInput( 0, reader.GetOutput() )
obj.SetThreshold( 10 )
"""
obj = itkCropImageFilterIVF33IVF33.__New_orig__()
import itkTemplate
itkTemplate.New(obj, *args, **kargs)
return obj
New = staticmethod(New)
itkCropImageFilterIVF33IVF33.SetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetUpperBoundaryCropSize,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33.GetUpperBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetUpperBoundaryCropSize,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33.SetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetLowerBoundaryCropSize,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33.GetLowerBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetLowerBoundaryCropSize,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33.SetBoundaryCropSize = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_SetBoundaryCropSize,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33.GetPointer = new_instancemethod(_itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_GetPointer,None,itkCropImageFilterIVF33IVF33)
itkCropImageFilterIVF33IVF33_swigregister = _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_swigregister
itkCropImageFilterIVF33IVF33_swigregister(itkCropImageFilterIVF33IVF33)
def itkCropImageFilterIVF33IVF33___New_orig__():
"""itkCropImageFilterIVF33IVF33___New_orig__()"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33___New_orig__()
def itkCropImageFilterIVF33IVF33_cast(*args):
"""itkCropImageFilterIVF33IVF33_cast(itkLightObject obj) -> itkCropImageFilterIVF33IVF33"""
return _itkCropImageFilterPython.itkCropImageFilterIVF33IVF33_cast(*args)
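# --- Editor's example (not part of the generated wrapper) --------------------
# These template instantiations are normally reached through itk's high-level
# template dispatch rather than constructed directly. A minimal sketch,
# assuming a 3-D unsigned-short image on disk; the file name is a placeholder,
# and a plain sequence is assumed to convert to itkSize3 (otherwise build one
# explicitly via itk.Size[3]()).
def _example_crop_usage(file_name='input.mha'):
    import itk
    ImageType = itk.Image[itk.US, 3]
    reader = itk.ImageFileReader[ImageType].New(FileName=file_name)
    crop = itk.CropImageFilter[ImageType, ImageType].New(reader)
    crop.SetBoundaryCropSize((10, 10, 2))  # trim 10x10x2 voxels on each side
    crop.Update()
    return crop.GetOutput()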
| [
"[email protected]"
] | |
dea6745e243b7a67b860dc3d86998254c22d6dd9 | 112e792b80f0ba5f0989a297156d1554e18034d9 | /testapp/migrations/0003_person__etag.py | 77350671fec980d95092bece88612d0351d2bf1b | [] | no_license | VNG-Realisatie/vng-api-common | ba4537c230f47f0b0ba305eccc289eef09be56f2 | 609c931b3f8b640aa6dff6d02cfb799745f25eb5 | refs/heads/master | 2023-06-26T22:52:50.419762 | 2023-06-20T11:20:53 | 2023-06-20T11:20:53 | 136,349,326 | 4 | 14 | null | 2023-09-01T14:26:58 | 2018-06-06T15:31:27 | Python | UTF-8 | Python | false | false | 629 | py | # Generated by Django 2.1.8 on 2019-09-05 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("testapp", "0002_auto_20190620_0849")]
operations = [
migrations.AddField(
model_name="person",
name="_etag",
field=models.CharField(
default="",
editable=False,
help_text="MD5 hash of the resource representation in its current version.",
max_length=32,
verbose_name="etag value",
),
preserve_default=False,
)
]
| [
"[email protected]"
] | |
ceb1883778ddd0a2c8c0827fd3ee230013717c7f | da370ba0df9700519139e1da54f3e7f38e9b7f5f | /.nox/tests/lib/python3.7/site-packages/tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py | 79f834dc7218d03f27472a50abf01fcb1c2b97fa | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | antonevenepoel/open_spiel | 90e3c7c6611cf508f2872237412fd67cf6cd10e0 | f2f0c786410018675fc40e9a5b82c40814555fa8 | refs/heads/master | 2021-03-15T20:57:00.562672 | 2020-05-15T16:10:23 | 2020-05-15T16:10:23 | 246,877,171 | 0 | 0 | Apache-2.0 | 2020-03-12T16:07:42 | 2020-03-12T16:07:41 | null | UTF-8 | Python | false | false | 30,020 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""PositiveSemidefiniteKernel base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import operator
import six
import tensorflow as tf
from tensorflow_probability.python.positive_semidefinite_kernels.internal import util
__all__ = [
'PositiveSemidefiniteKernel',
]
@six.add_metaclass(abc.ABCMeta)
class PositiveSemidefiniteKernel(object):
"""Abstract base class for positive semi-definite kernel functions.
#### Background
For any set `S`, a real- (or complex-valued) function `k` on the Cartesian
product `S x S` is called positive semi-definite if we have
```none
sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0
```
for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in
the reals (or the complex plane). '*' denotes the complex conjugate, in the
complex case.
Some examples:
- `S` is R, and `k(s, t) = (s - a) (t - b)`, where a, b are in R. This
corresponds to a linear kernel.
- `S` is R^+ U {0}, and `k(s, t) = min(s, t)`. This corresponds to a kernel
for a Wiener process.
- `S` is the set of strings over an alphabet `A = {c1, ... cC}`, and
`k(s, t)` is defined via some similarity metric over strings.
We model positive semi-definite functions (*kernels*, in common machine
learning parlance) as classes implementing 2 primary public methods:
`matrix` and `apply`.
`matrix` computes the value of the kernel *pairwise* on two (batches of)
collections of inputs. When the collections are both the same set of inputs,
the result is the Gram (or Gramian) matrix
(https://en.wikipedia.org/wiki/Gramian_matrix).
`apply` computes the value of the kernel function at a pair of (batches of)
input locations. It is the more low-level operation and must be implemented in
each concrete base class of PositiveSemidefiniteKernel.
#### Kernel Parameter Shape Semantics
PositiveSemidefiniteKernel implementations support batching of kernel
parameters. This allows, for example, creating a single kernel object which
acts like a collection of kernels with different parameters. This might be
useful for, e.g., for exploring multiple random initializations in parallel
during a kernel parameter optimization procedure.
The interaction between kernel parameter shapes and input shapes (see below)
is somewhat subtle. The semantics are designed to make the most common use
cases easy, while not ruling out more intricate control. The overarching
principle is that kernel parameter batch shapes must be broadcastable with
input batch shapes (see below). Examples are provided in the method-level
documentation.
#### Input Shape Semantics
`apply` and `matrix` each support a notion of batching inputs; see the
method-level documentation for full details; here we describe the overall
semantics of input shapes. Inputs to PositiveSemidefiniteKernel methods
partition into 3 pieces:
```none
[b1, ..., bB, e, f1, ..., fF]
'----------' | '---------'
| | '-- Feature dimensions
| '-- Example dimension (`matrix`-only)
'-- Batch dimensions
```
- Feature dimensions correspond to the space over which the kernel is defined;
in typical applications inputs are vectors and this part of the shape is
rank-1. For example, if our kernel is defined over R^2 x R^2, each input is
a 2-D vector (a rank-1 tensor of shape `[2,]`) so that
`F = 1, [f1, ..., fF] = [2]`. If we defined a kernel over DxD matrices, its
domain would be R^(DxD) x R^(DxD), we would have `F = 2` and
`[f1, ..., fF] = [D, D]`. Feature shapes of inputs should be the same, but
no exception will be raised unless they are broadcast-incompatible.
- Example dimensions are relevant only for `matrix` calls. Given inputs `x`
and `y` with feature dimensions [f1, ..., fF] and example dimensions `e1`
    and `e2`, a call to `matrix` will yield an `e1 x e2` matrix. If batch dimensions
are present, it will return a batch of `e1 x e2` matrices.
- Batch dimensions are supported for inputs to `apply` or `matrix`; the only
requirement is that batch dimensions of inputs `x` and `y` be broadcastable
with each other and with the kernel's parameter batch shapes (see above).
"""
def __init__(self, feature_ndims, dtype=None, name=None):
"""Construct a PositiveSemidefiniteKernel (subclass) instance.
Args:
feature_ndims: Python `integer` indicating the number of dims (the rank)
of the feature space this kernel acts on.
dtype: `DType` on which this kernel operates.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if `feature_ndims` is not an integer greater than 0
Inputs to PositiveSemidefiniteKernel methods partition into 3 pieces:
```none
[b1, ..., bB, e, f1, ..., fF]
'----------' | '---------'
| | '-- Feature dimensions
| '-- Example dimension (`matrix`-only)
'-- Batch dimensions
```
The `feature_ndims` argument declares how many of the right-most shape
dimensions belong to the feature dimensions. This enables us to predict
which shape dimensions will be 'reduced' away during kernel computation.
"""
if not (isinstance(feature_ndims, int) and feature_ndims > 0):
raise ValueError(
'`feature_ndims` must be a Python `integer` greater than zero. ' +
'Got: {}'.format(feature_ndims))
self._feature_ndims = feature_ndims
self._dtype = dtype
if not name or name[-1] != '/': # `name` is not a name scope
name = tf.compat.v1.name_scope(name or type(self).__name__).name
self._name = name
@property
def feature_ndims(self):
"""The number of feature dimensions.
Kernel functions generally act on pairs of inputs from some space like
```none
R^(d1 x ... x dD)
```
or, in words: rank-`D` real-valued tensors of shape `[d1, ..., dD]`. Inputs
can be vectors in some `R^N`, but are not restricted to be. Indeed, one
might consider kernels over matrices, tensors, or even more general spaces,
like strings or graphs.
Returns:
The number of feature dimensions (feature rank) of this kernel.
"""
return self._feature_ndims
@property
def dtype(self):
"""DType over which the kernel operates."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this class."""
return self._name
@property
def batch_shape(self):
"""The batch_shape property of a PositiveSemidefiniteKernel.
This property describes the fully broadcast shape of all kernel parameters.
For example, consider an ExponentiatedQuadratic kernel, which is
parameterized by an amplitude and length_scale:
```none
exp_quad(x, x') := amplitude * exp(||x - x'||**2 / length_scale**2)
```
The batch_shape of such a kernel is derived from broadcasting the shapes of
`amplitude` and `length_scale`. E.g., if their shapes were
```python
amplitude.shape = [2, 1, 1]
length_scale.shape = [1, 4, 3]
```
then `exp_quad`'s batch_shape would be `[2, 4, 3]`.
Note that this property defers to the private _batch_shape method, which
concrete implementation sub-classes are obliged to provide.
Returns:
`TensorShape` instance describing the fully broadcast shape of all
kernel parameters.
"""
return self._batch_shape()
def batch_shape_tensor(self):
"""The batch_shape property of a PositiveSemidefiniteKernel as a `Tensor`.
Returns:
`Tensor` which evaluates to a vector of integers which are the
fully-broadcast shapes of the kernel parameters.
"""
with tf.compat.v1.name_scope(self._name):
if self.batch_shape.is_fully_defined():
return tf.convert_to_tensor(
value=self.batch_shape.as_list(),
dtype=tf.int32,
name='batch_shape')
with tf.compat.v1.name_scope('batch_shape_tensor'):
return self._batch_shape_tensor()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with tf.compat.v1.name_scope(self.name):
values = [] if values is None else values
with tf.compat.v1.name_scope(name, values=values) as scope:
yield scope
def apply(self, x1, x2):
"""Apply the kernel function to a pair of (batches of) inputs.
Args:
x1: `Tensor` input to the first positional parameter of the kernel, of
shape `[b1, ..., bB, f1, ..., fF]`, where `B` may be zero (ie, no
batching) and `F` (number of feature dimensions) must equal the kernel's
`feature_ndims` property. Batch shape must broadcast with the batch
shape of `x2` and with the kernel's parameters.
x2: `Tensor` input to the second positional parameter of the kernel,
shape `[c1, ..., cC, f1, ..., fF]`, where `C` may be zero (ie, no
batching) and `F` (number of feature dimensions) must equal the kernel's
`feature_ndims` property. Batch shape must broadcast with the batch
shape of `x1` and with the kernel's parameters.
Returns:
`Tensor` containing the (batch of) results of applying the kernel function
to inputs `x1` and `x2`. If the kernel parameters' batch shape is
`[k1, ..., kK]` then the shape of the `Tensor` resulting from this method
call is `broadcast([b1, ..., bB], [c1, ..., cC], [k1, ..., kK])`.
Given an index set `S`, a kernel function is mathematically defined as a
    real- or complex-valued function on `S x S` satisfying the
positive semi-definiteness constraint:
```none
sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0
```
for any finite collections `{x[1], ..., x[N]}` in `S` and
`{c[1], ..., c[N]}` in the reals (or the complex plane). '*' is the complex
conjugate, in the complex case.
This method most closely resembles the function described in the
mathematical definition of a kernel. Given a PositiveSemidefiniteKernel `k`
with scalar parameters and inputs `x` and `y` in `S`, `apply(x, y)` yields a
single scalar value. Given the same kernel and, say, batched inputs of shape
`[b1, ..., bB, f1, ..., fF]`, it will yield a batch of scalars of shape
`[b1, ..., bB]`.
#### Examples
```python
import tensorflow_probability as tfp
# Suppose `SomeKernel` acts on vectors (rank-1 tensors)
scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)
scalar_kernel.batch_shape
# ==> []
# `x` and `y` are batches of five 3-D vectors:
x = np.ones([5, 3], np.float32)
y = np.ones([5, 3], np.float32)
scalar_kernel.apply(x, y).shape
# ==> [5]
```
The above output is the result of vectorized computation of the five values
```none
[k(x[0], y[0]), k(x[1], y[1]), ..., k(x[4], y[4])]
```
Now we can consider a kernel with batched parameters:
```python
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])
batch_kernel.batch_shape
# ==> [2]
batch_kernel.apply(x, y).shape
# ==> Error! [2] and [5] can't broadcast.
```
The parameter batch shape of `[2]` and the input batch shape of `[5]` can't
be broadcast together. We can fix this by giving the parameter a shape of
`[2, 1]` which will correctly broadcast with `[5]` to yield `[2, 5]`:
```python
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(
param=[[.2], [.5]])
batch_kernel.batch_shape
# ==> [2, 1]
batch_kernel.apply(x, y).shape
# ==> [2, 5]
```
"""
with self._name_scope(self._name, values=[x1, x2]):
x1 = tf.convert_to_tensor(value=x1, name='x1')
x2 = tf.convert_to_tensor(value=x2, name='x2')
return self._apply(x1, x2)
def _apply(self, x1, x2, param_expansion_ndims=0):
"""Apply the kernel function to a pair of (batches of) inputs.
Args:
x1: `Tensor` input to the first positional parameter of the kernel, of
shape `[b1, ..., bB, f1, ..., fF]`, where `B` may be zero (no batching)
and `F` (number of feature dimensions) must equal the kernel's
`feature_ndims` property. Batch shape must broadcast with the batch
shape of `x2` and with the kernel's parameters *after* parameter
expansion (see `param_expansion_ndims` argument).
x2: `Tensor` input to the second positional parameter of the kernel,
shape `[c1, ..., cC, f1, ..., fF]`, where `C` may be zero (no batching)
and `F` (number of feature dimensions) must equal the kernel's
`feature_ndims` property. Batch shape must broadcast with the batch
shape of `x1` and with the kernel's parameters *after* parameter
expansion (see `param_expansion_ndims` argument).
param_expansion_ndims: Python `integer` enabling reshaping of kernel
parameters by concatenating a list of 1's to the param shapes. This
allows the caller to control how the parameters broadcast across the
inputs.
Returns:
`Tensor` containing the (batch of) results of applying the kernel function
to inputs `x1` and `x2`. If the kernel parameters' batch shape *after*
parameter expansion (ie, concatenating `param_expansion_ndims` 1's onto
the parameters' shapes) is `[k1, ..., kK, 1, ..., 1]` then the shape of
the `Tensor` resulting from this method call is
`broadcast([b1, ..., bB], [c1, ..., cC], [k1, ..., kK, 1, ..., 1])`.
"""
raise NotImplementedError(
'Subclasses must provide `_apply` implementation.')
def matrix(self, x1, x2):
"""Construct (batched) matrices from (batches of) collections of inputs.
Args:
x1: `Tensor` input to the first positional parameter of the kernel, of
shape `[b1, ..., bB, e1, f1, ..., fF]`, where `B` may be zero (ie, no
batching), e1 is an integer greater than zero, and `F` (number of
feature dimensions) must equal the kernel's `feature_ndims` property.
Batch shape must broadcast with the batch shape of `x2` and with the
kernel's parameters *after* parameter expansion (see
`param_expansion_ndims` argument).
x2: `Tensor` input to the second positional parameter of the kernel,
shape `[c1, ..., cC, e2, f1, ..., fF]`, where `C` may be zero (ie, no
batching), e2 is an integer greater than zero, and `F` (number of
feature dimensions) must equal the kernel's `feature_ndims` property.
Batch shape must broadcast with the batch shape of `x1` and with the
kernel's parameters *after* parameter expansion (see
`param_expansion_ndims` argument).
Returns:
      `Tensor` containing (batch of) matrices of kernel applications to pairs
from inputs `x1` and `x2`. If the kernel parameters' batch shape is
`[k1, ..., kK]`, then the shape of the resulting `Tensor` is
`broadcast([b1, ..., bB], [c1, ..., cC], [k1, ..., kK]) + [e1, e2]`.
Given inputs `x1` and `x2` of shapes
```none
[b1, ..., bB, e1, f1, ..., fF]
```
and
```none
[c1, ..., cC, e2, f1, ..., fF]
```
This method computes the batch of `e1 x e2` matrices resulting from applying
the kernel function to all pairs of inputs from `x1` and `x2`. The shape
of the batch of matrices is the result of broadcasting the batch shapes of
`x1`, `x2`, and the kernel parameters (see examples below). As such, it's
required that these shapes all be broadcast compatible. However, the kernel
parameter batch shapes need not broadcast against the 'example shapes' (`e1`
and `e2` above).
When the two inputs are the (batches of) identical collections, the
resulting matrix is the so-called Gram (or Gramian) matrix
(https://en.wikipedia.org/wiki/Gramian_matrix).
N.B., this method can only be used to compute the pairwise application of
the kernel function on rank-1 collections. E.g., it *does* support inputs of
shape `[e1, f]` and `[e2, f]`, yielding a matrix of shape `[e1, e2]`. It
*does not* support inputs of shape `[e1, e2, f]` and `[e3, e4, f]`, yielding
a `Tensor` of shape `[e1, e2, e3, e4]`. To do this, one should instead
reshape the inputs and pass them to `apply`, e.g.:
```python
k = tfpk.SomeKernel()
    t1 = tf.placeholder(tf.float32, [4, 4, 3])
    t2 = tf.placeholder(tf.float32, [5, 5, 3])
k.apply(
tf.reshape(t1, [4, 4, 1, 1, 3]),
tf.reshape(t2, [1, 1, 5, 5, 3])).shape
    # ==> [4, 4, 5, 5]
```
`matrix` is a special case of the above, where there is only one example
dimension; indeed, its implementation looks almost exactly like the above
(reshaped inputs passed to the private version of `_apply`).
#### Examples
First, consider a kernel with a single scalar parameter.
```python
import tensorflow_probability as tfp
scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)
scalar_kernel.batch_shape
# ==> []
# Our inputs are two lists of 3-D vectors
x = np.ones([5, 3], np.float32)
y = np.ones([4, 3], np.float32)
scalar_kernel.matrix(x, y).shape
# ==> [5, 4]
```
The result comes from applying the kernel to the entries in `x` and `y`
pairwise, across all pairs:
```none
| k(x[0], y[0]) k(x[0], y[1]) ... k(x[0], y[3]) |
| k(x[1], y[0]) k(x[1], y[1]) ... k(x[1], y[3]) |
| ... ... ... |
| k(x[4], y[0]) k(x[4], y[1]) ... k(x[4], y[3]) |
```
Now consider a kernel with batched parameters with the same inputs
```python
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[1., .5])
batch_kernel.batch_shape
# ==> [2]
batch_kernel.matrix(x, y).shape
# ==> [2, 5, 4]
```
This results in a batch of 2 matrices, one computed from the kernel with
`param = 1.` and the other with `param = .5`.
We also support batching of the inputs. First, let's look at that with
the scalar kernel again.
```python
# Batch of 10 lists of 5 vectors of dimension 3
x = np.ones([10, 5, 3], np.float32)
# Batch of 10 lists of 4 vectors of dimension 3
y = np.ones([10, 4, 3], np.float32)
scalar_kernel.matrix(x, y).shape
# ==> [10, 5, 4]
```
The result is a batch of 10 matrices built from the batch of 10 lists of
input vectors. These batch shapes have to be broadcastable. The following
will *not* work:
```python
x = np.ones([10, 5, 3], np.float32)
y = np.ones([20, 4, 3], np.float32)
scalar_kernel.matrix(x, y).shape
# ==> Error! [10] and [20] can't broadcast.
```
Now let's consider batches of inputs in conjunction with batches of kernel
parameters. We require that the input batch shapes be broadcastable with
the kernel parameter batch shapes, otherwise we get an error:
```python
x = np.ones([10, 5, 3], np.float32)
y = np.ones([10, 4, 3], np.float32)
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(params=[1., .5])
batch_kernel.batch_shape
# ==> [2]
batch_kernel.matrix(x, y).shape
# ==> Error! [2] and [10] can't broadcast.
```
The fix is to make the kernel parameter shape broadcastable with `[10]` (or
reshape the inputs to be broadcastable!):
```python
x = np.ones([10, 5, 3], np.float32)
y = np.ones([10, 4, 3], np.float32)
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(
params=[[1.], [.5]])
batch_kernel.batch_shape
# ==> [2, 1]
batch_kernel.matrix(x, y).shape
# ==> [2, 10, 5, 4]
# Or, make the inputs broadcastable:
x = np.ones([10, 1, 5, 3], np.float32)
y = np.ones([10, 1, 4, 3], np.float32)
batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(
params=[1., .5])
batch_kernel.batch_shape
# ==> [2]
batch_kernel.matrix(x, y).shape
# ==> [10, 2, 5, 4]
```
Here, we have the result of applying the kernel, with 2 different
parameters, to each of a batch of 10 pairs of input lists.
"""
with self._name_scope(self._name, values=[x1, x2]):
x1 = tf.convert_to_tensor(value=x1, name='x1')
x2 = tf.convert_to_tensor(value=x2, name='x2')
x1 = tf.expand_dims(x1, -(self.feature_ndims + 1))
x2 = tf.expand_dims(x2, -(self.feature_ndims + 2))
return self._apply(x1, x2, param_expansion_ndims=2)
def _batch_shape(self):
raise NotImplementedError('Subclasses must provide batch_shape property.')
def _batch_shape_tensor(self):
raise NotImplementedError(
'Subclasses must provide batch_shape_tensor implementation')
def __add__(self, k):
if not isinstance(k, PositiveSemidefiniteKernel):
raise ValueError(
"Can't add non-kernel (of type '%s') to kernel" % type(k))
return _SumKernel([self, k])
def __iadd__(self, k):
return self.__add__(k)
def __mul__(self, k):
if not isinstance(k, PositiveSemidefiniteKernel):
raise ValueError(
"Can't multiply by non-kernel (of type '%s') to kernel" % type(k))
return _ProductKernel([self, k])
def __imul__(self, k):
return self.__mul__(k)
def __str__(self):
return ('tfp.positive_semidefinite_kernels.{type_name}('
'"{self_name}"'
'{maybe_batch_shape}'
', feature_ndims={feature_ndims}'
', dtype={dtype})'.format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(', batch_shape={}'.format(self.batch_shape)
if self.batch_shape.ndims is not None
else ''),
feature_ndims=self.feature_ndims,
dtype=None if self.dtype is None else self.dtype.name))
def __repr__(self):
return ('<tfp.positive_semidefinite_kernels.{type_name} '
'\'{self_name}\''
' batch_shape={batch_shape}'
' feature_ndims={feature_ndims}'
' dtype={dtype}>'.format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
feature_ndims=self.feature_ndims,
dtype=None if self.dtype is None else self.dtype.name))
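# --- Editor's example (not part of the library) -------------------------------
# A minimal concrete subclass, added to illustrate the `_apply`/`_batch_shape`
# contract described in the docstrings above. It assumes scalar (unbatched)
# parameters, so `param_expansion_ndims` needs no parameter-shape padding; the
# real kernels in this package also handle batched parameters.
class _ExampleExponentiatedQuadratic(PositiveSemidefiniteKernel):
  """k(x, y) = amplitude**2 * exp(-||x - y||**2 / (2 * length_scale**2))."""
  def __init__(self, amplitude=1., length_scale=1., name=None):
    self._amplitude = amplitude
    self._length_scale = length_scale
    super(_ExampleExponentiatedQuadratic, self).__init__(
        feature_ndims=1, dtype=tf.float32, name=name or 'ExampleExpQuad')
  def _apply(self, x1, x2, param_expansion_ndims=0):
    # Reduce over the single feature dimension; batch/example broadcasting
    # comes from the inputs themselves. Scalar parameters broadcast trivially,
    # so `param_expansion_ndims` is intentionally unused here.
    sq_dist = tf.reduce_sum((x1 - x2)**2, axis=-1)
    return self._amplitude**2 * tf.exp(-0.5 * sq_dist / self._length_scale**2)
  def _batch_shape(self):
    return tf.TensorShape([])  # scalar parameters => empty batch shape
  def _batch_shape_tensor(self):
    return tf.constant([], dtype=tf.int32)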
def _flatten_summand_list(kernels):
"""Flatten a list of kernels which may contain _SumKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _SumKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _SumKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
def _flatten_multiplicand_list(kernels):
"""Flatten a list of kernels which may contain _ProductKernel instances.
Args:
kernels: Python list of `PositiveSemidefiniteKernel` instances
Returns:
Python list containing the elements of kernels, with any _ProductKernel
instances replaced by their `kernels` property contents.
"""
flattened = []
for k in kernels:
if isinstance(k, _ProductKernel):
flattened += k.kernels
else:
flattened.append(k)
return flattened
class _SumKernel(PositiveSemidefiniteKernel):
"""Kernel class representing summation over a list of kernels.
Mathematically this class represents the pointwise sum of several kernels.
Given two kernels, `k1` and `k2`, and `kp = _SumKernel([k1, k2])`, we have
```none
kp.apply(x, y) = k1(x, y) + k2(x, y)
```
for any `x`, `y` in the feature space (this presumes that the constituent
kernels all act on the same feature space).
That the sum is positive semi-definite follows simply from the definition of
positive semi-definiteness of functions. If we have
```none
sum_i sum_j (c[i]*) c[j] k1(x[i], x[j]) >= 0
```
and
```none
sum_i sum_j (c[i]*) c[j] k2(x[i], x[j]) >= 0
```
for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in
the reals (or the complex plane), then we clearly also have the same for the
sum of `k1` and `k2`.
"""
def __init__(self, kernels, name=None):
"""Create a kernel which is the sum of `kernels`.
The input list is 'flattened' in the sense that any entries which are also
of type `_SumKernel` will have their list of kernels appended to this
instance's list of kernels. This will reduce the stack depth when actually
evaluating the sum over kernel applications.
Args:
kernels: Python `list` of `PositiveSemidefiniteKernel` instances.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `kernels` is an empty list, or `kernels` don't all have the
same `feature_ndims`.
"""
if not kernels:
raise ValueError("Can't create _SumKernel over empty list.")
if len(set([k.feature_ndims for k in kernels])) > 1:
raise ValueError(
"Can't sum kernels with different feature_ndims. Got:\n%s" %
str([k.feature_ndims for k in kernels]))
self._kernels = _flatten_summand_list(kernels)
if name is None:
name = 'SumKernel'
# We have ensured the list is non-empty and all feature_ndims are the same.
super(_SumKernel, self).__init__(
feature_ndims=kernels[0].feature_ndims,
dtype=util.maybe_get_common_dtype(
[None if k.dtype is None else k for k in kernels]),
name=name)
@property
def kernels(self):
"""The list of kernels this _SumKernel sums over."""
return self._kernels
def _apply(self, x1, x2, param_expansion_ndims=0):
return sum([k._apply(x1, x2, param_expansion_ndims) for k in self.kernels]) # pylint: disable=protected-access
def _batch_shape(self):
return functools.reduce(tf.broadcast_static_shape,
[k.batch_shape for k in self.kernels])
def _batch_shape_tensor(self):
return functools.reduce(tf.broadcast_dynamic_shape,
[k.batch_shape_tensor() for k in self.kernels])
class _ProductKernel(PositiveSemidefiniteKernel):
"""Kernel class representing the product over a list of kernels.
Mathematically this class represents the pointwise product of several kernels.
Given two kernels, `k1` and `k2`, and `kp = _ProductKernel([k1, k2])`, we have
```none
kp.apply(x, y) = k1(x, y) * k2(x, y)
```
for any x, y in the feature space (this presumes that the constituent kernels
all act on the same feature space).
The fact that this product is still positive semi-definite can be shown in a
variety of ways, many deep and all fascinating, but follows readily from the
[Schur product theorem](https://en.wikipedia.org/wiki/Schur_product_theorem),
which states that the Hadamard (element-wise) product of two PSD matrices is
also PSD.
"""
def __init__(self, kernels, name=None):
"""Create a kernel which is the product of `kernels`.
The input list is 'flattened' in the sense that any entries which are also
of type `_ProductKernel` will have their list of kernels appended to this
instance's list of kernels. This will reduce the stack depth when actually
evaluating the product over kernel applications.
Args:
kernels: Python `list` of `PositiveSemidefiniteKernel` instances.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `kernels` is an empty list, or `kernels` don't all have the
same `feature_ndims`.
"""
if not kernels:
raise ValueError("Can't create _ProductKernel over empty list.")
if len(set([k.feature_ndims for k in kernels])) > 1:
raise ValueError(
"Can't multiply kernels with different feature_ndims. Got:\n%s" %
str([k.feature_ndims for k in kernels]))
self._kernels = _flatten_multiplicand_list(kernels)
if name is None:
name = 'ProductKernel'
# We have ensured the list is non-empty and all feature_ndims are the same.
super(_ProductKernel, self).__init__(
feature_ndims=kernels[0].feature_ndims,
dtype=util.maybe_get_common_dtype(
[None if k.dtype is None else k for k in kernels]),
name=name)
@property
def kernels(self):
"""The list of kernels this _ProductKernel multiplies over."""
return self._kernels
def _apply(self, x1, x2, param_expansion_ndims=0):
return functools.reduce(
operator.mul,
[k._apply(x1, x2, param_expansion_ndims) for k in self.kernels]) # pylint: disable=protected-access
def _batch_shape(self):
return functools.reduce(tf.broadcast_static_shape,
[k.batch_shape for k in self.kernels])
def _batch_shape_tensor(self):
return functools.reduce(tf.broadcast_dynamic_shape,
[k.batch_shape_tensor() for k in self.kernels])
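# --- Editor's example (not part of the library) -------------------------------
# `+` and `*` on kernels route through __add__/__mul__ above, producing the
# private _SumKernel/_ProductKernel combinations. Uses the sketch subclass
# defined earlier in this file.
def _example_combining_kernels():
  k1 = _ExampleExponentiatedQuadratic(amplitude=1., length_scale=0.5)
  k2 = _ExampleExponentiatedQuadratic(amplitude=2., length_scale=2.)
  k = k1 + k2 * k2  # _SumKernel([k1, _ProductKernel([k2, k2])])
  x = tf.ones([5, 3])  # five 3-d feature vectors
  return k.matrix(x, x)  # Gram matrix of shape [5, 5]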
| [
"[email protected]"
] | |
b43075304e3f3743babbc05752088d35d38d269f | 8370083dbbbd32740ad1862637809396dc7984e2 | /paresh60/a12.py | 673c08cc381941391743fa151ddb4784c6997b26 | [] | no_license | parshuramsail/PYTHON_LEARN | a919b14aab823e0f5e769d8936ddbfb357133db2 | 8c76720bf73f13cf96930e6d4d5128e6ba9aa535 | refs/heads/main | 2023-07-14T16:25:26.240555 | 2021-08-29T17:10:19 | 2021-08-29T17:10:19 | 401,095,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # Method over-loading in python using default argument.
class A:
def first(self,f=None):
if f is not None:
print('Method',f)
else:
print('method without argument')
a=A()
a.first()
print(a.first('with argument'))
| [
"[email protected]"
] | |
29e71287fe48a5b6f7722a2cac26373b8fdbe652 | d6833270e21fc14d8dd9d6624f8906ed7fe3ae86 | /SoarUtils.py | 3944a872bd43b057c2eda986c0618db191e20328 | [
"MIT"
] | permissive | ChienDavid/pysoarlib | 0f9c91424f7900d43b39e401f91274b0fdf21540 | 3c722d163acf3dd35c5be914bf200e23369e169a | refs/heads/master | 2022-10-24T22:37:24.839494 | 2020-06-13T15:10:32 | 2020-06-13T15:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,783 | py | import Python_sml_ClientInterface as sml
from .SoarWME import SoarWME
class SoarUtils:
""" A Class containing static utility methods for dealing with Soar and working memory """
def update_wm_from_tree(root_id, root_name, input_dict, wme_table):
"""
Recursively update WMEs that have a sub-tree structure rooted at the given identifier.
We scan through the `input_dict`, which represents the input value getters (or further
sub-trees) of the sub-tree root, either adding terminal WMEs as usual or further recursing.
:param root_id: The sml identifier of the root of the sub-tree
:param root_name: The attribute which is the root of this sub-tree
:param input_dict: A dict mapping attributes to getter functions
:param wme_table: A table to lookup and store wme's and identifiers
:return: None
"""
assert isinstance(input_dict, dict), "Should only recurse on dicts!"
for attribute in input_dict.keys():
input_val = input_dict[attribute]
child_name = root_name + "." + attribute
if not callable(input_val):
if child_name not in wme_table:
wme_table[child_name] = root_id.CreateIdWME(attribute)
child_id = wme_table[child_name]
SoarUtils.update_wm_from_tree(child_id, child_name, input_val, wme_table)
continue
value = input_val()
if child_name not in wme_table:
wme_table[child_name] = SoarWME(att=attribute, val=value)
wme = wme_table[child_name]
wme.set_value(value)
wme.update_wm(root_id)
def remove_tree_from_wm(wme_table):
"""
Given a wme_table filled by SoarUtils.update_wm_from_tree, removes all wmes from working memory
Intermediate nodes are sml.Identifiers, which are removed from the table
Leaves are SoarWME's which are kept in the table but .remove_from_wm() is called on them
"""
items_to_remove = set()
for path, wme in wme_table.items():
if isinstance(wme, sml.Identifier):
items_to_remove.add(path)
else:
wme.remove_from_wm()
for path in items_to_remove:
del wme_table[path]
def extract_wm_graph(root_id, max_depth=1000000, id_map=None):
"""
Given a soar identifier (root_id), crawls over the children and builds a graph rep for them
Return dictionary:
d['__id__'] = root_id
d['__sym__'] = root_id as a string
d['attr'] = constant # for string, double, or int value
d['attr'] = dict # for identifier
d['attr'] = [ val1, val2, ... ] # for multi-valued attributes
This will handle loops, where the same dict will be reused for each reference to an identifier
Example:
Given an identifier <obj> with the following wm structure:
(<obj> ^id 5 ^volume 23.3 ^predicates <preds>)
(<preds> ^predicate red ^predicate cube ^predicate block)
Will return the following dictionary:
{
            '__id__': (sml Identifier for <obj>)
'__sym__': 'O32'
'id' : 5 (int),
'volume': 23.3 (float),
'predicates': {
'__id__': (sml Identifier for <preds>)
'__sym__': 'P53'
'predicate': [ 'red', 'cube', 'block' ]
}
}
:param root_id: The sml identifier of the root of the sub-graph
:param max_depth: The maximum depth to extract
:param id_map: A dictionary from identifiers to their corresponding dictionaries
        :return: a dict containing a recursive enumeration of all children reachable from the given root_id
"""
if id_map is None:
id_map = dict()
root_id_str = root_id.GetValueAsString()
if root_id_str in id_map:
return id_map[root_id_str]
child_wmes = dict()
child_wmes['__id__'] = root_id
child_wmes['__sym__'] = root_id_str
id_map[root_id_str] = child_wmes
if max_depth == 0:
return child_wmes
for index in range(root_id.GetNumberChildren()):
wme = root_id.GetChild(index)
attr = wme.GetAttribute()
if wme.IsIdentifier():
wme_val = SoarUtils.extract_wm_graph(wme.ConvertToIdentifier(), max_depth-1, id_map)
elif wme.GetValueType() == "int":
wme_val = wme.ConvertToIntElement().GetValue()
elif wme.GetValueType() == "double":
wme_val = wme.ConvertToFloatElement().GetValue()
else:
wme_val = wme.GetValueAsString()
if attr in child_wmes:
cur_val = child_wmes[attr]
if isinstance(cur_val, list):
cur_val.append(wme_val)
else:
child_wmes[attr] = [ cur_val, wme_val ]
else:
child_wmes[attr] = wme_val
return child_wmes
def wm_graph_to_str(wm_graph):
"""
Given a wm_graph produced by extract_wm_graph, returns a nicely formatted string representation of it
:param wm_graph: A dictionary representing a wm graph produced by extract_wm_graph
"""
return SoarUtils._wm_value_to_str(wm_graph, "", set())
def _wm_value_to_str(val, indent, ignore_ids):
"""
recursive helper function which returns a string representation of any given value type
(str, int, float, list, dict)
        :param val: the value to convert -- a str, int, float, list, or dict produced by extract_wm_graph
:param indent: a string of spaces to indent the current level
:param ignore_ids: A set of Identifiers to not print
"""
if isinstance(val, str):
return val
if isinstance(val, int):
return str(val)
if isinstance(val, float):
return str(val)
if isinstance(val, list):
return "[ " + ", ".join(SoarUtils._wm_value_to_str(i, indent, ignore_ids) for i in val) + " ]"
if not isinstance(val, dict):
return ""
id_str = val['__sym__']
if id_str in ignore_ids:
return "<" + id_str + ">"
ignore_ids.add(id_str)
if len(val) == 1:
return "<" + id_str + ">"
s = "<" + id_str + "> {\n"
for a, v in val.items():
if a == '__sym__' or a == '__id__':
continue
s += indent + " " + a + ": " + SoarUtils._wm_value_to_str(v, indent + " ", ignore_ids) + "\n"
s += indent + "}"
return s
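# --- Editor's example (not part of the library) -------------------------------
# `wm_graph_to_str` only inspects '__sym__' keys and plain values, so it can be
# demonstrated on a hand-built dict shaped like `extract_wm_graph` output. The
# '__id__' entries would normally hold sml Identifiers; None stands in here
# purely for illustration.
def _example_wm_graph_to_str():
    preds = {'__id__': None, '__sym__': 'P53',
             'predicate': ['red', 'cube', 'block']}
    obj = {'__id__': None, '__sym__': 'O32',
           'id': 5, 'volume': 23.3, 'predicates': preds}
    return SoarUtils.wm_graph_to_str(obj)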
| [
"[email protected]"
] | |
8f8df82e8f10eae8a382750c473b56390d9cf50e | 52243c4a05a296e7c042663b5942faa47eb66aee | /common/plot_points_shp.py | 2f0e6e7945bf659016fd5b03ffaaa47015baf92c | [
"MIT"
] | permissive | joaoppadua/Pesquisas | fbe0311b59340c041732d6d1f7f4862fa6c53198 | 808d8b0ef9e432e05a4f284ce18778ed8b3acd96 | refs/heads/master | 2023-07-16T02:50:30.846205 | 2021-09-03T13:34:54 | 2021-09-03T13:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import matplotlib.pyplot as plt, geopandas as gpd
from sklearn.preprocessing import MinMaxScaler
def plot_points_shape_file(
shape_file, list_values, list_longitude, list_latitude, title
):
scaler = MinMaxScaler(feature_range=(50, 1000))
list_final = scaler.fit_transform([[i] for i in (list_values)])
gdf = gpd.read_file(shape_file)
fig, ax = plt.subplots(figsize=(20, 10))
ax.axis("off")
for i in range(len(list_values)):
plt.scatter(x=[list_longitude[i]], y=[list_latitude[i]], s=list_final[i])
plt.title(title)
plt.tight_layout()
gdf.plot(facecolor="none", ax=ax, legend=True, linewidth=1, edgecolor="black")
| [
"[email protected]"
] | |
44ae1b353c4c95c6209f6de8cde15112550f0a3b | f966c891c666db846d86406cb9c08a530902d032 | /algorithms/implementation/divisible_sum_pairs.py | 94ca67a38d2a2b8e609d8ed2769e0956df8b0853 | [] | no_license | rickharris-dev/hacker-rank | 36620a16894571e324422c83bd553440cf5bbeb1 | 2ad0fe4b496198bec1b900d2e396a0704bd0c6d4 | refs/heads/master | 2020-12-25T14:33:20.118325 | 2016-09-06T01:10:43 | 2016-09-06T01:10:43 | 67,264,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | #!/usr/bin/python
n,k = raw_input().strip().split(' ')
n,k = [int(n),int(k)]
a = map(int,raw_input().strip().split(' '))
count = 0
i = 0
for i in range(0,n):
j = i + 1
while j < n:
if (a[i] + a[j]) % k == 0:
count += 1
j += 1
print count
| [
"[email protected]"
] | |
85404e686fe6601424f46561d0938c179b5a1cb7 | dddbfd8eb6dff0bd3449bac87ee76b5c3e0bdfb1 | /icehouse-patches/neutron/dvr-patch/neutron/services/vpn/plugin.py | 74218e2ab6b6e6fecfba5e5f3a40c5d4d3e0f0af | [
"Apache-2.0"
] | permissive | joey5678/tricircle | 40897fed8fe9d6772e8878b4f06ba1a829636488 | e211f7efef129bbfb038cc05232ea1de33f82a97 | refs/heads/master | 2021-01-17T21:04:32.945469 | 2014-11-17T09:46:29 | 2014-11-17T10:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,525 | py |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard
from neutron.db.vpn import vpn_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class VPNPlugin(vpn_db.VPNPluginDb):
"""Implementation of the VPN Service Plugin.
This class manages the workflow of VPNaaS request/response.
Most DB related works are implemented in class
vpn_db.VPNPluginDb.
"""
supported_extension_aliases = ["vpnaas", "service-type"]
class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin):
"""VpnPlugin which supports VPN Service Drivers."""
#TODO(nati) handle ikepolicy and ipsecpolicy update usecase
def __init__(self):
super(VPNDriverPlugin, self).__init__()
# Load the service driver from neutron.conf.
drivers, default_provider = service_base.load_drivers(
constants.VPN, self)
LOG.info(_("VPN plugin using service driver: %s"), default_provider)
self.ipsec_driver = drivers[default_provider]
def _get_driver_for_vpnservice(self, vpnservice):
return self.ipsec_driver
def _get_driver_for_ipsec_site_connection(self, context,
ipsec_site_connection):
#TODO(nati) get vpnservice when we support service type framework
vpnservice = None
return self._get_driver_for_vpnservice(vpnservice)
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_site_connection = super(
VPNDriverPlugin, self).create_ipsec_site_connection(
context, ipsec_site_connection)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.create_ipsec_site_connection(context, ipsec_site_connection)
return ipsec_site_connection
def delete_ipsec_site_connection(self, context, ipsec_conn_id):
ipsec_site_connection = self.get_ipsec_site_connection(
context, ipsec_conn_id)
super(VPNDriverPlugin, self).delete_ipsec_site_connection(
context, ipsec_conn_id)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.delete_ipsec_site_connection(context, ipsec_site_connection)
def update_ipsec_site_connection(
self, context,
ipsec_conn_id, ipsec_site_connection):
old_ipsec_site_connection = self.get_ipsec_site_connection(
context, ipsec_conn_id)
ipsec_site_connection = super(
VPNDriverPlugin, self).update_ipsec_site_connection(
context,
ipsec_conn_id,
ipsec_site_connection)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.update_ipsec_site_connection(
context, old_ipsec_site_connection, ipsec_site_connection)
return ipsec_site_connection
def update_vpnservice(self, context, vpnservice_id, vpnservice):
old_vpn_service = self.get_vpnservice(context, vpnservice_id)
new_vpn_service = super(
VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id,
vpnservice)
driver = self._get_driver_for_vpnservice(old_vpn_service)
driver.update_vpnservice(context, old_vpn_service, new_vpn_service)
return new_vpn_service
def delete_vpnservice(self, context, vpnservice_id):
vpnservice = self._get_vpnservice(context, vpnservice_id)
super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id)
driver = self._get_driver_for_vpnservice(vpnservice)
driver.delete_vpnservice(context, vpnservice)
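# --- Editor's note ------------------------------------------------------------
# `service_base.load_drivers(constants.VPN, self)` reads the driver list from
# neutron.conf. An Icehouse-era configuration typically looks like the
# following (treat the exact module path as an illustrative assumption, not a
# verified value for this patched tree):
#
#   [service_providers]
#   service_provider = VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default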
| [
"[email protected]"
] | |
16cac5e803f640d4ca511eefb60e7fe8ff4999f1 | 73cb6d22aa85f00808fdc58d5998e85b5ff7deea | /anunciotest/urls.py | 2d4965a2709628e794b96ce06c579ad62f2af6d6 | [] | no_license | juanshocl/anunciotest | d0edf8b2535f31cbd93dbfe9d075b346a2e1247c | 43e447038233456727bcd689f48f6440a80a6d3f | refs/heads/master | 2023-08-14T05:53:29.389870 | 2020-06-05T15:36:54 | 2020-06-05T15:36:54 | 269,683,683 | 0 | 0 | null | 2021-09-22T19:09:54 | 2020-06-05T15:26:09 | Python | UTF-8 | Python | false | false | 768 | py | """anunciotest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
64d1d215eb66bb8fe647dfa71e5b4e50a6e7d16d | f9e1d9c71d232aa0bcf03627259e6c9f88538b18 | /gs70PageExpiredAfter20Seconds/gs70/urls.py | 9912e105f6b2d0ea566a6baf3471f65ae0e3e527 | [] | no_license | nayan-gujju/Django-Practice | a7db202b6a3627a6a4e9f96953b61e43eaf68cb1 | eafa29e9321a1683867b2ea1d26ca74dfa6db12d | refs/heads/master | 2023-07-27T11:41:43.956705 | 2021-09-09T08:47:44 | 2021-09-09T08:47:44 | 403,917,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | """gs70 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from student import views
urlpatterns = [
path('admin/', admin.site.urls),
path('set/', views.setsession),
path('get/', views.getsession),
path('del/', views.delsession),
]
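# --- Editor's sketch (hypothetical): the session views these routes imply.
# The real implementations live in student/views.py, which is not shown here;
# the project name suggests sessions expiring after 20 seconds, which
# `request.session.set_expiry(20)` provides.
#
#   from django.http import HttpResponse
#
#   def setsession(request):
#       request.session['name'] = 'student'   # placeholder key/value
#       request.session.set_expiry(20)        # session expires after 20 s
#       return HttpResponse('session set')
#
#   def getsession(request):
#       return HttpResponse(request.session.get('name', 'expired or not set'))
#
#   def delsession(request):
#       request.session.flush()
#       return HttpResponse('session deleted')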
| [
"[email protected]"
] | |
881028d34f8ae09d7ee3fa9e047a5a4c55c4a893 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_0_3_54N_c.py | a900e884b099ed456e2e4f9878c7443257e54913 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,146 | py | #!/usr/bin/python
luts = []
def initlut():
for i in range(2,11):
l = []
for k in range(0,32):
l.append(i**k)
luts.append(l)
# stackoverflow
def is_prime(n):
if n == 2 or n == 3: return True, 0
# never happens in our case?
if n < 2: return False, 1
if n%2 == 0: return False, 2
if n < 9: return True, 0
if n%3 == 0: return False, 3
r = int(n**0.5)
f = 5
while f <= r:
# print '\t',f
if n%f == 0: return False, f
if n%(f+2) == 0: return False, f+2
f +=6
return True, 0
def is_prime_nt(n):
x, y = is_prime(n)
return y
def yield_coin(N):
hbit = 2**(N-1)
for i in range(0,2**(N-2)):
s = "{0:b}".format(hbit + i*2 + 1)
bits = [int(x) for x in reversed(s)]
nums = []
for lut in luts:
nums.append(sum([y for x,y in zip(bits,lut) if x==1]))
divisor = []
for x in nums:
y = is_prime_nt(x)
divisor.append(y)
if y == 0:
break
if all(divisor):
yield s, nums, divisor
initlut()
N = 16
J = 50
print "Case #1:"
case = 0
for s, nums, divisor in yield_coin(N):
print s, " ".join([str(x) for x in divisor])
case += 1
#print case, s, nums, divisor
if case == J:
break
| [
"[[email protected]]"
] | |
773a41d0dcc8b30a7a4ad88f2e2719f1d65a4822 | 3774a634f361e99bb7f0b8fb172290b106e4072e | /grafatko/__init__.py | 0a13235dd8109a9d55e4de6c7e60d27ea97b7288 | [
"MIT"
] | permissive | knut0815/Grafatko | 13a7300a25e5931e4b4106a6b6425b0e4dadace4 | 11f09c15567e34e37aee07d8356af25b679fb429 | refs/heads/master | 2023-04-24T18:54:28.239521 | 2021-05-14T16:27:33 | 2021-05-14T16:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,837 | py | import os
import sys
import webbrowser
import argparse
from importlib.machinery import SourceFileLoader
from functools import partial
from random import random
from math import pi
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from qtmodern import styles
from grafatko.controls import *
from grafatko.graph import *
class Canvas(QWidget):
# WIDGET OPTIONS
contrast_coefficient = 10
background_brush = Brush(Color.background().lighter(100 + contrast_coefficient))
background_pen = Pen(Color.background().darker(100 + contrast_coefficient))
# whether the forces are enabled/disabled
forces: bool = True
# _ because the lambda gets self as the first argument
repulsion = lambda _, distance: (1 / distance) ** 2
attraction = lambda _, distance: -(distance - 6) / 3
tree = lambda _, v: v * 0.3
gravity = lambda _: Vector(0, 0.1)
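    # force model: all weakly connected node pairs repel with an
    # inverse-square force, adjacent nodes additionally attract like a spring
    # with rest distance 6, and rooted graphs pull each BFS layer toward its
    # average height while applying a constant downward gravity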
# the radius around which to check if the node moved when shift-selecting nodes
mouse_toggle_radius = 0.1
def __init__(self, line_edit, parent, update_ui_callback):
super().__init__(parent)
# GRAPH
self.graph = DrawableGraph(
selected_changed=self.selected_changed, animation_stopped=update_ui_callback
)
# CANVAS STUFF
self.transformation = Transformation(self)
# MOUSE
self.mouse = Mouse(self.transformation)
self.setMouseTracking(True)
self.keyboard = Keyboard()
self.line_edit = line_edit
self.line_edit.textEdited.connect(self.line_edit_changed)
# timer that runs the simulation (60 times a second... once every ~= 17ms)
QTimer(self, interval=17, timeout=self.update).start()
self.update_ui_callback = update_ui_callback
def update(self, *args):
"""A function that gets periodically called to update the canvas."""
# if the graph is rooted and we want to do forces
root = self.graph.get_root()
if root is not None and self.forces:
distances = self.graph.get_distance_from_root()
# calculate the forces within each BFS layer from root
for layer in distances:
if len(distances[layer]) < 1:
continue
pivot = Vector.average([n.get_position() for n in distances[layer]])
for node in distances[layer]:
vector = Vector(0, pivot[1] - node.get_position()[1])
node.add_force(self.tree(vector))
# add gravity
for node in self.graph.get_nodes():
if node is not root and self.graph.weakly_connected(node, root):
node.add_force(self.gravity())
# only move the nodes when forces are enabled
if self.forces:
for i, n1 in enumerate(self.graph.get_nodes()):
for n2 in self.graph.get_nodes()[i + 1 :]:
# only apply force, if n1 and n2 are weakly connected
if not self.graph.weakly_connected(n1, n2):
continue
d = n1.get_position().distance(n2.get_position())
# if they are on top of each other, nudge one of them slightly
if d == 0:
n1.add_force(Vector(random(), random()))
continue
# unit vector from n1 to n2
uv = (n2.get_position() - n1.get_position()).unit()
# the size of the repel force between the two nodes
fr = self.repulsion(d)
# add a repel force to each of the nodes, in the opposite directions
n1.add_force(-uv * fr)
n2.add_force(uv * fr)
# if they are also connected, add the attraction force
# the direction does not matter -- it would look weird for directed
if n1.is_adjacent_to(n2) or n2.is_adjacent_to(n1):
fa = self.attraction(d)
n1.add_force(-uv * fa)
n2.add_force(uv * fa)
# root is special
if n1 is root:
n1.clear_forces()
else:
n1.evaluate_forces()
# if space is being pressed, center around the currently selected nodes
# if there are none, center around their average
if self.keyboard.space.pressed():
sn = self.graph.get_selected_nodes()
pivot = None
if len(sn) != 0:
pivot = Vector.average([n.get_position() for n in sn])
elif len(self.graph.get_nodes()) != 0:
pivot = Vector.average(
[n.get_position() for n in self.graph.get_nodes()]
)
if pivot is not None:
self.transformation.center(pivot)
super().update(*args)
def line_edit_changed(self, text):
"""Called when the line edit associated with the Canvas changed."""
selected = self.graph.get_selected_objects()
if type(selected[0]) is DrawableNode:
selected[0].set_label(text)
else:
            try:
                weight = int(text)
            except ValueError:
                try:
                    weight = float(text)
                except ValueError:
                    weight = None
if weight is not None:
for v in selected:
self.graph.set_weight(v, weight)
def selected_changed(self):
"""Called when something in the graph gets selected/deselected."""
selected = self.graph.get_selected_objects()
# if nothing is selected, let the user know
if len(selected) == 0:
self.line_edit.setReadOnly(True)
self.line_edit.setText("Select a node or a vertex to edit.")
# else if more than two things are selected
elif len(selected) >= 2 and not (
type(selected[0]) is DrawableVertex
and type(selected[1]) is DrawableVertex
and selected[0][0] == selected[1][1]
and selected[0][1] == selected[1][0]
):
self.line_edit.setReadOnly(True)
self.line_edit.setText("Select only one node or a vertex to edit.")
# else if one is, focus on it
else:
self.line_edit.setReadOnly(False)
if type(selected[0]) is DrawableNode:
self.line_edit.setText(selected[0].get_label() or "")
else:
self.line_edit.setText(str(selected[0].get_weight()))
def paintEvent(self, event):
"""Paints the board."""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
palette = self.palette()
# clip
painter.setClipRect(0, 0, self.width(), self.height())
# draw the background
painter.setBrush(self.background_brush(palette))
painter.setPen(self.background_pen(palette))
painter.drawRect(0, 0, self.width() - 1, self.height() - 1)
# transform the coordinates according to the current state of the canvas
self.transformation.transform_painter(painter)
# draw the graph
self.graph.draw(painter, palette)
def keyReleaseEvent(self, event):
"""Called when a key press is registered."""
key = self.keyboard.released_event(event)
# if we release shift, stop shift-dragging the nodes
if key is self.keyboard.shift:
self.stop_shift_dragging_nodes()
def start_shift_dragging_nodes(self, additional: List[DrawableNode] = []):
"""Start dragging nodes that are weakly connected to some selected nodes (and
possibly also to those provided)."""
selected = self.graph.get_selected_nodes() + additional
for n in self.graph.get_weakly_connected(*selected):
if not n.is_dragged():
n.start_drag(self.mouse.get_position())
def stop_shift_dragging_nodes(self):
"""Stop dragging nodes that are weakly connected to some selected nodes."""
selected = self.graph.get_selected_nodes()
for n in self.graph.get_weakly_connected(*selected):
if n.is_dragged() and n not in selected:
n.stop_drag()
def keyPressEvent(self, event):
"""Called when a key press is registered."""
key = self.keyboard.pressed_event(event)
# toggle graph root on r press
if key is self.keyboard.r:
selected = self.graph.get_selected_nodes()
if self.graph.get_root() is not None:
self.graph.set_root(None)
elif len(selected) == 1:
self.graph.set_root(selected[0])
if key is self.keyboard.delete:
for node in self.graph.get_selected_nodes():
self.graph.remove_node(node)
for vertex in self.graph.get_selected_vertices():
self.graph.remove_vertex(vertex[0], vertex[1])
elif key is self.keyboard.shift and self.mouse.left.pressed():
self.start_shift_dragging_nodes()
def mouseMoveEvent(self, event):
"""Is called when the mouse is moved across the canvas."""
self.mouse.moved_event(event)
pressed_node = self.graph.node_at_position(self.mouse.get_position())
if (
self.mouse.left.pressed()
and pressed_node is not None
and self.mouse.current_last_distance() > self.mouse_toggle_radius
and len(self.graph.get_dragged_nodes()) > 0
):
self.select(pressed_node)
# move dragged nodes (unless we are holding down space, centering on them)
# also move the canvas (unless holding down space)
if not self.keyboard.space.pressed():
for node in self.graph.get_nodes():
if node.is_dragged():
node.set_position(self.mouse.get_position())
if self.mouse.middle.pressed():
# move canvas when the middle button is pressed
curr = self.mouse.get_position()
prev = self.mouse.get_previous_position()
self.transformation.translate(curr - prev)
def mouseReleaseEvent(self, event):
"""Is called when a mouse button is released."""
self.setFocus() # done so that key strokes register
key = self.mouse.released_event(event)
# get the node and the vertex at the position where we clicked
pressed_node = self.graph.node_at_position(self.mouse.get_position())
pressed_vertices = self.graph.vertices_at_position(self.mouse.get_position())
# stop dragging the nodes if left is released
if key is self.mouse.left:
for node in self.graph.get_nodes():
node.stop_drag()
# toggle if we haven't moved a lot
if (
self.mouse.current_last_distance() <= self.mouse_toggle_radius
and self.keyboard.shift.pressed()
):
if pressed_node is not None:
self.graph.toggle(pressed_node)
for vertex in pressed_vertices:
self.graph.toggle(vertex)
def mousePressEvent(self, event):
"""Called when a left click is registered."""
self.setFocus() # done so that key strokes register
key = self.mouse.pressed_event(event)
# get the node and the vertex at the position where we clicked
pressed_node = self.graph.node_at_position(self.mouse.get_position())
pressed_vertices = self.graph.vertices_at_position(self.mouse.get_position())
if key is self.mouse.left:
# if shift is not pressed, select the pressed thing immediately and deselect
# everything else
if not self.keyboard.shift.pressed():
self.graph.deselect_all()
# also start the drag if it's a node
if pressed_node is not None:
self.select(pressed_node)
pressed_node.start_drag(self.mouse.get_position())
for vertex in pressed_vertices:
self.select(vertex)
# else just start regular drag on the pressed node
else:
if pressed_node is not None:
pressed_node.start_drag(self.mouse.get_position())
self.start_shift_dragging_nodes([pressed_node])
if key is self.mouse.right:
selected = self.graph.get_selected_nodes()
if pressed_node is None:
# if there isn't a node at the position, create a new one, connect
# all selected to it and select
pressed_node = DrawableNode(position=self.mouse.get_position())
self.graph.add_node(pressed_node)
for node in selected:
self.graph.add_vertex(node, pressed_node)
self.select(pressed_node)
else:
# if there is, toggle vertices from selected to it
for node in selected:
self.graph.toggle_vertex(node, pressed_node)
def wheelEvent(self, event):
"""Is called when the mouse wheel is turned."""
# don't rotate rooted graphs
if self.graph.get_root() is not None:
return
delta = radians(event.angleDelta().y() / 8)
# rotate nodes on shift press
if self.keyboard.shift.pressed():
selected = self.graph.get_selected_nodes()
if len(selected) != 0:
nodes = self.graph.get_weakly_connected(
*self.graph.get_selected_nodes()
)
# if left mouse is pressed, don't rotate
if not self.mouse.left.pressed():
pivot = Vector.average([n.get_position() for n in selected])
self.rotate_about(nodes, delta, pivot)
# zoom on canvas on not shift press
else:
# if some nodes are being centered on, don't use mouse
nodes = self.graph.get_selected_nodes()
if self.keyboard.space.pressed() and len(nodes) != 0:
positions = [p.get_position() for p in nodes]
self.transformation.zoom(Vector.average(positions), delta)
else:
self.transformation.zoom(self.mouse.get_position(), delta)
def rotate_about(self, nodes: Sequence[DrawableNode], angle: float, pivot: Vector):
"""Rotate about the average of selected nodes by the angle."""
for node in nodes:
node.set_position(node.get_position().rotated(angle, pivot), True)
def select(self, obj: Union[DrawableNode, DrawableVertex]):
"""Select the given node/vertex."""
# only select one when shift is not pressed
if not self.keyboard.shift.pressed():
self.graph.deselect_all()
# else just select it
self.graph.select(obj)
def get_graph(self):
"""Get the current graph."""
return self.graph
def set_forces(self, value: bool):
"""Enable/disable the forces that act on the nodes."""
self.forces = value
def import_graph(self, path: str = None):
"""Either import a graph from the specified file, or prompt it."""
if path is None:
path = QFileDialog.getOpenFileName()[0]
if path == "":
return
try:
# create the graph
new_graph = DrawableGraph.from_string(
open(path, "r").read(),
selected_changed=self.selected_changed,
animation_stopped=self.update_ui_callback,
)
if new_graph is not None:
self.graph = new_graph
# make the graph less jittery by setting the positions to a circle
for i, node in enumerate(self.graph.get_nodes()):
node.set_position(
Vector(3, 3).rotated(i * (2 * pi / len(self.graph.get_nodes())))
)
# center on it (immediately)
self.transformation.center(
Vector.average([n.get_position() for n in self.graph.get_nodes()]),
center_smoothness=1,
)
except Exception as e:
QMessageBox.critical(
self, "Error!", "An error occurred when importing the graph."
)
self.update_ui_callback()
def export_graph(self):
"""Prompt a graph (from file) export."""
path = QFileDialog.getSaveFileName()[0]
if path == "":
return
try:
with open(path, "w") as f:
f.write(self.graph.to_string())
except Exception as e:
QMessageBox.critical(
self, "Error!", "An error occurred when exporting the graph."
)
# clean-up
os.remove(path)
def run_algorithm(self):
"""Select a file containing an algorithm and run it."""
path = QFileDialog.getOpenFileName()[0]
if path == "":
return
if not path.endswith(".py"):
QMessageBox.critical(self, "Error!", "The file must be a Python program.")
return
try:
filename = os.path.basename(path)[:-3]
cls = SourceFileLoader(filename, path).load_module()
getattr(cls, filename)(self.graph)
except AssertionError as e:
QMessageBox.critical(self, "Error!", str(e))
except AttributeError as e:
QMessageBox.critical(self, "Error!", f"Function '{filename}' not found.")
except Exception as e:
QMessageBox.critical(
self, "Error!", f"An error occurred when running the algorithm.\n\n{e}",
)
self.update_ui_callback()
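    # Plugin convention (illustrative restatement of the code above): a file
    # named `bfs.py` must define a function `bfs(graph)`; it is loaded as a
    # module and called with the canvas's DrawableGraph. The name `bfs` is a
    # hypothetical example.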
class Grafatko(QMainWindow):
def __init__(self, arguments):
super().__init__()
# build the entire interface
self.__create_interface(arguments)
self.show()
# import a graph from the get-go, if it's provided
if arguments.import_path is not None:
self.canvas.import_graph(arguments.import_path)
# set to light by default (unless there is an argument to set it to dark)
if arguments.dark:
styles.dark(QApplication.instance())
else:
styles.light(QApplication.instance())
def __create_interface(self, arguments):
"""A method that creates the entire interface."""
# Canvas (main widget)
self.line_edit = QLineEdit(self)
self.canvas = Canvas(self.line_edit, self, self.update_ui)
self.canvas.setMinimumSize(100, 200) # reasonable minimum size
self.setCentralWidget(self.canvas)
# Top menu bar
self.menubar = self.menuBar()
## menu bar separator
self.sep = QAction()
self.sep.setSeparator(True)
## file menu
self.file_menu = self.menubar.addMenu("&File")
self.file_menu.addActions(
[
QAction("&Import", self, triggered=lambda: self.canvas.import_graph()),
QAction("&Export", self, triggered=lambda: self.canvas.export_graph()),
self.sep,
QAction("&Quit", self, triggered=exit),
]
)
## preference menu
self.preferences_menu = self.menubar.addMenu("&Preferences")
self.preferences_menu.addAction(
QAction(
"&Dark Theme",
self,
checkable=True,
checked=arguments.dark,
triggered=partial(
lambda x, y: styles.dark(x) if y else styles.light(x),
QApplication.instance(),
),
)
)
## algorithm menu
self.help_menu = self.menubar.addMenu("&Algorithms")
self.help_menu.addAction(
QAction("&Run", self, triggered=self.canvas.run_algorithm)
)
## help menu
self.help_menu = self.menubar.addMenu("&Help")
self.help_menu.addActions(
[
QAction(
"&About",
self,
triggered=lambda: QMessageBox.information(
self,
"About",
"This application was created as a semester project for a "
"programming class at <a href='https://www.mff.cuni.cz/en'>MFF UK</a> "
"by Tomáš Sláma. It's open source (see the tab below) and licensed "
"under MIT, so do as you please with the code and anything else "
"related to the project.",
),
),
QAction(
"&Source Code",
self,
triggered=partial(
# TODO: make non-blocking
webbrowser.open,
"https://github.com/xiaoxiae/Grafatko",
),
),
]
)
# dock
self.dock_menu = QDockWidget("Settings", self)
self.dock_menu.setAllowedAreas(Qt.BottomDockWidgetArea)
self.dock_menu.setFeatures(QDockWidget.DockWidgetFloatable)
layout = QGridLayout()
## widgets
self.directed_checkbox = QCheckBox("directed", self, toggled=self.set_directed)
self.weighted_checkbox = QCheckBox(
"weighted",
self,
toggled=lambda value: self.canvas.get_graph().set_weighted(value),
)
self.reorient_pushbutton = QPushButton(
"reorient", self, pressed=lambda: self.canvas.get_graph().reorient()
)
self.pause_pushbutton = QPushButton(
"pause", self, pressed=lambda: self.canvas.get_graph().pause_animations(),
)
self.resume_pushbutton = QPushButton(
"resume", self, pressed=lambda: self.canvas.get_graph().resume_animations(),
)
self.clear_pushbutton = QPushButton(
"clear", self, pressed=self.clear_animations,
)
self.labels_checkbox = QCheckBox(
"labels",
self,
toggled=lambda value: self.canvas.get_graph().set_show_labels(value),
checked=True,
)
self.gravity_checkbox = QCheckBox(
"gravity",
self,
toggled=lambda value: self.canvas.set_forces(value),
checked=True,
)
self.complement_pushbutton = QPushButton(
"complement", self, pressed=lambda: self.canvas.get_graph().complement()
)
widgets = {
(0, 0): QLabel(self, text="Graph"),
(1, 0): self.directed_checkbox,
(2, 0): self.weighted_checkbox,
(0, 1): QLabel(self, text="Visual"),
(1, 1): self.labels_checkbox,
(2, 1): self.gravity_checkbox,
(0, 2): QLabel(self, text="Actions"),
(1, 2): self.complement_pushbutton,
(2, 2): self.reorient_pushbutton,
(0, 3, 1, 2): QLabel(self, text="Animations"),
(1, 3, 1, 1): self.pause_pushbutton,
(1, 4, 1, 1): self.resume_pushbutton,
(2, 3, 1, 2): self.clear_pushbutton,
(3, 0, 1, -1): self.line_edit,
}
## add all widgets to the dock
for k, v in widgets.items():
layout.addWidget(v, *k)
self.dock_widget = QWidget()
self.dock_widget.setLayout(layout)
### Set the dock menu as the dock widget for the app
self.dock_menu.setWidget(self.dock_widget)
self.addDockWidget(Qt.BottomDockWidgetArea, self.dock_menu)
self.setWindowIcon(QIcon("icon.ico"))
self.setWindowTitle("Grafátko")
self.update_ui()
def keyPressEvent(self, event):
self.canvas.keyPressEvent(event)
def keyReleaseEvent(self, event):
self.canvas.keyReleaseEvent(event)
def clear_animations(self):
"""Clear animations and update the UI (to disable the animation buttons)."""
self.canvas.get_graph().clear_animations()
self.update_ui()
def set_directed(self, value):
"""Set the direction of the graph, updating the UI."""
self.canvas.get_graph().set_directed(value)
self.update_ui()
def update_ui(self):
"""Update the UI according to the state of the canvas. Is triggered when canvas
lets this class know that something has changed."""
animations_active = self.canvas.get_graph().animations_active()
self.clear_pushbutton.setEnabled(animations_active)
self.pause_pushbutton.setEnabled(animations_active)
self.resume_pushbutton.setEnabled(animations_active)
self.weighted_checkbox.setChecked(self.canvas.get_graph().is_weighted())
self.directed_checkbox.setChecked(self.canvas.get_graph().is_directed())
self.reorient_pushbutton.setEnabled(self.canvas.get_graph().is_directed())
# to prevent weird focus on textbox
self.setFocus()
def run():
"""An entry point to the GUI."""
parser = argparse.ArgumentParser(
description="An app for creating and visualizing graphs and graph-related algorithms.",
)
parser.add_argument(
"-d", "--dark", dest="dark", action="store_true", help="use dark mode",
)
parser.add_argument(
"-i",
"--import",
dest="import_path",
default=None,
metavar="path",
help="import a graph",
)
arguments = parser.parse_args()
app = QApplication(sys.argv)
ex = Grafatko(arguments)
sys.exit(app.exec_())
if __name__ == "__main__":
run()
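# CLI usage sketch (assumption: the package installs a `grafatko` console
# entry point that calls run()):
#
#   grafatko --dark --import graph.txt
#
# starts the app in dark mode and imports the graph stored in graph.txt.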
| [
"[email protected]"
] | |
a73d86d3a4f951a81943d8e5269cbe679da0b96d | 7727187a009e4b9c46c2fe06609372ec8814cd23 | /freemix_akara/__init__.py | 91c164e4bb4284d6a3acfe41a1ff6a0104cecf65 | [] | no_license | govtmirror/freemix-akara | ebf204554f4effc0543e60083698f2ea012413b8 | 1d10c3f02afbd4268852e2c52afdf77809176bdd | refs/heads/master | 2021-01-12T07:47:08.183429 | 2014-06-05T18:53:56 | 2014-06-05T18:53:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | import os
import sys
target_version = "5.0.0"
def build_version():
distance ="0"
try:
from subprocess import Popen, PIPE
prev_tag,distance,revision = Popen(["git", "describe", "--match", "[0-9]*", "--long"],
cwd=os.path.dirname(__file__),
stdout=PIPE
).communicate()[0].strip().split("-")
if distance == "0":
return prev_tag
elif prev_tag == target_version:
return "%s.post%s"%(target_version, distance)
except Exception as e:
        print(e)
return "%s.dev%s"%(target_version, distance)
try:
from .version import __version__
except ImportError:
__version__=build_version()
| [
"[email protected]"
] | |
b09b6faf12f4a4ca4b0c15e69bb14fb55025f71c | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/lib/pymodules/python2.6/orca/scripts/apps/Instantbird/script_utilities.py | 30ada4b76dc1afe520b58b72cbf76629cd6b7945 | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /usr/share/pyshared/orca/scripts/apps/Instantbird/script_utilities.py | [
"[email protected]"
] | |
2a87718535884ed7542b5b091ab25bf9c0463213 | 43e0cfda9c2ac5be1123f50723a79da1dd56195f | /python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py | 506176d146c572f653a460b40608bbb55357141d | [
"Apache-2.0"
] | permissive | jiangjiajun/Paddle | 837f5a36e868a3c21006f5f7bb824055edae671f | 9b35f03572867bbca056da93698f36035106c1f3 | refs/heads/develop | 2022-08-23T11:12:04.503753 | 2022-08-11T14:40:07 | 2022-08-11T14:40:07 | 426,936,577 | 0 | 0 | Apache-2.0 | 2022-02-17T03:43:19 | 2021-11-11T09:09:28 | Python | UTF-8 | Python | false | false | 13,448 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
import math
import paddle
import paddle.fluid as fluid
from op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
import copy
from test_generate_proposals_op import clip_tiled_boxes, box_coder, nms
def python_generate_proposals_v2(
scores,
bbox_deltas,
img_size,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
pixel_offset=False,
return_rois_num=True,
):
rpn_rois, rpn_roi_probs, rpn_rois_num = paddle.vision.ops.generate_proposals(
scores,
bbox_deltas,
img_size,
anchors,
variances,
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
eta=eta,
pixel_offset=pixel_offset,
return_rois_num=return_rois_num)
return rpn_rois, rpn_roi_probs
def generate_proposals_v2_in_python(scores, bbox_deltas, im_shape, anchors,
variances, pre_nms_topN, post_nms_topN,
nms_thresh, min_size, eta, pixel_offset):
all_anchors = anchors.reshape(-1, 4)
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
rpn_rois = []
rpn_roi_probs = []
rois_num = []
num_images = scores.shape[0]
for img_idx in range(num_images):
img_i_boxes, img_i_probs = proposal_for_one_image(
im_shape[img_idx, :], all_anchors, variances,
bbox_deltas[img_idx, :, :, :], scores[img_idx, :, :, :],
pre_nms_topN, post_nms_topN, nms_thresh, min_size, eta,
pixel_offset)
rois_num.append(img_i_probs.shape[0])
rpn_rois.append(img_i_boxes)
rpn_roi_probs.append(img_i_probs)
return rpn_rois, rpn_roi_probs, rois_num
def proposal_for_one_image(im_shape, all_anchors, variances, bbox_deltas,
scores, pre_nms_topN, post_nms_topN, nms_thresh,
min_size, eta, pixel_offset):
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape(-1, 4)
all_anchors = all_anchors.reshape(-1, 4)
variances = variances.reshape(-1, 4)
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape(-1, 1)
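    # Shape example (illustrative): with A = 3 anchors on a 4x4 feature map,
    # bbox_deltas goes (12, 4, 4) -> (4, 4, 12) -> (48, 4) and scores goes
    # (3, 4, 4) -> (4, 4, 3) -> (48, 1), row-aligned with the flattened anchors.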
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays;
# First partition to get top K unsorted
# and then sort just those
inds = np.argpartition(-scores.squeeze(), pre_nms_topN)[:pre_nms_topN]
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
scores = scores[order, :]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
proposals = box_coder(all_anchors, bbox_deltas, variances, pixel_offset)
# clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = clip_tiled_boxes(proposals, im_shape, pixel_offset)
# remove predicted boxes with height or width < min_size
keep = filter_boxes(proposals, min_size, im_shape, pixel_offset)
if len(keep) == 0:
proposals = np.zeros((1, 4)).astype('float32')
scores = np.zeros((1, 1)).astype('float32')
return proposals, scores
proposals = proposals[keep, :]
scores = scores[keep, :]
# apply loose nms (e.g. threshold = 0.7)
# take post_nms_topN (e.g. 1000)
# return the top proposals
if nms_thresh > 0:
keep = nms(boxes=proposals,
scores=scores,
nms_threshold=nms_thresh,
eta=eta,
pixel_offset=pixel_offset)
if post_nms_topN > 0 and post_nms_topN < len(keep):
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep, :]
return proposals, scores
def filter_boxes(boxes, min_size, im_shape, pixel_offset=True):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
# Scale min_size to match image scale
min_size = max(min_size, 1.0)
offset = 1 if pixel_offset else 0
ws = boxes[:, 2] - boxes[:, 0] + offset
hs = boxes[:, 3] - boxes[:, 1] + offset
if pixel_offset:
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where((ws >= min_size) & (hs >= min_size)
& (x_ctr < im_shape[1]) & (y_ctr < im_shape[0]))[0]
else:
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
class TestGenerateProposalsV2Op(OpTest):
def set_data(self):
self.init_test_params()
self.init_test_input()
self.init_test_output()
self.inputs = {
'Scores': self.scores,
'BboxDeltas': self.bbox_deltas,
'ImShape': self.im_shape.astype(np.float32),
'Anchors': self.anchors,
'Variances': self.variances
}
self.attrs = {
'pre_nms_topN': self.pre_nms_topN,
'post_nms_topN': self.post_nms_topN,
'nms_thresh': self.nms_thresh,
'min_size': self.min_size,
'eta': self.eta,
'pixel_offset': self.pixel_offset,
}
self.outputs = {
'RpnRois': self.rpn_rois[0],
'RpnRoiProbs': self.rpn_roi_probs[0],
}
def test_check_output(self):
self.check_output(check_eager=False)
def setUp(self):
self.op_type = "generate_proposals_v2"
self.python_api = python_generate_proposals_v2
self.set_data()
def init_test_params(self):
self.pre_nms_topN = 12000 # train 12000, test 2000
self.post_nms_topN = 5000 # train 6000, test 1000
self.nms_thresh = 0.7
self.min_size = 3.0
self.eta = 1.
self.pixel_offset = True
def init_test_input(self):
batch_size = 1
input_channels = 20
layer_h = 16
layer_w = 16
input_feat = np.random.random(
(batch_size, input_channels, layer_h, layer_w)).astype('float32')
self.anchors, self.variances = anchor_generator_in_python(
input_feat=input_feat,
anchor_sizes=[16., 32.],
aspect_ratios=[0.5, 1.0],
variances=[1.0, 1.0, 1.0, 1.0],
stride=[16.0, 16.0],
offset=0.5)
self.im_shape = np.array([[64, 64]]).astype('float32')
num_anchors = self.anchors.shape[2]
self.scores = np.random.random(
(batch_size, num_anchors, layer_h, layer_w)).astype('float32')
self.bbox_deltas = np.random.random(
(batch_size, num_anchors * 4, layer_h, layer_w)).astype('float32')
def init_test_output(self):
self.rpn_rois, self.rpn_roi_probs, self.rois_num = generate_proposals_v2_in_python(
self.scores, self.bbox_deltas, self.im_shape, self.anchors,
self.variances, self.pre_nms_topN, self.post_nms_topN,
self.nms_thresh, self.min_size, self.eta, self.pixel_offset)
# class TestGenerateProposalsV2OpNoBoxLeft(TestGenerateProposalsV2Op):
# def init_test_params(self):
# self.pre_nms_topN = 12000 # train 12000, test 2000
# self.post_nms_topN = 5000 # train 6000, test 1000
# self.nms_thresh = 0.7
# self.min_size = 1000.0
# self.eta = 1.
# self.pixel_offset = True
# class TestGenerateProposalsV2OpNoOffset(TestGenerateProposalsV2Op):
# def init_test_params(self):
# self.pre_nms_topN = 12000 # train 12000, test 2000
# self.post_nms_topN = 5000 # train 6000, test 1000
# self.nms_thresh = 0.7
# self.min_size = 3.0
# self.eta = 1.
# self.pixel_offset = False
# class testGenerateProposalsAPI(unittest.TestCase):
# def setUp(self):
# np.random.seed(678)
# self.scores_np = np.random.rand(2, 3, 4, 4).astype('float32')
# self.bbox_deltas_np = np.random.rand(2, 12, 4, 4).astype('float32')
# self.img_size_np = np.array([[8, 8], [6, 6]]).astype('float32')
# self.anchors_np = np.reshape(np.arange(4 * 4 * 3 * 4),
# [4, 4, 3, 4]).astype('float32')
# self.variances_np = np.ones((4, 4, 3, 4)).astype('float32')
# self.roi_expected, self.roi_probs_expected, self.rois_num_expected = generate_proposals_v2_in_python(
# self.scores_np,
# self.bbox_deltas_np,
# self.img_size_np,
# self.anchors_np,
# self.variances_np,
# pre_nms_topN=10,
# post_nms_topN=5,
# nms_thresh=0.5,
# min_size=0.1,
# eta=1.0,
# pixel_offset=False)
# self.roi_expected = np.array(self.roi_expected).squeeze(1)
# self.roi_probs_expected = np.array(self.roi_probs_expected).squeeze(1)
# self.rois_num_expected = np.array(self.rois_num_expected)
# def test_dynamic(self):
# paddle.disable_static()
# scores = paddle.to_tensor(self.scores_np)
# bbox_deltas = paddle.to_tensor(self.bbox_deltas_np)
# img_size = paddle.to_tensor(self.img_size_np)
# anchors = paddle.to_tensor(self.anchors_np)
# variances = paddle.to_tensor(self.variances_np)
# rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
# scores,
# bbox_deltas,
# img_size,
# anchors,
# variances,
# pre_nms_top_n=10,
# post_nms_top_n=5,
# return_rois_num=True)
# self.assertTrue(np.allclose(self.roi_expected, rois.numpy()))
# self.assertTrue(np.allclose(self.roi_probs_expected, roi_probs.numpy()))
# self.assertTrue(np.allclose(self.rois_num_expected, rois_num.numpy()))
# def test_static(self):
# paddle.enable_static()
# scores = paddle.static.data(name='scores',
# shape=[2, 3, 4, 4],
# dtype='float32')
# bbox_deltas = paddle.static.data(name='bbox_deltas',
# shape=[2, 12, 4, 4],
# dtype='float32')
# img_size = paddle.static.data(name='img_size',
# shape=[2, 2],
# dtype='float32')
# anchors = paddle.static.data(name='anchors',
# shape=[4, 4, 3, 4],
# dtype='float32')
# variances = paddle.static.data(name='variances',
# shape=[4, 4, 3, 4],
# dtype='float32')
# rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
# scores,
# bbox_deltas,
# img_size,
# anchors,
# variances,
# pre_nms_top_n=10,
# post_nms_top_n=5,
# return_rois_num=True)
# exe = paddle.static.Executor()
# rois, roi_probs, rois_num = exe.run(
# paddle.static.default_main_program(),
# feed={
# 'scores': self.scores_np,
# 'bbox_deltas': self.bbox_deltas_np,
# 'img_size': self.img_size_np,
# 'anchors': self.anchors_np,
# 'variances': self.variances_np,
# },
# fetch_list=[rois.name, roi_probs.name, rois_num.name],
# return_numpy=False)
# self.assertTrue(np.allclose(self.roi_expected, np.array(rois)))
# self.assertTrue(
# np.allclose(self.roi_probs_expected, np.array(roi_probs)))
# self.assertTrue(np.allclose(self.rois_num_expected, np.array(rois_num)))
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| [
"[email protected]"
] | |
543f98e378822d026624672325a7e5e70e01bdbb | 0103046cd77e9f86ccde477736de36bba766ceb6 | /src/sentry/projectoptions/manager.py | e0fea86bd8bc6dd142d90dbc9d5bf9ee8979bf40 | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | kaozdl/sentry | ad41ada649a20300e9f2fe69050200cfbf738a63 | 63d698f5294f64a8c206b4c741e2a11be1f9a9be | refs/heads/master | 2021-06-21T18:24:21.713064 | 2021-03-04T19:45:20 | 2021-03-04T19:45:20 | 198,681,569 | 0 | 0 | BSD-3-Clause | 2019-07-24T17:32:29 | 2019-07-24T17:32:28 | null | UTF-8 | Python | false | false | 3,109 | py | import uuid
import bisect
from datetime import datetime
from pytz import utc
class WellKnownProjectOption:
def __init__(self, key, default=None, epoch_defaults=None):
self.key = key
self.default = default
self.epoch_defaults = epoch_defaults
self._epoch_default_list = sorted(epoch_defaults or ())
def get_default(self, project=None, epoch=None):
if self.epoch_defaults:
if epoch is None:
if project is None:
epoch = 1
else:
epoch = project.get_option("sentry:option-epoch") or 1
idx = bisect.bisect(self._epoch_default_list, epoch)
if idx > 0:
return self.epoch_defaults[self._epoch_default_list[idx - 1]]
return self.default
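# Illustrative sketch (not part of the original module): epoch defaults
# resolve to the value registered for the closest epoch <= the project's
# stored "sentry:option-epoch".
#
#   opt = WellKnownProjectOption(
#       "sentry:example", default="off",
#       epoch_defaults={1: "legacy", 7: "modern"})
#   opt.get_default(epoch=5)  # -> "legacy" (closest registered epoch is 1)
#   opt.get_default(epoch=7)  # -> "modern"
#   opt.get_default(epoch=1)  # -> "legacy"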
class ProjectOptionsManager:
"""Project options used to be implemented in a relatively ad-hoc manner
in the past. The project manager still uses the functionality of the
project model and just dispatches to it.
Options can be used without declaring defaults, but if defaults are
declared they are returned without having to define a default at the
time of the option lookup.
"""
def __init__(self):
self.registry = {}
def lookup_well_known_key(self, key):
return self.registry.get(key)
def freeze_option_epoch(self, project, force=False):
# The options are frozen in a receiver hook for project saves.
# See `sentry.receivers.core.freeze_option_epoch_for_project`
if force or project.get_option("sentry:option-epoch") is None:
from .defaults import LATEST_EPOCH
project.update_option("sentry:option-epoch", LATEST_EPOCH)
def set(self, project, key, value):
from sentry.models import ProjectOption
self.update_rev_for_option(project)
return ProjectOption.objects.set_value(project, key, value)
    def isset(self, project, key):
        # Project.get_option takes (key, default); passing the project object
        # as the key would never match a stored option.
        return project.get_option(key, Ellipsis) is not Ellipsis
def get(self, project, key, default=None, validate=None):
from sentry.models import ProjectOption
return ProjectOption.objects.get_value(project, key, default, validate=validate)
def delete(self, project, key):
from sentry.models import ProjectOption
self.update_rev_for_option(project)
return ProjectOption.objects.unset_value(project, key)
def update_rev_for_option(self, project):
from sentry.models import ProjectOption
ProjectOption.objects.set_value(project, "sentry:relay-rev", uuid.uuid4().hex)
ProjectOption.objects.set_value(
project, "sentry:relay-rev-lastchange", datetime.utcnow().replace(tzinfo=utc)
)
def register(self, key, default=None, epoch_defaults=None):
self.registry[key] = WellKnownProjectOption(
key=key, default=default, epoch_defaults=epoch_defaults
)
def all(self):
"""
Return an iterator for all keys in the registry.
"""
return self.registry.values()
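# Hypothetical usage sketch (added for illustration; the option key and
# defaults are made up):
#
#   manager = ProjectOptionsManager()
#   manager.register("sentry:example", default="off",
#                    epoch_defaults={1: "legacy", 7: "modern"})
#   option = manager.lookup_well_known_key("sentry:example")
#   value = manager.get(project, "sentry:example",
#                       default=option.get_default(project))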
| [
"[email protected]"
] | |
1a1cb129232e0e97be0322ccfb3ff3d58cbee9bc | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /readtwice/models/narrative_qa/preprocess_lib.py | c4826fd2778991d70acaaa4911bced36be5f44de | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 32,958 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing for NarrativeQA data."""
import csv
import json
import math
import os
import re
import string
from typing import Any, Iterator, List, Optional, Set, Text, Tuple
from absl import logging
import apache_beam as beam
from apache_beam import metrics
import dataclasses
import nltk
import tensorflow.compat.v1 as tf
from readtwice.data_utils import beam_utils
from readtwice.data_utils import data_utils
from readtwice.data_utils import tokenization
from readtwice.models.narrative_qa import extractive_oracle
METRICS_NAMESPACE = 'read_it_twice.narrative_qa'
SAMPLE_NO_ANSWER_QUESTIONS = 100
@dataclasses.dataclass(frozen=True)
class Question(object):
id: int
question_id: Text
value: Text
tokenized: Text
@dataclasses.dataclass(frozen=True)
class EvidenceInfo(object):
id: Text
source: Text
url: Text
@dataclasses.dataclass(frozen=True)
class Evidence(object):
info: EvidenceInfo
text: Text
summary: Optional[Text]
@dataclasses.dataclass(frozen=True)
class Answer(object):
"""Class represents answer for the question."""
values: List[Text]
tokenized: List[Text]
def _alias_answer(self, answer, include=None):
alias = answer.replace('_', ' ').lower()
exclude = set(string.punctuation + ''.join(['‘', '’', '´', '`']))
include = include or []
alias = ''.join(
c if c not in exclude or c in include else ' ' for c in alias)
return ' '.join(alias.split()).strip()
def make_answer_set(self, other_answers):
"""Apply less aggressive normalization to the answer aliases."""
answers = []
for alias in self.values + self.tokenized + other_answers:
answers.append(self._alias_answer(alias))
answers.append(self._alias_answer(alias, [',', '.']))
answers.append(self._alias_answer(alias, ['-']))
answers.append(self._alias_answer(alias, [',', '.', '-']))
answers.append(self._alias_answer(alias, string.punctuation))
answers = set(answers)
# Filter out empty or all-whitespace strings
answers = {answer for answer in answers if answer.strip()}
return answers
@dataclasses.dataclass(frozen=True)
class QuestionAnswer(object):
"""Single record in TriviaQA dataset."""
question: Question
answer: Answer
evidence_info: EvidenceInfo
class EnhancedJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
return super().default(o)
@dataclasses.dataclass
class QuestionAnswerEvidence(object):
question: Question
evidence: Evidence
answer: Optional[Answer] = None
def to_json(self):
return json.dumps(self, cls=EnhancedJSONEncoder)
@dataclasses.dataclass
class FilteredAnnotation(object):
question: Question
answer: Answer
annotation: Text
sentence: Text
def __str__(self):
return '%s\t%s\t%s\t%s' % (self.question.question_id, ','.join(
self.answer.values), self.annotation,
self.sentence.replace(
tokenization.SPIECE_UNDERLINE, ' '))
class ReadEvidenceOutput(object):
SUCCESS = None
  NO_START_END_CONTENT = 'no_start_end_content'
TOO_SHORT_CONTENT = 'too_short_content'
class MakeExampleOutput(object):
SUCCESS = None
SUCCESS_FILTERED_ANNOTATIONS = 'success_filtered_annotations'
NO_ANSWER = 'no_answer'
NO_ANSWER_TOKENIZED = 'no_answer_tokenized'
NO_ANSWER_TOKENIZED_FILTERED_ANNOTATIONS = 'no_answer_tokenized_filtered_annotations'
TOO_MANY_ANSWERS = 'too_many_answers'
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace.
This normalization is the same as for the TriviaQA dataset. However,
it is NOT used during evaluation in case of NarrativeQA -- only for training
purposes.
Args:
s: Text
Returns:
normalized text
"""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def handle_punc(text):
exclude = set(string.punctuation + ''.join([u'‘', u'’', u'´', u'`']))
return ''.join(ch if ch not in exclude else ' ' for ch in text)
def lower(text):
return text.lower()
def replace_underscore(text):
return text.replace('_', ' ')
return white_space_fix(
remove_articles(handle_punc(lower(replace_underscore(s))))).strip()
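# Example (illustrative):
#   normalize_answer("The_Red Badge, of Courage!") -> "red badge of courage"
# (underscores and punctuation become spaces, articles and extra whitespace
# are dropped, and the text is lowercased).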
def read_question_answer_csv(csv_path, documents_path,
data_split):
"""Read a CVS file into a list of QuestionAnswer objects."""
id_to_evidence_info = {}
with tf.io.gfile.GFile(documents_path) as f:
reader = csv.DictReader(f)
for datum in reader:
if datum['set'] != data_split:
continue
assert datum['kind'] in ['movie', 'gutenberg'], datum['kind']
document_id = datum['document_id']
id_to_evidence_info[document_id] = EvidenceInfo(
id=document_id, source=datum['kind'], url=datum['story_url'])
logging.info('Read %d evidence info from file %s', len(id_to_evidence_info),
documents_path)
question_answers = []
with tf.io.gfile.GFile(csv_path) as f:
reader = csv.DictReader(f)
for datum in reader:
if datum['set'] != data_split:
continue
# Note that document IDs start from 1.
# We keep 0 as an ID of an empty document
question = Question(
id=len(question_answers) + 1,
question_id=datum['question'],
value=datum['question'],
tokenized=datum['question_tokenized'])
answer = Answer(
values=[datum['answer1'], datum['answer2']],
tokenized=[datum['answer1_tokenized'], datum['answer2_tokenized']])
question_answers.append(
QuestionAnswer(question, answer,
id_to_evidence_info[datum['document_id']]))
logging.info('Read %d questions for the data spit "%s" from file %s',
len(question_answers), data_split, csv_path)
return question_answers
def _gutenberg_simple_parse(raw_content):
"""Clean a project Gunteberg file content."""
content = raw_content
# *** START OF THIS PROJECT GUTENBERG EBOOK THE RED BADGE OF COURAGE ***
starts = [
'*** START OF THIS PROJECT GUTENBERG EBOOK',
'***START OF THE PROJECT GUTENBERG EBOOK',
'*** START OF THE PROJECT GUTENBERG EBOOK',
'*END*THE SMALL PRINT! FOR PUBLIC DOMAIN',
'*END THE SMALL PRINT! FOR PUBLIC DOMAIN',
'This etext was prepared by',
'This Etext was prepared by',
'This etext was provided by',
'This Etext prepared by ',
'***START OF THIS PROJECT GUTENBERG EBOOK',
]
# *** END OF THIS PROJECT GUTENBERG EBOOK THE RED BADGE OF COURAGE ***
ends = [
'*** END OF THIS PROJECT GUTENBERG EBOOK',
'***END OF THE PROJECT GUTENBERG EBOOK',
'*** END OF THE PROJECT GUTENBERG EBOOK',
'End of Project Gutenberg Etext',
'End of this Project Gutenberg Etext',
'End of the Project Gutenberg Etext',
'End of The Project Gutenberg Etext',
'End of the Project Gutenberg etext',
'End of Project Gutenberg\'s Etext of ',
'END OF PROJECT GUTENBERG ETEXT OF ',
'***END OF THIS PROJECT GUTENBERG EBOOK',
]
has_start = any([s in content for s in starts])
has_end = any([e in content for e in ends])
if not has_start or not has_end:
return None
start_index = max([content.rfind(s) for s in starts])
end_index = min([content.find(e) % len(content) for e in ends])
# Strip the prefix: '*** START OF THIS PROJECT GUTENBERG EBOOK ***'
_, content = content[start_index:end_index].split('\n', 1)
return content
def _movie_simple_parse(raw_content):
"""Clean a movie script file content."""
content = raw_content
starts = [
'<pre>',
]
ends = [
'</pre>',
]
has_start = any([s in content for s in starts])
has_end = any([e in content for e in ends])
if not has_start or not has_end:
return None
start_index = max([content.find(s) for s in starts])
end_index = min([content.rfind(e) for e in ends])
content = content[start_index:end_index]
# _, content = content[start_index:end_index].split('>', 1)
content = re.sub('<[^>]+?>', '', content) # remove tags
return content
def load_story(path):
"""Load and decode a story from the file and."""
with tf.io.gfile.GFile(path, 'rb') as f:
raw_content = f.read()
# file encoding
charset_search = re.search(b'Character set encoding: ([-0-9a-zA-Z()]+)',
raw_content)
if charset_search is None:
charset_search = re.search(b'charset=([-0-9a-zA-Z()]+)', raw_content)
charset = None
if raw_content[:3] == b'\xef\xbb\xbf':
raw_content = raw_content[3:]
charset = 'utf-8'
  elif raw_content[:2] == b'\xfe\xff':
    # The UTF-16 BOM is only two bytes (the UTF-8 BOM above is three).
    raw_content = raw_content[2:]
    charset = 'utf-16'
elif charset_search is None:
charset = 'utf-8'
else:
charset = charset_search.groups()[0]
assert charset is not None, path
charset = charset.decode('utf-8')
charset = charset.lower()
if charset == 'utf':
charset = 'utf-8'
if charset not in ['utf-8', 'iso-8859-1', 'utf-16']:
logging.warn('Uncommon charset "%s" for the file %s', charset, path)
try:
raw_content = raw_content.decode(charset)
# pylint: disable=broad-except
except Exception as e:
logging.warn('Failed to decode file %s with charset "%s". Error: %s', path,
charset, e)
raw_content = raw_content.decode(charset, errors='replace')
# pylint: enable=broad-except
return raw_content
class ReadEvidence(beam.DoFn):
"""Read evidence from directory."""
def __init__(self, stories_dir, summaries_path):
assert tf.io.gfile.isdir(stories_dir)
self.stories_dir = stories_dir
self.summaries_path = summaries_path
if self.summaries_path is not None:
assert tf.io.gfile.exists(summaries_path)
def setup(self):
self.summary = None
if self.summaries_path is not None:
self.summary = {}
with tf.io.gfile.GFile(self.summaries_path) as f:
reader = csv.DictReader(f)
for datum in reader:
self.summary[datum['document_id']] = datum['summary'].replace(
'\n', ' ')
def process(
self,
question_answer):
path = os.path.join(self.stories_dir,
question_answer.evidence_info.id + '.content')
raw_content = load_story(path)
if question_answer.evidence_info.source == 'gutenberg':
content = _gutenberg_simple_parse(raw_content)
elif question_answer.evidence_info.source == 'movie':
content = _movie_simple_parse(raw_content)
else:
raise ValueError(
f'Unknown evidence source: {question_answer.evidence_info.source}.')
if content is None:
metrics.Metrics.counter(
METRICS_NAMESPACE, 'read_evidence_status.no_start_end_content').inc()
      yield beam.pvalue.TaggedOutput(ReadEvidenceOutput.NO_START_END_CONTENT,
                                     path)
return
num_words = len(re.sub(r'\s+', ' ', content).split(' '))
if num_words < 100:
logging.error('Content is missing (less than 100 words) in file %s', path)
metrics.Metrics.counter(METRICS_NAMESPACE,
'read_evidence_status.too_short_content').inc()
yield beam.pvalue.TaggedOutput(ReadEvidenceOutput.TOO_SHORT_CONTENT, path)
return
metrics.Metrics.counter(METRICS_NAMESPACE,
'read_evidence_status.success').inc()
if self.summary is not None:
summary = self.summary[question_answer.evidence_info.id]
else:
summary = None
yield QuestionAnswerEvidence(
question=question_answer.question,
evidence=Evidence(
info=question_answer.evidence_info, text=content, summary=summary),
answer=question_answer.answer)
# TODO(urikz): Potentially, we should filter out all intersecting
# annotations and try to pick only, for example, the largest ones
def find_answer_annotations(
text, answer_set):
"""Find answer annotations."""
annotations = []
for answer in answer_set:
# We use regex matching to search for the answer for two reasons:
# (1) We want to ignore case (so `flags=re.IGNORECASE`)
# (2) We want to the space and the hyphen to be treated as the same token.
# Sometimes the answer is "TSR 2", but the actual text contains only "TSR-2"
#
# Note that we have to espace -- `re.escape(answer)` -- because the answer
# can contain parentheses, etc.
# Finally, to accommodate (2) we replace spaces ('\\ ' due to escaping)
# with a group '[ -]'.
answer_regex = re.compile(
re.escape(answer).replace('\\ ', '[ -]'), flags=re.IGNORECASE)
for match in re.finditer(answer_regex, text):
if not answer.strip() or match.end() == 0:
raise ValueError('Invalid answer string "%s" from answer set %s' %
(answer, str(answer_set)))
annotations.append(
data_utils.Annotation(
begin=match.start(), end=match.end() - 1, text=match.group(0)))
return sorted(annotations)
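# Example (illustrative): with answer_set = {"TSR 2"}, the compiled pattern
# matches both "TSR 2" and "TSR-2" in the text, case-insensitively, and each
# match becomes an Annotation spanning [match.start(), match.end() - 1].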
class MakeExamples(beam.DoFn):
"""Function to make tf.train.Examples."""
def __init__(self, spm_model_path,
num_blocks_per_example, block_overlap_length,
block_length, max_num_annotations_per_block,
padding_token_id, cls_token_id, sep_token_id,
generate_answers, generate_summaries,
min_rouge_l_oracle_score, nltk_data_path):
self.spm_model_path = spm_model_path
self.num_blocks_per_example = num_blocks_per_example
self.block_overlap_length = block_overlap_length
self.block_length = block_length
self.max_num_annotations_per_block = max_num_annotations_per_block
self.padding_token_id = padding_token_id
self.cls_token_id = cls_token_id
self.sep_token_id = sep_token_id
self.generate_answers = generate_answers
self.generate_summaries = generate_summaries
self.extractive_oracle = extractive_oracle.ExtractiveOracle(
min_roughe_l_score=min_rouge_l_oracle_score,
top_percentile=0.9,
top_k=100)
self.nltk_data_path = nltk_data_path
nltk.data.path.append(self.nltk_data_path)
def setup(self):
nltk.data.path.append(self.nltk_data_path)
self.tokenizer = tokenization.FullTokenizer(
spm_model_file=self.spm_model_path)
self.nltk_tokenizer = nltk.TreebankWordTokenizer()
self.nltk_pos_types = {'PERSON', 'ORGANIZATION', 'FACILITY', 'GPE', 'GSP'}
# self.spacy_annotator = spacy.load('en_core_web_sm')
def process(
self, question_answer_evidence):
metrics.Metrics.counter(METRICS_NAMESPACE, 'num_questions').inc()
if self.generate_answers:
oracle_answers = []
for answer in question_answer_evidence.answer.values:
oracle_answers.extend(
self.extractive_oracle.find_approximate_answers(
question_answer_evidence.evidence.text,
answer,
remove_all_stopwords_answers=True))
metrics.Metrics.distribution(METRICS_NAMESPACE,
'oracle_answers_per_question').update(
len(oracle_answers))
answer_set = question_answer_evidence.answer.make_answer_set(
oracle_answers)
normalized_answer_set = {
normalize_answer(answer) for answer in answer_set
}
sentences = []
for sentence in self._split_into_sentences(
question_answer_evidence.evidence):
sentence_obj = self._annotate_entities(sentence)
metrics.Metrics.counter(METRICS_NAMESPACE, 'nltk_entities').inc(
sentence_obj.num_annotations(1))
if self.generate_answers:
annotations = find_answer_annotations(sentence_obj.text, answer_set)
sentence_obj.annotations.extend(annotations)
sentences.append(sentence_obj)
big_document = data_utils.BertDocument(
sentences=sentences, document_id=question_answer_evidence.question.id)
metrics.Metrics.distribution(METRICS_NAMESPACE,
'num_sentences_per_question').update(
len(sentences))
metrics.Metrics.distribution(METRICS_NAMESPACE,
'doc_length_per_question').update(
big_document.num_characters())
if self.generate_answers:
num_annotations = big_document.num_annotations(0)
metrics.Metrics.distribution(
METRICS_NAMESPACE,
'num_annotations_per_question').update(num_annotations)
if num_annotations == 0:
metrics.Metrics.counter(
METRICS_NAMESPACE,
'make_example_status.answer_span_not_found').inc()
yield beam.pvalue.TaggedOutput(MakeExampleOutput.NO_ANSWER,
question_answer_evidence.to_json())
return
tokenized_big_document = data_utils.tokenize_document_for_bert(
big_document, self.tokenizer)
metrics.Metrics.distribution(METRICS_NAMESPACE,
'tokenized_doc_length_per_question').update(
tokenized_big_document.num_tokens())
tokenized_question = self._tokenize_text(
question_answer_evidence.question.value)
metrics.Metrics.distribution(METRICS_NAMESPACE, 'question_length').update(
len(tokenized_question))
filtered_annotations = []
if self.generate_answers:
for i, sentence in enumerate(tokenized_big_document.sentences):
should_update, annotations, current_filtered_annotations = self._verify_annotations(
sentence.annotations, normalized_answer_set)
if should_update:
tokenized_big_document.sentences[i].annotations = annotations
# pylint: disable=g-complex-comprehension
filtered_annotations.extend([
FilteredAnnotation(
question=question_answer_evidence.question,
answer=question_answer_evidence.answer,
annotation=annotation,
sentence=''.join(sentence.tokens))
for annotation in current_filtered_annotations
])
metrics.Metrics.counter(METRICS_NAMESPACE,
'num_filtered_annotations').inc(
len(current_filtered_annotations))
tokenized_big_document = data_utils.split_tokenized_sentences(
tokenized_big_document,
max_tokens=self._get_max_tokens_per_raw_doc(len(tokenized_question)),
min_tokens_for_graceful_split=math.ceil(
self._get_max_tokens_per_raw_doc(len(tokenized_question)) * 0.5))
if self.generate_answers:
num_annotations = tokenized_big_document.num_annotations(0)
metrics.Metrics.distribution(
METRICS_NAMESPACE,
'num_annotations_tokenized_per_question').update(num_annotations)
if num_annotations == 0:
metrics.Metrics.counter(
METRICS_NAMESPACE,
'make_example_status.answer_not_found_tokenized').inc()
yield beam.pvalue.TaggedOutput(MakeExampleOutput.NO_ANSWER_TOKENIZED,
question_answer_evidence.to_json())
yield beam.pvalue.TaggedOutput(
MakeExampleOutput.NO_ANSWER_TOKENIZED_FILTERED_ANNOTATIONS,
filtered_annotations)
return
else:
approx_num_blocks = (
tokenized_big_document.num_tokens() /
(self.block_length - self.block_overlap_length -
len(tokenized_question)))
if (num_annotations >
self.max_num_annotations_per_block * approx_num_blocks):
metrics.Metrics.counter(METRICS_NAMESPACE,
'num_questions_with_too_many_answers').inc()
yield beam.pvalue.TaggedOutput(MakeExampleOutput.TOO_MANY_ANSWERS,
question_answer_evidence.to_json())
yield beam.pvalue.TaggedOutput(
MakeExampleOutput.SUCCESS_FILTERED_ANNOTATIONS,
filtered_annotations)
# message = question_answer_evidence.evidence.info.id
tokenized_documents = data_utils.split_tokenized_documents(
tokenized_big_document,
max_tokens=self._get_max_tokens_per_raw_doc(len(tokenized_question)),
max_sentences=None)
metrics.Metrics.distribution(METRICS_NAMESPACE,
'num_examples_per_question').update(
len(tokenized_documents))
if len(tokenized_documents) > 1:
metrics.Metrics.counter(METRICS_NAMESPACE, 'num_too_large_evidence').inc()
if self.generate_summaries:
tokenized_summary = self._tokenize_text(
question_answer_evidence.evidence.summary)
if len(tokenized_summary) < self.block_length:
tokenized_summary.extend([self.padding_token_id] *
(self.block_length - len(tokenized_summary)))
for tokenized_document in tokenized_documents:
if self.generate_answers and tokenized_document.num_annotations(0) == 0:
metrics.Metrics.counter(
METRICS_NAMESPACE,
'make_example_status.answer_not_found_splitted').inc()
continue
metrics.Metrics.counter(METRICS_NAMESPACE, 'num_examples').inc()
tf_example = tokenized_document.to_tf_strided_large_example(
overlap_length=self.block_overlap_length,
block_length=self.block_length,
padding_token_id=self.padding_token_id,
prefix_token_ids=tokenized_question,
max_num_annotations=self.max_num_annotations_per_block)
if self.generate_summaries:
num_blocks = len(
tf_example.features.feature['block_ids'].int64_list.value)
tf_example.features.feature[
'summary_token_ids'].int64_list.value.extend(tokenized_summary *
num_blocks)
yield tf_example
metrics.Metrics.counter(METRICS_NAMESPACE,
'make_example_status.success').inc()
def _split_into_sentences(self, evidence):
current_line = ''
re_combine_whitespace = re.compile(r'\s+')
for line in evidence.text.strip().split('\n'):
line_stripped = re_combine_whitespace.sub(' ', line).strip()
if line_stripped:
if current_line:
current_line = current_line + ' ' + line_stripped
else:
current_line = line_stripped
else:
if current_line:
yield current_line
current_line = ''
if current_line:
yield current_line
# TODO(urikz): Use spacy
# def _annotate_entities(self, text: Text):
# annotations = []
# for entity in self.spacy_annotator(text):
# begin = entity.start_char
# end = entity.end_char - 1
# assert end >= begin, text
# assert text[begin:end + 1] == entity.text, text
# annotations.append(
# data_utils.Annotation(
# begin=begin, end=end, text=entity.text, label=None, type=1))
# annotations.sort(key=lambda a: (a.begin, a.end))
# sentence = data_utils.Sentence(text=text, annotations=annotations)
# sentence.strip_whitespaces()
# return sentence
def _annotate_entities(self, text):
spans = list(self.nltk_tokenizer.span_tokenize(text))
tokens = [text[b:e] for (b, e) in spans]
annotations = []
trees = nltk.ne_chunk(nltk.pos_tag(tokens))
start_index = 0
for tree in trees:
if hasattr(tree, 'label'):
children = [text for text, pos in tree]
end_index = start_index + len(children)
if tree.label() in self.nltk_pos_types:
begin, _ = spans[start_index]
_, end = spans[end_index - 1]
surface_form = ' '.join(children)
# There are edge cases when these are not equal.
# For example, Diminish'd != Diminish 'd
# assert text[begin:end] == surface_form, text
surface_form = text[begin:end]
annotations.append(
data_utils.Annotation(
begin=begin, end=end - 1, text=surface_form, label=1, type=1))
start_index = end_index
else:
start_index += 1
annotations.sort(key=lambda a: (a.begin, a.end))
sentence = data_utils.Sentence(text=text, annotations=annotations)
sentence.strip_whitespaces()
return sentence
def _verify_annotations(
self, annotations, answer_set
):
should_update = False
new_annotations = []
filtered_annotations = set()
for annotation in annotations:
if (annotation.type == 0 and
normalize_answer(annotation.text) not in answer_set):
filtered_annotations.add(annotation.text)
should_update = True
else:
new_annotations.append(annotation)
return should_update, new_annotations, filtered_annotations
def _get_max_tokens_per_raw_doc(self, question_len):
"""Computes the maximal number of tokens per single document."""
# The document will be split into several overlapping blocks --
# see TokenizedBertDocument.to_tf_strided_large_example for details.
# The first block will contain (`block_length` - `question_len`) tokens
# Other blocks will contain fewer tokens because of the overlap --
# (`block_length` - `question_len` - `block_overlap_length`) tokens.
# Finally, `num_blocks_per_example` blocks will in total
# have the following number of tokens:
# (`block_length` - `question_len`) + (`num_blocks_per_example` - 1) *
# (`block_length` - `question_len` - `block_overlap_length`) tokens =
# = `num_blocks_per_example` * (`block_length` - `question_len`)
# - (`num_blocks_per_example` - 1) * `block_overlap_length`
return self.num_blocks_per_example * (self.block_length - question_len) - (
self.num_blocks_per_example - 1) * self.block_overlap_length
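# Sanity check of the formula with made-up numbers (not from any config):
# block_length=10, question_len=2, block_overlap_length=3 and
# num_blocks_per_example=2 give (10-2) + (10-2-3) = 13 document tokens,
# which equals 2 * (10 - 2) - (2 - 1) * 3.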
def _tokenize_text(self, question):
tokens = self.tokenizer.tokenize(question)
token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
token_ids = [self.cls_token_id] + token_ids + [self.sep_token_id]
token_ids = token_ids[:self.block_length]
return token_ids
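# E.g. a question whose tokens map to ids [5, 6] becomes
# [cls_token_id, 5, 6, sep_token_id], then truncated to block_length.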
def write_to_file_fn(output_prefix, filename):
return beam.io.WriteToText(
os.path.join(output_prefix + '.' + filename),
append_trailing_newlines=True,
shard_name_template='', # To force unsharded output.
)
def get_pipeline(input_qaps, input_documents, data_split,
stories_dir, summaries_path, spm_model_path,
num_blocks_per_example, block_overlap_length,
block_length, max_num_annotations_per_block,
padding_token_id, cls_token_id, sep_token_id,
generate_answers, generate_summaries,
min_rouge_l_oracle_score, nltk_data_path,
output_prefix, output_num_shards):
"""Makes a Beam pipeline."""
def pipeline(root):
question_answers = read_question_answer_csv(input_qaps, input_documents,
data_split)
question_answers = (
root | 'CreateQuestionAnswers' >> beam.Create(question_answers)
| 'ShuffleAfterCreatingQA' >> beam.Reshuffle())
read_outputs = (
question_answers
| 'ReadEvidence' >> beam.ParDo(
ReadEvidence(
stories_dir=stories_dir,
summaries_path=summaries_path)).with_outputs())
_ = (
read_outputs[ReadEvidenceOutput.NO_STAR_END_CONTENT]
| 'ShuffleNoStarEndContent' >> beam.Reshuffle()
| 'WriteNoStarEndContent' >> write_to_file_fn(
output_prefix, 'no_star_end_content.txt'))
_ = (
read_outputs[ReadEvidenceOutput.TOO_SHORT_CONTENT]
| 'ShuffleTooShortContent' >> beam.Reshuffle()
| 'WriteTooShortContent' >> write_to_file_fn(output_prefix,
'too_short_content.txt'))
outputs = (
read_outputs[ReadEvidenceOutput.SUCCESS]
| 'ShuffleBeforeMakeExamples' >> beam.Reshuffle()
| 'MakeExamples' >> beam.ParDo(
MakeExamples(
spm_model_path=spm_model_path,
num_blocks_per_example=num_blocks_per_example,
block_overlap_length=block_overlap_length,
block_length=block_length,
max_num_annotations_per_block=max_num_annotations_per_block,
padding_token_id=padding_token_id,
cls_token_id=cls_token_id,
sep_token_id=sep_token_id,
generate_answers=generate_answers,
generate_summaries=generate_summaries,
min_rouge_l_oracle_score=min_rouge_l_oracle_score,
nltk_data_path=nltk_data_path)).with_outputs())
# if generate_answers:
# # Write failure cases, when no answer was found
# _ = (
# outputs[MakeExampleOutput.NO_ANSWER]
# | 'ShuffleNoAnswer' >> beam.Reshuffle()
# | 'SampleNoAnswer' >>
# beam.combiners.Sample.FixedSizeGlobally(SAMPLE_NO_ANSWER_QUESTIONS)
# | 'WriteNoAnswer' >> write_to_file_fn('no_answer.jsonl'))
# _ = (
# outputs[MakeExampleOutput.NO_ANSWER_TOKENIZED]
# | 'ShuffleNoAnswerTokenized' >> beam.Reshuffle()
# | 'SampleNoAnswerTokenized' >>
# beam.combiners.Sample.FixedSizeGlobally(SAMPLE_NO_ANSWER_QUESTIONS)
# | 'WriteNoAnswerTokenized' >>
# write_to_file_fn('no_answer_tokenized.jsonl'))
# # Write annotations that have been filtered out after tokenization
# _ = (
# outputs[MakeExampleOutput.SUCCESS_FILTERED_ANNOTATIONS]
# | 'ShuffleSuccessFilteredAnnotations' >> beam.Reshuffle()
# | 'FlattenSuccessFilteredAnnotations' >> beam.FlatMap(lambda x: x)
# | 'WriteSuccessFilteredAnnotations' >>
# write_to_file_fn('success.filtered_annotations.txt'))
# _ = (
# outputs[
# MakeExampleOutput.NO_ANSWER_TOKENIZED_FILTERED_ANNOTATIONS]
# |
# 'ShuffleNoAnswerTokenizedFilteredAnnotations' >> beam.Reshuffle()
# | 'FlattenNoAnswerTokenizedFilteredAnnotations' >>
# beam.FlatMap(lambda x: x)
# | 'WriteNoAnswerTokenizedFilteredAnnotations' >>
# write_to_file_fn('no_answer_tokenized.filtered_annotations.txt'))
# # Write cases where the too many answer spans were found
# _ = (
# outputs[MakeExampleOutput.TOO_MANY_ANSWERS]
# | 'ShuffleTooManyAnswers' >> beam.Reshuffle()
# | ('WriteTooManyAnswers' >>
# write_to_file_fn('too_many_answers.jsonl')))
max_tokens = num_blocks_per_example * block_length
max_num_annotations = num_blocks_per_example * max_num_annotations_per_block
max_lengths = dict(
token_ids=max_tokens,
is_continuation=max_tokens,
block_ids=num_blocks_per_example,
answer_annotation_begins=max_num_annotations,
answer_annotation_ends=max_num_annotations,
answer_annotation_labels=max_num_annotations,
entity_annotation_begins=max_num_annotations,
entity_annotation_ends=max_num_annotations,
entity_annotation_labels=max_num_annotations,
prefix_length=num_blocks_per_example)
if generate_summaries:
max_lengths['summary_token_ids'] = max_tokens
example_packer = beam_utils.PriorityExamplePacker(
priority_feature='token_ids',
max_lengths=max_lengths,
breakpoint_features=dict(),
cumulative_features=[],
min_packing_fraction=1.0,
max_cache_len=num_blocks_per_example)
_ = (
outputs[MakeExampleOutput.SUCCESS]
| 'ShuffleBeforePacking' >> beam.Reshuffle()
| 'PackExamples' >> beam_utils.PackExamples(example_packer)
| 'ShuffleAfterPacking' >> beam.Reshuffle()
| 'WriteTfExamples' >> beam.io.WriteToTFRecord(
os.path.join(output_prefix + '.tfrecord'),
coder=beam.coders.ProtoCoder(tf.train.Example),
num_shards=output_num_shards))
return pipeline
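# Minimal driver sketch (hypothetical; the flag-parsing main that builds these
# arguments is not part of this excerpt):
#
#   pipeline_fn = get_pipeline(...)  # the arguments listed above
#   with beam.Pipeline() as root:
#     pipeline_fn(root)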
| [
"[email protected]"
] | |
ead7b1a0755191bb9b690a567988c0afb102bd9f | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /man/call_last_world_with_next_woman/want_last_case/come_new_hand/few_person.py | 61c2026eb36fe40fd1c4e1cefa0bcdb43a2f6891 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
#! /usr/bin/env python
def group(str_arg):
thing(str_arg)
print('feel_company_by_week')
def thing(str_arg):
print(str_arg)
if __name__ == '__main__':
group('same_part')
| [
"[email protected]"
] | |
07abeb853173fc2e69c1a3dea3176bc3904f1b83 | cdb186ad49bba1406c81f634b936e73f8cb04009 | /ARC/002/a2.py | b88ac58257608a246949e7fe5a812d771b81e63d | [] | no_license | ToshikiShimizu/AtCoder | 9e46f5581f2c1f5149ce1394d61d652cda6256a3 | 41fe6408c20c59bbf1b5d7ee9db2e132f48ad1ac | refs/heads/master | 2023-07-26T22:45:51.965088 | 2023-07-10T14:11:35 | 2023-07-10T14:11:35 | 148,154,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #coding:utf-8
y = int(input())
if (y%400==0):
print ("YES")
elif (y%100==0):
print ("NO")
elif (y%4==0):
print ("YES")
else:
print ("NO") | [
"[email protected]"
] | |
ed52f1f1bcf6a8340ae7a9012b1186ea964513df | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /0258. Add Digits.py | 9fcb8f9b0f7b36558df111738752ea389b781eb3 | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | """
explanation, mathematical proof:
N = a[0] * 1 + a[1] * 10 + ... + a[n] * 10^n, where every a[i] is in [0, 9]
we set M = a[0] + a[1] + ... + a[n] (the digit sum)
and another truth is that every power of ten leaves remainder 1 mod 9:
1 % 9 = 1
10 % 9 = 1
100 % 9 = 1
so N % 9 = (a[0] + a[1] + ... + a[n]) % 9 = M % 9
which means N == M (mod 9): summing the digits preserves the value modulo 9,
and repeating the process converges to the digital root.
"""
# digit root, time/space O(1)
class Solution(object):
def addDigits(self, num):
"""
:type num: int
:rtype: int
"""
if num == 0:
return 0
return (num-1)%9 + 1 # period is 9
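# Worked check of the formula (illustrative): 38 -> 3 + 8 = 11 -> 1 + 1 = 2,
# and (38 - 1) % 9 + 1 == 2; for a multiple of 9 such as 18,
# (18 - 1) % 9 + 1 == 9, as expected.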
"""
Given a non-negative integer num, repeatedly add all its digits until the result has only one digit.
Example:
Input: 38
Output: 2
Explanation: The process is like: 3 + 8 = 11, 1 + 1 = 2.
Since 2 has only one digit, return it.
Follow up:
Could you do it without any loop/recursion in O(1) runtime?
"""
| [
"[email protected]"
] | |
a9617016af138a8744cd4ab32de42682c82f62a9 | 6f44214567f1fe9c5534ea3d55800e9aaabaa390 | /evergreen_requests.py | 73c97ef056f55049281e3fcfd2f921c5cf23debe | [
"BSD-2-Clause"
] | permissive | saghul/evergreen-requests | c3505c1f669834b997a3b15907cd25a1d88b08f1 | 4df9e7564492d82d9850399ad6b0c83e59e30423 | refs/heads/master | 2016-09-06T05:53:26.345690 | 2013-05-14T21:36:22 | 2013-05-14T21:36:22 | 10,063,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,433 | py |
from functools import partial
from operator import methodcaller
from evergreen import futures, patcher
# Monkey-patch.
requests = patcher.import_patched('requests')
__version__ = '0.0.1'
__all__ = ['map', 'imap', 'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'request', '__version__']
# Export same items as vanilla requests
__requests_imports__ = ['utils', 'session', 'Session', 'codes', 'RequestException', 'Timeout', 'URLRequired', 'TooManyRedirects', 'HTTPError', 'ConnectionError']
patcher.slurp_properties(requests, globals(), srckeys=__requests_imports__)
__all__.extend(__requests_imports__)
del requests, patcher, __requests_imports__
class AsyncRequest(object):
""" Asynchronous request.
Accept same parameters as ``Session.request`` and some additional:
:param session: Session which will do the request, else one is created automatically.
:param callback: Callback called on response. Same as passing ``hooks={'response': callback}``.
"""
def __init__(self, method, url, **kwargs):
self.method = method
self.url = url
self.session = kwargs.pop('session', None)
if self.session is None:
self.session = Session()
callback = kwargs.pop('callback', None)
if callback:
kwargs['hooks'] = {'response': callback}
self.kwargs = kwargs # Arguments for ``Session.request``
self.response = None
def send(self, **kwargs):
"""
Prepares the request from the parameters passed to the constructor plus optional ``kwargs``,
then sends it and saves the response to :attr:`response`.
:returns: ``Response``
"""
merged_kwargs = {}
merged_kwargs.update(self.kwargs)
merged_kwargs.update(kwargs)
self.response = self.session.request(self.method, self.url, **merged_kwargs)
return self.response
# Shortcuts for creating AsyncRequest with appropriate HTTP method
get = partial(AsyncRequest, 'GET')
options = partial(AsyncRequest, 'OPTIONS')
head = partial(AsyncRequest, 'HEAD')
post = partial(AsyncRequest, 'POST')
put = partial(AsyncRequest, 'PUT')
patch = partial(AsyncRequest, 'PATCH')
delete = partial(AsyncRequest, 'DELETE')
def request(method, url, **kwargs):
return AsyncRequest(method, url, **kwargs)
def map(reqs, concurrency=10):
"""Concurrently converts a list of Requests to Responses. Results are yielded
in order even if requests are performed concurrently.
:param reqs: a collection of AsyncRequest objects.
:param concurrency: Specifies the number of requests to make at a time. Defaults to 10.
"""
def result_iterator():
with futures.TaskPoolExecutor(concurrency) as e:
fs = [e.submit(r.send) for r in reqs]
for f in fs:
yield f.get()
return result_iterator()
def imap(reqs, concurrency=10):
"""Concurrently converts a list of Requests to Responses. Results are yielded
in arbitrary order, as soon as requests complete.
:param reqs: a collection of AsyncRequest objects.
:param concurrency: Specifies the number of requests to make at a time. Defaults to 10.
"""
def result_iterator():
with futures.TaskPoolExecutor(concurrency) as e:
fs = [e.submit(r.send) for r in reqs]
for f in futures.as_completed(fs):
yield f.get()
return result_iterator()
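# Minimal usage sketch (the URL is hypothetical, and the evergreen event loop
# this module relies on must be running):
#
#   reqs = [get('http://example.com/%d' % i) for i in range(3)]
#   for response in imap(reqs, concurrency=2):
#       print(response.status_code)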
| [
"[email protected]"
] | |
d2004d8e68741301c502305b263147f20710d51e | ef821468b081ef2a0b81bf08596a2c81e1c1ef1a | /PythonWebBasics_Django/Django_Web_Basics/Django_Web_Basics_2/migrations/0007_auto_20210130_1511.py | c45bf61e0b3924a636cba22def0c3c357974ec4f | [] | no_license | Ivaylo-Atanasov93/The-Learning-Process | 71db22cd79f6d961b9852f140f4285ef7820dd80 | 354844e2c686335345f6a54b3af86b78541ed3f3 | refs/heads/master | 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # Generated by Django 3.1.5 on 2021-01-30 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Django_Web_Basics_2', '0006_auto_20210130_1456'),
]
operations = [
migrations.AlterField(
model_name='game',
name='level_of_difficulty',
field=models.IntegerField(choices=[(0, 'Easy'), (1, 'Medium'), (2, 'Hard')]),
),
]
| [
"[email protected]"
] | |
cf3fde1e92f30234f03259580d338b0fc8a49ef0 | 94a4388cee6dfeaa1674fba20423e8a3f8f6dd42 | /backend/game_mdoe_18873/urls.py | d4b0c3f0622dafe5bd8ba458e4c2793e11cd2878 | [] | no_license | crowdbotics-apps/game-mdoe-18873 | 0dc5c1e1827f382d5a84847697d0b1b05191066d | 8bcbe6c9b116fa1294b8104018c9cd36b1536c13 | refs/heads/master | 2022-11-15T17:29:36.126851 | 2020-07-13T22:23:28 | 2020-07-13T22:23:28 | 279,423,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | """game_mdoe_18873 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("home/", include("home.urls")),
path("api/v1/", include("course.api.v1.urls")),
path("course/", include("course.urls")),
]
admin.site.site_header = "game mdoe"
admin.site.site_title = "game mdoe Admin Portal"
admin.site.index_title = "game mdoe Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="game mdoe API",
default_version="v1",
description="API documentation for game mdoe App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
6e0aac2a7595214d87e1fd0c6e350a34022d7300 | 8a223de74cf3d44d7b3275a3be3b4f8bebf0d9b8 | /big.py | a63654aaed6d317bae9c002b9d4f5c3da5f1e39e | [] | no_license | sarureddi/j | cdb69fd5be90d140b20d1b2ad47ab3100e4776d9 | 6e2a4f8298990c787dc171d5d4e6e2e1424b8745 | refs/heads/master | 2020-05-31T22:23:28.208008 | 2019-06-04T17:03:55 | 2019-06-04T17:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | a,b=(input()).split()
# Compare as integers; comparing the raw strings fails for e.g. "9" vs "10".
a, b = int(a), int(b)
if a > b:
print(a)
elif a < b:
print(b)
else:
print(a)
| [
"[email protected]"
] | |
579f9cdd764f516d3acae14fe2bbe0ae5e6f94b5 | 165d2e9a6d451b3a614a28629cca256f849b73ab | /notebooks/_solutions/case2_biodiversity_analysis21.py | 087bcee2f2924c310bc97c9115dab1564054c9e1 | [
"BSD-3-Clause"
] | permissive | DemeulemeesterT/DS-python-data-analysis | 5b2439382571769dcc9b61fb3408a51552364c08 | b4dd68b9c912c5d5c52c607aa117f5054449c73d | refs/heads/master | 2023-05-07T16:51:31.426493 | 2021-05-31T07:24:38 | 2021-05-31T07:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | species_per_plot = survey_data.reset_index().pivot_table(index="name",
columns="verbatimLocality",
values="occurrenceID",
aggfunc='count')
# alternative ways to calculate this
#species_per_plot = survey_data.groupby(['name', 'plot_id']).size().unstack(level=-1)
#species_per_plot = pd.crosstab(survey_data['name'], survey_data['plot_id']) | [
"[email protected]"
] | |
1401b708e738bdf68f663d12f6a128f484f08f08 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/statistics_timeline_item.py | 71106a07b0316521f9190c69730cb31447e4a86e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,489 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StatisticsTimelineItem:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'key': 'str',
'timeline': 'list[TimeLineItem]'
}
attribute_map = {
'key': 'key',
'timeline': 'timeline'
}
def __init__(self, key=None, timeline=None):
"""StatisticsTimelineItem
The model defined in huaweicloud sdk
:param key: Key; one of: total requests (ACCESS), bot attack protection (CRAWLER), total attacks (ATTACK), basic web protection (WEB_ATTACK), precise protection (PRECISE), CC attack protection (CC)
:type key: str
:param timeline: Timeline statistics for the corresponding key
:type timeline: list[:class:`huaweicloudsdkwaf.v1.TimeLineItem`]
"""
self._key = None
self._timeline = None
self.discriminator = None
if key is not None:
self.key = key
if timeline is not None:
self.timeline = timeline
@property
def key(self):
"""Gets the key of this StatisticsTimelineItem.
Key; one of: total requests (ACCESS), bot attack protection (CRAWLER), total attacks (ATTACK), basic web protection (WEB_ATTACK), precise protection (PRECISE), CC attack protection (CC)
:return: The key of this StatisticsTimelineItem.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this StatisticsTimelineItem.
Key; one of: total requests (ACCESS), bot attack protection (CRAWLER), total attacks (ATTACK), basic web protection (WEB_ATTACK), precise protection (PRECISE), CC attack protection (CC)
:param key: The key of this StatisticsTimelineItem.
:type key: str
"""
self._key = key
@property
def timeline(self):
"""Gets the timeline of this StatisticsTimelineItem.
Timeline statistics for the corresponding key
:return: The timeline of this StatisticsTimelineItem.
:rtype: list[:class:`huaweicloudsdkwaf.v1.TimeLineItem`]
"""
return self._timeline
@timeline.setter
def timeline(self, timeline):
"""Sets the timeline of this StatisticsTimelineItem.
Timeline statistics for the corresponding key
:param timeline: The timeline of this StatisticsTimelineItem.
:type timeline: list[:class:`huaweicloudsdkwaf.v1.TimeLineItem`]
"""
self._timeline = timeline
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StatisticsTimelineItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
bc34681fd530d87f8de958eb30d88c342abbeffe | daee54824cb107f9b5749e3c12e7f09f544bac0e | /modules/readers/resources/python/rawVolumeRDRViewFrame.py | f809c2e9c0e1da779a57aa6d0deafd0a9ea6ba7c | [] | no_license | JoonVan/devide | 8fa556d2b42c5ad70c3595303253f2a171de0312 | 586225d68b079e2a96007bd33784113b3a19a538 | refs/heads/master | 2020-12-26T06:25:01.744966 | 2017-01-22T19:47:50 | 2017-01-22T19:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,028 | py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# generated by wxGlade 0.6.3 on Sat Feb 09 13:43:04 2008
import wx
# begin wxGlade: extracode
# end wxGlade
class rawVolumeRDRViewFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: rawVolumeRDRViewFrame.__init__
kwds["style"] = wx.CAPTION|wx.MINIMIZE_BOX|wx.MAXIMIZE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER
wx.Frame.__init__(self, *args, **kwds)
self.viewFramePanel = wx.Panel(self, -1)
self.label_1_copy_2 = wx.StaticText(self.viewFramePanel, -1, "Filename")
self.filenameText = wx.TextCtrl(self.viewFramePanel, -1, "")
self.browseButtonId = wx.NewId()
self.button_1_copy = wx.Button(self.viewFramePanel, self.browseButtonId, "Browse")
self.label_3_copy_1 = wx.StaticText(self.viewFramePanel, -1, "Data type")
self.dataTypeChoice = wx.Choice(self.viewFramePanel, -1, choices=["Dummy 1", "Dummy 2", "Dummy 3", "Dummy 4", "Dummy 5"])
self.endiannessRadioBox = wx.RadioBox(self.viewFramePanel, -1, "Endianness", choices=["Little (LSB at lowest address)", "Big (MSB at lowest address)"], majorDimension=2, style=wx.RA_SPECIFY_COLS)
self.label_2 = wx.StaticText(self.viewFramePanel, -1, "Header size (bytes)")
self.headerSizeText = wx.TextCtrl(self.viewFramePanel, -1, "0")
self.label_4 = wx.StaticText(self.viewFramePanel, -1, "Extent (x0, x1, y0, y1, z0, z1)")
self.extentText = wx.TextCtrl(self.viewFramePanel, -1, "")
self.label_5 = wx.StaticText(self.viewFramePanel, -1, "Spacing (Sx, Sy, Sz)")
self.spacingText = wx.TextCtrl(self.viewFramePanel, -1, "")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: rawVolumeRDRViewFrame.__set_properties
self.SetTitle("Raw Volume Reader")
self.dataTypeChoice.SetSelection(0)
self.endiannessRadioBox.SetSelection(0)
# end wxGlade
def __do_layout(self):
# begin wxGlade: rawVolumeRDRViewFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.FlexGridSizer(3, 2, 4, 4)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6.Add(self.label_1_copy_2, 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 2)
sizer_6.Add(self.filenameText, 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_6.Add(self.button_1_copy, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2.Add(sizer_6, 0, wx.BOTTOM|wx.EXPAND, 4)
sizer_3.Add(self.label_3_copy_1, 0, wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 2)
sizer_3.Add(self.dataTypeChoice, 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2.Add(sizer_3, 0, wx.BOTTOM|wx.EXPAND, 4)
sizer_7.Add(self.endiannessRadioBox, 1, wx.EXPAND, 0)
sizer_2.Add(sizer_7, 0, wx.BOTTOM|wx.EXPAND, 4)
sizer_5.Add(sizer_2, 1, wx.ALL|wx.EXPAND, 7)
grid_sizer_1.Add(self.label_2, 0, wx.ALIGN_CENTER_VERTICAL, 0)
grid_sizer_1.Add(self.headerSizeText, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.label_4, 0, wx.ALIGN_CENTER_VERTICAL, 0)
grid_sizer_1.Add(self.extentText, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.label_5, 0, wx.ALIGN_CENTER_VERTICAL, 0)
grid_sizer_1.Add(self.spacingText, 0, wx.EXPAND, 0)
grid_sizer_1.AddGrowableCol(1)
sizer_5.Add(grid_sizer_1, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 7)
self.viewFramePanel.SetSizer(sizer_5)
sizer_1.Add(self.viewFramePanel, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# end wxGlade
# end of class rawVolumeRDRViewFrame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = rawVolumeRDRViewFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
| [
"[email protected]"
] | |
fc7f96130086adfc9ad4a25f653c89d83852ef51 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/EducationalDP/n_2.py | cbed84f2ac5e605e38eb513f8cf7a89401c910ac | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import numpy as np
# AtCoder Educational DP Contest, problem N (Slimes): interval DP over merge costs.
N = int(input())
A = np.array(input().split(), np.int64)
# Acum[i, j] holds sum(A[i:j]), the cost charged when the range [i, j) is finally merged.
Acum = np.zeros((N, N + 1), dtype=np.int64)
for i in range(N):
Acum[i, i + 1:] = A[i:].cumsum()
# dp[l, r] = minimal total cost to merge slimes l..r-1 into one.
dp = np.zeros((N, N + 1), dtype=np.int64)
for length in range(2, N + 1):
for left in range(N - length + 1):
right = left + length
# Minimize over every split point, vectorized across all splits at once.
dp[left, right] = (dp[left, left + 1: right] + dp[left + 1: right, right]).min() + Acum[left, right]
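# Sample from the original task (illustrative): input "4" then "10 20 30 40"
# prints 190.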
print(dp[0, N]) | [
"[email protected]"
] | |
cc2d90b5c507367ae76264756846dcd49936d646 | f9bfba4002964d24df97d20722e116b4f7cf52de | /recursor/atlas/chart.py | 32d9c3f847990d5343b2ca0226ca7e4861424a89 | [
"MIT"
] | permissive | OaklandPeters/recursor | a42ce77c190a3b537b671f88e74f9ecab81841ad | 3a5eabd0b43e5ec2a66e6215a9bad70b4ab47c34 | refs/heads/master | 2020-04-05T23:35:29.259107 | 2015-04-24T21:39:16 | 2015-04-24T21:39:16 | 27,939,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | """
Standard implementation of Atlas class.
Essentially just a list of dicts, with item access affecting sublists
"""
| [
"[email protected]"
] | |
6564f4093b1a148ae1df81d5d70bfef545717316 | 747f759311d404af31c0f80029e88098193f6269 | /addons/etl_interface/etl_component/input/xml_in.py | a491b849e447f154b6d922032dbf9fd2564c96c9 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/openerp/production/extra-addons/etl_interface/etl_component/input/xml_in.py | [
"[email protected]"
] | |
102852e3762a3459527bfeaaf5209fbd07acaf95 | e70e8f9f5c1b20fe36feab42ad4c2c34fc094069 | /Python/Advanced OOP/Inheritance/Players and monsters/05. Knight.py | 6e1783c65ccaa292e9d2cfcc4e9d7be7e99ab6ad | [
"MIT"
] | permissive | teodoramilcheva/softuni-software-engineering | 9247ca2032915d8614017a3762d3752b3e300f37 | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | refs/heads/main | 2023-03-29T15:55:54.451641 | 2021-04-09T18:46:32 | 2021-04-09T18:46:32 | 333,551,625 | 0 | 0 | null | 2021-04-09T18:46:32 | 2021-01-27T20:30:18 | Python | UTF-8 | Python | false | false | 68 | py | from project.hero import Hero
class Knight(Hero):
pass
| [
"[email protected]"
] | |
39011567a8e74d0c3d1a0fa36bbb225839194414 | 349dadbf45b7c12a3fe41c5e0421c0488b679919 | /transformers/tests/test_trainer_seq2seq.py | dc4a11260d4c0fe61480ce2e944d48835dcfbbd3 | [
"BSD-3-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0"
] | permissive | salesforce/CodeRL | c772e408bac690527759f416ea22add4c97e5bec | 51db4ff983d5376e62b9e7eba150316a651c80d9 | refs/heads/main | 2023-08-18T18:38:02.740995 | 2022-11-18T16:14:28 | 2022-11-18T16:14:28 | 508,912,853 | 412 | 52 | BSD-3-Clause | 2023-08-31T07:51:27 | 2022-06-30T02:54:36 | Python | UTF-8 | Python | false | false | 4,859 | py | # coding=utf-8
# Copyright 2020 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.file_utils import is_datasets_available
from transformers.testing_utils import TestCasePlus, require_torch, slow
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
def test_finetune_bert2bert(self):
bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
bert2bert.config.eos_token_id = tokenizer.sep_token_id
bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
bert2bert.config.max_length = 128
train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
train_dataset = train_dataset.select(range(32))
val_dataset = val_dataset.select(range(16))
batch_size = 4
def _map_to_encoder_decoder_inputs(batch):
# Tokenizer will automatically set [BOS] <text> [EOS]
inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
batch["input_ids"] = inputs.input_ids
batch["attention_mask"] = inputs.attention_mask
batch["decoder_input_ids"] = outputs.input_ids
batch["labels"] = outputs.input_ids.copy()
batch["labels"] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
batch["decoder_attention_mask"] = outputs.attention_mask
assert all([len(x) == 512 for x in inputs.input_ids])
assert all([len(x) == 128 for x in outputs.input_ids])
return batch
def _compute_metrics(pred):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
return {"accuracy": accuracy}
# map train dataset
train_dataset = train_dataset.map(
_map_to_encoder_decoder_inputs,
batched=True,
batch_size=batch_size,
remove_columns=["article", "highlights"],
)
train_dataset.set_format(
type="torch",
columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)
# same for validation dataset
val_dataset = val_dataset.map(
_map_to_encoder_decoder_inputs,
batched=True,
batch_size=batch_size,
remove_columns=["article", "highlights"],
)
val_dataset.set_format(
type="torch",
columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
)
output_dir = self.get_auto_remove_tmp_dir()
training_args = Seq2SeqTrainingArguments(
output_dir=output_dir,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
predict_with_generate=True,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
warmup_steps=0,
eval_steps=2,
logging_steps=2,
)
# instantiate trainer
trainer = Seq2SeqTrainer(
model=bert2bert,
args=training_args,
compute_metrics=_compute_metrics,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
)
# start training
trainer.train()
| [
"[email protected]"
] | |
b0914bfb223699b5b1e55591b0080efe663329aa | f94c2337607ef06856fcff8acd18e60059894e21 | /src/program/consumers.py | 9d141e87613bd88c6f795b85e8bcb5a01bc8c4e4 | [] | no_license | coral/bornhack-website | d82446486db8523d0e059e82851ae80498b0bf2d | 260a37af3c1cb4cd33dfc8cb80f6bd966b794c3d | refs/heads/master | 2021-01-16T00:09:15.237955 | 2017-08-10T18:55:13 | 2017-08-10T18:55:13 | 99,955,304 | 0 | 0 | null | 2017-08-10T18:51:34 | 2017-08-10T18:51:34 | null | UTF-8 | Python | false | false | 2,043 | py | from channels.generic.websockets import JsonWebsocketConsumer
from camps.models import Camp
from .models import EventInstance, Favorite
class ScheduleConsumer(JsonWebsocketConsumer):
http_user = True
def connection_groups(self, **kwargs):
return ['schedule_users']
def connect(self, message, **kwargs):
camp_slug = message.http_session['campslug']
try:
camp = Camp.objects.get(slug=camp_slug)
days = [{
'repr': day.lower.strftime('%A %Y-%m-%d'),
'iso': day.lower.strftime('%Y-%m-%d'),
'day_name': day.lower.strftime('%A'),
} for day in camp.get_days('camp')]
event_instances_query_set = EventInstance.objects.filter(event__camp=camp)
event_instances = list(map(lambda x: x.to_json(user=message.user), event_instances_query_set))
self.send({
"accept": True,
"event_instances": event_instances,
"days": days,
"action": "init"
})
except Camp.DoesNotExist:
pass
def raw_receive(self, message, **kwargs):
content = self.decode_json(message['text'])
action = content.get('action')
data = {}
if action == 'favorite':
event_instance_id = content.get('event_instance_id')
event_instance = EventInstance.objects.get(id=event_instance_id)
Favorite.objects.create(
user=message.user,
event_instance=event_instance
)
if action == 'unfavorite':
event_instance_id = content.get('event_instance_id')
event_instance = EventInstance.objects.get(id=event_instance_id)
favorite = Favorite.objects.get(event_instance=event_instance, user=message.user)
favorite.delete()
self.send(data)
def disconnect(self, message, **kwargs):
pass
| [
"[email protected]"
] | |
1fa159fc3242994e4c5012f910fd314436c5735a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_210/312.py | a4c44e7288c46a54c1f07b45e3b43b556f87655f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,008 | py |
'''
t = int(input()) # read a line with a single integer
for b in range(1, t + 1):
n = list(input())
eqInd = 0
for i in range(1, len(n)):
if n[i] < n[i-1]:
n[eqInd] = str(int(n[eqInd]) - 1)
n[eqInd + 1:] = ['9'] * (len(n) - eqInd - 1)
break
if n[i] == n[eqInd]:
continue
elif n[i] == n[i-1]:
eqInd = i - 1
continue
else:
eqInd = i
if n[0] == '0': n = n[1:]
print("Case #{}: {}".format(b, ''.join(n)))
'''
'''
t = int(input()) # read a line with a single integer
for b in range(1, t + 1):
panc, k = list(input().split())
k = int(k)
moves = 0
lastP = 'B'
while(True):
if lastP == panc:
break
lastP = panc
panc = panc.rstrip('+').lstrip('+')
while(k <= len(panc) and panc[:k] == '-'*k):
panc = panc[k:]
moves += 1
while(k <= len(panc) and panc[len(panc)-k:] == '-'*k):
panc = panc[:len(panc)-k]
moves += 1
if panc == '':
break
if panc[0] == '-' and panc[-1] == '-':
fp = panc.find('+')
if fp == -1:
continue
p = list(panc)
while fp < k and fp + k <= len(panc):
p[fp:fp + k] = list(panc[fp:fp + k].replace('-', '1').replace('+','-').replace('1','+'))
panc = ''.join(p)
fp = panc.find('+')
moves += 1
if(fp == -1):
break
panc = panc.rstrip('+').lstrip('+')
if panc == '':
print("Case #{}: {}".format(b, moves))
else:
print("Case #{}: {}".format(b, 'IMPOSSIBLE'))
'''
'''
t = int(input()) # read a line with a single integer
for b in range(1, t + 1):
N, k = list(input().split())
N, k = int(N), int(k)
if N % 2 == 0:
ls = N//2 - 1
rs = N//2
else:
ls, rs = N//2, N//2
turns = 1
count = 2
while(True):
if turns >= k:
if ls < 0: ls = 0
if rs < 0: rs = 0
break
if ls == rs:
ls = ls//2 - 1
rs = rs//2
else:
ls = ls // 2
rs = rs // 2
turns += count
count *= 2
print("Case #{}: {} {}".format(b, ls, rs))
string = input()
length = len(string)
hf = string[:length//2][::-1]
he = string[length//2:]
if length%2 != 0:
he = he[1:]
count = sum(hf[i] != he[i] for i in range(len(he)))
if count == 1:
print('YES')
elif count < 2 and length%2 !=0:
print('YES')
else:
print('NO')
'''
'''
num = int(input())
f = input()
l = len(f)
moves = 0
strings = [f]
for k in range(num - 1):
c = input()
strings.append(c)
count = -1
cyc = False
for i in range(l):
if f == c:
cyc = True
break
else:
f = f[1:] + f[0]
count += 1
if not cyc:
moves = -1
break
f = c
if moves == -1:
print(moves)
else:
moves = []
for k in range(num):
c = strings[k]
countAll = 0
for j in range(num):
count = 0
f = strings[j]
for i in range(l):
if f == c:
break
else:
f = f[1:] + f[0]
count += 1
countAll += count
moves.append(countAll)
print(min(moves))
'''
'''
t = int(input())
for i in range(t):
D, N = [int(x) for x in input().split()]
k, s = [int(x) for x in input().split()]
a = (D * s) / (D - k)
for j in range(N-1):
k, s = [int(x) for x in input().split()]
ac = (D * s) / (D - k)
if ac < a: a = ac
print("Case #" + str(i+1) + ": " + str(a))
'''
'''
t = int(input())
for i in range(t):
N, R, O, Y, G, B, V = [int(x) for x in input().split()]
circ = ['' for x in range(N)]
if Y != 0: ys = N // Y
if B != 0: bs = N // B
if R != 0: rs = N // R
if R == max(Y,R,B):
f = R
first = 'R'
Rs = rs
if B >= Y:
s = B
g = Y
Ys = bs
Bs = ys
second = 'B'
third = 'Y'
else:
s = Y
g = B
Ys = ys
Bs = bs
second = 'Y'
third = 'B'
elif B == max(Y,R,B):
first = 'B'
f = B
Rs = bs
if R >= Y:
s = R
g = Y
Ys = rs
Bs = ys
second = 'R'
third = 'Y'
else:
s = Y
g = R
Ys = ys
Bs = rs
second = 'Y'
third = 'R'
else:
first = 'Y'
f = Y
Rs = ys
if R >= B:
s = R
g = B
Ys = rs
Bs = bs
second = 'R'
third = 'B'
else:
s = B
g = R
Ys = bs
Bs = rs
second = 'B'
third = 'R'
idx = 0
j = 0
while j < f:
if '' not in circ: break
if circ[(idx + j * Rs) % N] != '':
idx = circ.index('')
j = 0
continue
circ[idx + j * Rs % N] = first
j += 1
if '' in circ:
idx = circ.index('')
j = 0
c = 0
while c < s:
if '' not in circ: break
if circ[(idx + j * Ys) % N] != '':
idx = (idx + j * Ys)%N + 1
j = 0
continue
circ[idx + j * Ys % N] = second
c += 1
j += 1
if '' in circ:
idx = circ.index('')
j = 0
c = 0
while c < g:
if '' not in circ: break
if circ[(idx + j * Bs) % N] != '':
idx = (idx + j * Bs) % N + 1
j = 0
continue
circ[idx + j * Bs % N] = third
j += 1
c += 1
circ = [x for x in ''.join(circ)]
impossible = False
if circ[-1] == circ[0] or circ[1] == circ[0]:
impossible = True
print("Case #" + str(i+1) + ": IMPOSSIBLE")
elif circ[-1] == circ[0] or circ[-1] == circ[-2]:
impossible = True
print("Case #" + str(i+1) + ": IMPOSSIBLE")
else:
for j in range(1, N-1):
if circ[j] == circ[j-1] or circ[j] == circ[j+1]:
impossible = True
print("Case #" + str(i+1) + ": IMPOSSIBLE")
break
if not impossible:
print("Case #" + str(i+1) + ": " + ''.join(circ))
'''
'''
import math as math
t = int(input())
for i in range(t):
N, k = [int(x) for x in input().split()]
h = []
r = []
maxIndex = 0
maxR = 0
maxH = 0
for j in range(N):
rj, hj = [int(x) for x in input().split()]
print(math.pi*(2*rj*hj + rj**2))
if (2*rj*hj + rj**2 > 2*maxR*maxH + maxR**2):
maxR = rj
maxH = hj
maxIndex = j
h.append(hj)
r.append(rj)
del h[maxIndex]
del r[maxIndex]
k = k - 1
hr = [a*b for a,b in zip(h,r)]
hr = sorted(hr)
for a in hr: print(2*math.pi*a)
result = math.pi*(maxR**2 + 2*(sum(hr[len(hr)-k:]) + maxR*maxH))
print("Case #" + str(i+1) + ": " + str(result))
'''
t = int(input())
for i in range(t):
Ac, Aj = [int(x) for x in input().split()]
if ((Ac == 1 and Aj == 0) or (Ac == 0 and Aj == 1)):
a1s, a1f = [int(x) for x in input().split()]
print("Case #" + str(i+1) + ": " + str(2))
elif (Ac == 1 and Aj == 1):
a1s, a1f = [int(x) for x in input().split()]
a2s, a2f = [int(x) for x in input().split()]
print("Case #" + str(i+1) + ": " + str(2))
else:
a1s, a1f = [int(x) for x in input().split()]
a2s, a2f = [int(x) for x in input().split()]
if(a1s < a2s):
res = min(abs(a2f-a1s), abs(a1f+1440-a2s))
else:
res = min(abs(a1f-a2s), abs(a2f+1440-a1s))
if(res > 720):
print("Case #" + str(i+1) + ": " + str(4))
else:
print("Case #" + str(i+1) + ": " + str(2))
| [
"[email protected]"
] | |
d63d12e23a52921f34d12e3a1cdc0d1b61ad85ac | 9249947c07f8addf64dd3d2a2f9f37d379f83921 | /client_tools/svc/scratch.py | 4b5fae33a95ec2120c0dc9b330a9788b34c10759 | [
"MIT"
] | permissive | operepo/ope | eb71aa763d157416009d7c3052ace11852660e0a | 018c82af46845315795c67c36801e2a128f515d5 | refs/heads/master | 2023-08-08T15:05:28.592589 | 2023-07-25T00:22:24 | 2023-07-25T00:22:24 | 96,855,111 | 12 | 11 | MIT | 2023-03-03T15:10:34 | 2017-07-11T05:42:14 | Perl | UTF-8 | Python | false | false | 48,207 | py | import pythoncom
import win32serviceutil
import win32service
import win32event
import servicemanager
import socket
import time
import datetime
import sys
import os
import logging
import random
from win32com.shell import shell, shellcon
import ntsecuritycon
import win32security
import win32api
import win32gui
import win32ui
import win32con
import win32gui_struct
import win32ts
import win32process
import win32profile
import ctypes
import wmi
# TODO - Set recovery options for service so it restarts on failure
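# One way to do that (an install-time sketch, not wired up in this file) is
# the Windows "sc" tool; the service name matches OPEService._svc_name_ below:
#   sc failure OPEService reset= 86400 actions= restart/60000/restart/60000/restart/60000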
# Most event notification support lives around win32gui
GUID_DEVINTERFACE_USB_DEVICE = "{A5DCBF10-6530-11D2-901F-00C04FB951ED}"
ROOT_FOLDER = os.path.join(shell.SHGetFolderPath(0, shellcon.CSIDL_COMMON_APPDATA, None, 0), "ope")
TMP_FOLDER = os.path.join(ROOT_FOLDER, "tmp")
LOG_FOLDER = os.path.join(TMP_FOLDER, "log")
SCREEN_SHOTS_FOLDER = os.path.join(TMP_FOLDER, "screen_shots")
BINARIES_FOLDER = os.path.join(ROOT_FOLDER, "ope_laptop_binaries")
EVERYONE, domain, type = win32security.LookupAccountName("", "Everyone")
ADMINISTRATORS, domain, type = win32security.LookupAccountName("", "Administrators")
# CURRENT_USER, domain, type = win32security.LookupAccountName("", win32api.GetUserName())
CURRENT_USER = None
try:
CURRENT_USER, domain, type = win32security.LookupAccountName("", "huskers")
except:
CURRENT_USER = None
if CURRENT_USER is None:
try:
CURRENT_USER, domain, type = win32security.LookupAccountName("", "ray")
except:
CURRENT_USER = None
SYSTEM_USER, domain, type = win32security.LookupAccountName("", "System")
# Disable ALL nics if this is set
DISABLE_ALL_NICS = False
DEBUG_NICS = False
if os.path.isfile(os.path.join(ROOT_FOLDER, ".debug_nics")):
DEBUG_NICS = True
# Disable sshot if this is set
DISABLE_SSHOT = False
if os.path.isfile(os.path.join(ROOT_FOLDER, ".disable_sshot")):
DISABLE_SSHOT = True
system_nics = ["WAN Miniport (IP)", "WAN Miniport (IPv6)", "WAN Miniport (Network Monitor)",
"WAN Miniport (PPPOE)", "WAN Miniport (PPTP)", "WAN Miniport (L2TP)", "WAN Miniport (IKEv2)",
"WAN Miniport (SSTP)", "Microsoft Wi-Fi Direct Virtual Adapter", "Teredo Tunneling Pseudo-Interface",
"Microsoft Kernel Debug Network Adapter",
]
approved_nics = ["Realtek USB GbE Family Controller",
"Thinkpad USB 3.0 Ethernet Adapter"]
if DEBUG_NICS is True:
# Add these nics so we don't cut off network on our dev machines
approved_nics.append("Intel(R) 82579LM Gigabit Network Connection")
approved_nics.append("150Mbps Wireless 802.11bgn Nano USB Adapter")
approved_nics.append("Intel(R) PRO/1000 MT Network Connection")
approved_nics.append("Intel(R) Centrino(R) Wireless-N 1000")
def show_cacls(filename):
print("\n\n")
for line in os.popen("cacls %s" % filename).read().splitlines():
print(line)
def set_ope_permissions():
global ROOT_FOLDER, LOG_FOLDER, SCREEN_SHOTS_FOLDER, BINARIES_FOLDER, TMP_FOLDER
# Make sure folders exits
if not os.path.isdir(ROOT_FOLDER):
os.makedirs(ROOT_FOLDER)
if not os.path.isdir(TMP_FOLDER):
os.makedirs(TMP_FOLDER)
if not os.path.isdir(LOG_FOLDER):
os.makedirs(LOG_FOLDER)
if not os.path.isdir(SCREEN_SHOTS_FOLDER):
os.makedirs(SCREEN_SHOTS_FOLDER)
if not os.path.isdir(BINARIES_FOLDER):
os.makedirs(BINARIES_FOLDER)
# Make sure the ope-sshot.log file exists so we can set permissions on it later
if not os.path.isfile(os.path.join(LOG_FOLDER, "ope-sshot.log")):
f = open(os.path.join(LOG_FOLDER, "ope-sshot.log"), "w")
f.close()
# --- Set permissions on OPE folder - viewable but not writable
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(ROOT_FOLDER, win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags,
ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(ROOT_FOLDER, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
# --- Set permissions on TMP folder - viewable but not writable
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(TMP_FOLDER, win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags,
ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(TMP_FOLDER, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
# --- Set permissions on ope_laptop_binaries folder - viewable but not writable
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(BINARIES_FOLDER, win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags,
ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
# Set on all folders
win32security.SetFileSecurity(BINARIES_FOLDER, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
for root, dirs, files in os.walk(BINARIES_FOLDER, topdown=False):
for f in files:
try:
win32security.SetFileSecurity(os.path.join(root, f), win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
except:
logging.info("Error setting file permissions " + os.path.join(root, f))
for d in dirs:
try:
win32security.SetFileSecurity(os.path.join(root, d), win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
except:
logging.info("Error setting folder permissions " + os.path.join(root, d))
# win32security.TreeSetNamedSecurityInfo(BINARIES_FOLDER, win32security.SE_FILE_OBJECT, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, None, None, sd, None)
# --- Set permissions on the log folder - create file or append only
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(LOG_FOLDER, win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, flags,
ntsecuritycon.FILE_ADD_FILE | ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(LOG_FOLDER, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
# --- Set permissions on the log file for screen shots - append only
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(os.path.join(LOG_FOLDER, "ope-sshot.log"), win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, flags,
ntsecuritycon.FILE_APPEND_DATA | ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(os.path.join(LOG_FOLDER, "ope-sshot.log"), win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
# --- Set permissions on the sshot folder - let students create but not modify/delete sshots
# Set inheritance flags
flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
sd = win32security.GetFileSecurity(SCREEN_SHOTS_FOLDER, win32security.DACL_SECURITY_INFORMATION)
# Create the blank DACL and add our ACE's
dacl = win32security.ACL()
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, ADMINISTRATORS)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, SYSTEM_USER)
if not CURRENT_USER is None:
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, ntsecuritycon.FILE_ALL_ACCESS, CURRENT_USER)
dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, flags,
ntsecuritycon.FILE_ADD_FILE | ntsecuritycon.FILE_GENERIC_READ | ntsecuritycon.FILE_GENERIC_EXECUTE,
EVERYONE)
# Set our ACL
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(SCREEN_SHOTS_FOLDER, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)
# Possible to set whole tree?
# win32security.TreeSetNamedSecurityInfo(folder, win32security.SE_FILE_OBJECT, win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, None, None, sd, None)
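# The ACL blocks above all follow one pattern; a compact helper along these
# lines (a sketch only, not called by the service) would capture it:
#
# def _apply_dacl(path, everyone_mask):
#     flags = win32security.OBJECT_INHERIT_ACE | win32security.CONTAINER_INHERIT_ACE
#     sd = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
#     dacl = win32security.ACL()
#     accounts = [ADMINISTRATORS, SYSTEM_USER] + ([CURRENT_USER] if CURRENT_USER else [])
#     for account in accounts:
#         dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags,
#                                    ntsecuritycon.FILE_ALL_ACCESS, account)
#     dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION_DS, flags, everyone_mask, EVERYONE)
#     sd.SetSecurityDescriptorDacl(1, dacl, 0)
#     win32security.SetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION |
#                                   win32security.UNPROTECTED_DACL_SECURITY_INFORMATION, sd)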
def scan_com_ports():
# TODO - Need Debug
# Use WMI to pull a list of com ports
w = wmi.WMI()
logging.info("Scanning USB/Serial COM Ports...")
# Scan for PNP Devices that are ports
for port in w.Win32_PNPEntity(PNPClass="Ports"):
logging.info("PNP COM Port Found: " + str(port.name))
if port.Status == "OK":
# Port is on and working - turn it off
logging.info("COM Port " + str(port.Caption) + " is on - disabling...")
try:
port.Disable()
except Exception as ex:
logging.info("ERROR!!! " + str(ex))
else:
logging.info("COM Port " + str(port.Caption) + " is off...")
# Scan for Serial devices (may not be PNP)
for port in w.Win32_SerialPort():
print("Serial Port Found: " + str(port.name))
if port.Status == "OK":
logging.info("Serial Port " + str(port.Caption) + " is on - disabling...")
try:
port.Disable()
except Exception as ex:
logging.info("ERROR!!! " + str(ex))
else:
logging.info("Serial Port " + str(port.Caption) + " is off...")
return
def scanNics():
# May need to call this before calling this function so that COM works
# pythoncom.CoInitialize() - called in the main function
global DISABLE_ALL_NICS, system_nics, approved_nics
if DISABLE_ALL_NICS is True:
approved_nics = []
logging.info("scanning for unauthorized nics...")
import win32com.client
strComputer = "."
objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
objSWbemServices = objWMIService.ConnectServer(strComputer,"root\cimv2")
colItems = objSWbemServices.ExecQuery("Select * from Win32_NetworkAdapter")
for objItem in colItems:
if objItem.Name in approved_nics:
# logging.info("***Device found - on approved list: " + str(objItem.Name) + str(objItem.NetConnectionID))
dev_id = objItem.NetConnectionID
if dev_id:
logging.info(" ---> !!! Approved device !!!, enabling..." + str(dev_id))
cmd = "netsh interface set interface \"" + dev_id + "\" admin=ENABLED"
# print(cmd)
os.system(cmd)
else:
# print(" ---> unauthorized, not plugged in...")
pass
continue
elif objItem.Name in system_nics:
# logging.info("***Device found - system nic - ignoring: " + str(objItem.Name))
continue
else:
# logging.info("***Device found :" + str(objItem.Name))
dev_id = objItem.NetConnectionID
if dev_id:
logging.info(" ---> !!! unauthorized !!!, disabling..." + str(dev_id))
cmd = "netsh interface set interface \"" + dev_id + "\" admin=DISABLED"
# print(cmd)
os.system(cmd)
else:
# print(" ---> unauthorized, not plugged in...")
pass
continue
# print("========================================================")
# print("Adapter Type: ", objItem.AdapterType)
# print("Adapter Type Id: ", objItem.AdapterTypeId)
# print("AutoSense: ", objItem.AutoSense)
# print("Availability: ", objItem.Availability)
# print("Caption: ", objItem.Caption)
# print("Config Manager Error Code: ", objItem.ConfigManagerErrorCode)
# print("Config Manager User Config: ", objItem.ConfigManagerUserConfig)
# print("Creation Class Name: ", objItem.CreationClassName)
# print("Description: ", objItem.Description)
# print("Device ID: ", objItem.DeviceID)
# print("Error Cleared: ", objItem.ErrorCleared)
# print("Error Description: ", objItem.ErrorDescription)
# print("Index: ", objItem.Index)
# print("Install Date: ", objItem.InstallDate)
# print("Installed: ", objItem.Installed)
# print("Last Error Code: ", objItem.LastErrorCode)
# print("MAC Address: ", objItem.MACAddress)
# print("Manufacturer: ", objItem.Manufacturer)
# print("Max Number Controlled: ", objItem.MaxNumberControlled)
# print("Max Speed: ", objItem.MaxSpeed)
# print("Name: ", objItem.Name)
# print("Net Connection ID: ", objItem.NetConnectionID)
# print("Net Connection Status: ", objItem.NetConnectionStatus)
# z = objItem.NetworkAddresses
# if z is None:
# a = 1
# else:
# for x in z:
# print("Network Addresses: ", x)
# print("Permanent Address: ", objItem.PermanentAddress)
# print("PNP Device ID: ", objItem.PNPDeviceID)
# z = objItem.PowerManagementCapabilities
# if z is None:
# a = 1
# else:
# for x in z:
# print("Power Management Capabilities: ", x)
# print("Power Management Supported: ", objItem.PowerManagementSupported)
# print("Product Name: ", objItem.ProductName)
# print("Service Name: ", objItem.ServiceName)
# print("Speed: ", objItem.Speed)
# print("Status: ", objItem.Status)
# print("Status Info: ", objItem.StatusInfo)
# print("System Creation Class Name: ", objItem.SystemCreationClassName)
# print("System Name: ", objItem.SystemName)
# print("Time Of Last Reset: ", objItem.TimeOfLastReset)
class OPEService(win32serviceutil.ServiceFramework):
_svc_name_ = 'OPEService'
_svc_display_name_ = 'OPEService'
_svc_description_ = "Open Prison Education Service"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
socket.setdefaulttimeout(60)
self.isAlive = True
# Setup data folders and set permissions
set_ope_permissions()
# Setup logging
logging.basicConfig(
filename=os.path.join(LOG_FOLDER, 'ope-service.log'),
level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S',
format='[ope-service] %(asctime)-15s %(levelname)-7.7s %(message)s'
)
logging.info("service init")
# register for a device notification - we pass our service handle
# instead of a window handle.
filter = win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(
GUID_DEVINTERFACE_USB_DEVICE)
self.hdn = win32gui.RegisterDeviceNotification(self.ssh, filter,
win32con.DEVICE_NOTIFY_SERVICE_HANDLE)
# Override the base class so we can accept additional events.
def GetAcceptedControls(self):
# say we accept them all.
rc = win32serviceutil.ServiceFramework.GetAcceptedControls(self)
rc |= win32service.SERVICE_ACCEPT_PARAMCHANGE \
| win32service.SERVICE_ACCEPT_NETBINDCHANGE \
| win32service.SERVICE_CONTROL_DEVICEEVENT \
| win32service.SERVICE_ACCEPT_HARDWAREPROFILECHANGE \
| win32service.SERVICE_ACCEPT_POWEREVENT \
| win32service.SERVICE_ACCEPT_SESSIONCHANGE
return rc
# All extra events are sent via SvcOtherEx (SvcOther remains as a
# function taking only the first args for backwards compat)
def SvcOtherEx(self, control, event_type, data):
# This is only showing a few of the extra events - see the MSDN
# docs for "HandlerEx callback" for more info.
if control == win32service.SERVICE_CONTROL_DEVICEEVENT:
info = win32gui_struct.UnpackDEV_BROADCAST(data)
msg = "A device event occurred: %x - %s" % (event_type, info)
scanNics()
elif control == win32service.SERVICE_CONTROL_HARDWAREPROFILECHANGE:
msg = "A hardware profile changed: type=%s, data=%s" % (event_type, data)
elif control == win32service.SERVICE_CONTROL_POWEREVENT:
msg = "A power event: setting %s" % data
elif control == win32service.SERVICE_CONTROL_SESSIONCHANGE:
# data is a single elt tuple, but this could potentially grow
# in the future if the win32 struct does
msg = "Session event: type=%s, data=%s" % (event_type, data)
else:
msg = "Other event: code=%d, type=%s, data=%s" \
% (control, event_type, data)
logging.info("Event " + msg)
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
0xF000, # generic message
(msg, '')
)
def SvcStop(self):
self.isAlive = False
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
self.isAlive = True
logging.info("Service running")
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))
self.main()
# win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
# Write a stop message.
logging.info("Service Stopped")
servicemanager.LogMsg(
servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STOPPED,
(self._svc_name_, '')
)
def runScreenShotApp3_old(self):
# Get the current security token
token = win32security.OpenProcessToken(win32process.GetCurrentProcess(),
win32security.TOKEN_ALL_ACCESS)
# Make a copy
#token2 = win32security.DuplicateToken(token)
token2 = win32security.DuplicateTokenEx(token,
win32security.SecurityImpersonation,
win32security.TOKEN_ALL_ACCESS,
win32security.TokenPrimary)
# Find the session id - we will grab the console/keyboard
#proc_id = win32process.GetCurrentProcessId()
#session_id = win32ts.ProcessIdToSessionId(proc_id)
session_id = win32ts.WTSGetActiveConsoleSessionId()
# Make this token target our session
win32security.SetTokenInformation(token2, win32security.TokenSessionId, session_id)
def runScreenShotApp(self):
global DISABLE_SSHOT
if DISABLE_SSHOT is True:
return
# Get the session id for the console
session_id = win32ts.WTSGetActiveConsoleSessionId()
if session_id == 0xffffffff:
# User not logged in right now?
logging.info("No console user")
return None
# logging.info("Got Console: " + str(session_id))
# Login to the terminal service to get the user token for the console id
svr = win32ts.WTSOpenServer(".")
user_token = win32ts.WTSQueryUserToken(session_id)
# logging.info("User Token " + str(user_token))
# Copy the token
user_token_copy = win32security.DuplicateTokenEx(user_token,
win32security.SecurityImpersonation,
win32security.TOKEN_ALL_ACCESS,
win32security.TokenPrimary)
# Put this token in the logged in session
win32security.SetTokenInformation(user_token_copy, win32security.TokenSessionId, session_id)
# Switch to the user
# win32security.ImpersonateLoggedOnUser(user_token)
# logging.info("Impersonating " + win32api.GetUserName())
# Run the screen shot app
# app_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
# cmd = os.path.join(app_path, "sshot\\dist\\sshot.exe")
cmd = os.path.join(ROOT_FOLDER, "Services\\sshot\\sshot.exe") # "c:\\programdata\\ope\\bin\\sshot.exe"
# cmd = "cmd.exe"
logging.info("Running sshot app " + cmd)
# Use win create process function
si = win32process.STARTUPINFO()
si.dwFlags = win32process.STARTF_USESHOWWINDOW
si.wShowWindow = win32con.SW_NORMAL
# si.lpDesktop = "WinSta0\Default" # WinSta0\Winlogon
si.lpDesktop = ""
        # Set up the environment for the user
environment = win32profile.CreateEnvironmentBlock(user_token, False)
try:
(hProcess, hThread, dwProcessId, dwThreadId) = win32process.CreateProcessAsUser(user_token_copy,
None, # AppName (really command line, blank if cmd line supplied)
"\"" + cmd + "\"", # Command Line (blank if app supplied)
None, # Process Attributes
None, # Thread Attributes
0, # Inherits Handles
win32con.NORMAL_PRIORITY_CLASS, # or win32con.CREATE_NEW_CONSOLE,
environment, # Environment
os.path.dirname(cmd), # Curr directory
si) # Startup info
# logging.info("Process Started: " + str(dwProcessId))
# logging.info(hProcess)
except Exception as e:
logging.info("Error launching process: " + str(e))
# logging.info(os.system(cmd))
# Return us to normal security
# win32security.RevertToSelf()
# Cleanup
win32ts.WTSCloseServer(svr)
user_token.close()
user_token_copy.close()
return
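    # Design note: the service itself runs in session 0, so a plain
    # CreateProcess would put sshot.exe on an invisible desktop. Duplicating
    # the console user's token and handing it to CreateProcessAsUser (as
    # above) is the standard way to launch a visible process in the
    # interactive session.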
def runScreenShotApp2_old(self):
console_id = win32ts.WTSGetActiveConsoleSessionId()
if console_id == 0xffffffff:
# User not logged in right now?
logging.info("No console user")
return None
dc = None
logging.info("Got console: " + str(console_id))
# Get processes running on this console
svr = win32ts.WTSOpenServer(".")
user_token = win32ts.WTSQueryUserToken(console_id)
logging.info("User Token " + str(user_token))
# hwnd = win32gui.GetDC(win32con.HWND_DESKTOP) # win32gui.GetDesktopWindow()
# dc = ctypes.windll.user32.GetDC(win32con.HWND_DESKTOP)
# logging.info("DC before impersonation " + str(dc))
# win32gui.ReleaseDC(win32con.HWND_DESKTOP, dc)
# Switch to the user
win32security.ImpersonateLoggedOnUser(user_token)
logging.info("Impersonating " + win32api.GetUserName())
app_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
cmd = os.path.join(app_path, "sshot\\dist\\sshot.exe")
logging.info("Running sshot app " + cmd)
logging.info(os.system(cmd))
# hwnd = ctypes.windll.user32.GetDC(win32con.HWND_DESKTOP)
# logging.info("HWND after impersonation " + str(hwnd))
# ps_list = win32ts.WTSEnumerateProcesses(svr, 1, 0)
# for ps in ps_list:
# logging.info("PS " + str(ps))
win32ts.WTSCloseServer(svr)
# Revert back to normal user
win32security.RevertToSelf()
user_token.close()
return
def grabScreenShot_old(self):
# Grab the screen shot and save it to the logs folder
# Get the hwnd for the current desktop window
try:
hwnd = win32gui.GetDesktopWindow()
# hwnd = self.getDesktopHWND()
l, t, r, b = win32gui.GetWindowRect(hwnd)
w = r - l
h = b - t
logging.info("SC - HWND " + str(hwnd) + " " + str(w) + "/" + str(h))
dc = win32gui.GetDC(win32con.HWND_DESKTOP)
logging.info("DC " + str(dc))
dcObj = win32ui.CreateDCFromHandle(dc)
drawDC = dcObj.CreateCompatibleDC()
logging.info("drawDC " + str(drawDC))
# cDC = dcObj.CreateCompatibleDC() # Do we need this since it is the desktop dc?
bm = win32ui.CreateBitmap()
bm.CreateCompatibleBitmap(dcObj, w, h)
drawDC.SelectObject(bm)
drawDC.BitBlt((0, 0), (w, h), dcObj, (0, 0), win32con.SRCCOPY)
bm.SaveBitmapFile(drawDC, os.path.join(SCREEN_SHOTS_FOLDER, "test.jpeg"))
win32gui.DeleteObject(bm.GetHandle())
drawDC.DeleteDC()
dcObj.DeleteDC()
win32gui.ReleaseDC(win32con.HWND_DESKTOP, dc)
# dc = win32gui.GetWindowDC(hwnd)
# logging.info("DC " + str(dc))
# dcObj = win32ui.CreateDCFromHandle(dc)
# logging.info("dcObj " + str(dcObj))
# cDC = dcObj.CreateCompatibleDC()
# logging.info("cDC " + str(cDC))
# bm = win32ui.CreateBitmap()
# logging.info("bm " + str(bm))
# bm.CreateCompatibleBitmap(dcObj, w, h)
# cDC.SelectObject(bm)
# r = cDC.BitBlt((0,0), (w,h), dcObj, (0,0), win32con.SRCCOPY)
# logging.info("bitblt " + str(r))
# bm.SaveBitmapFile(cDC, os.path.join(SCREEN_SHOTS_FOLDER, "test.jpeg"))
# dcObj.DeleteDC()
# cDC.DeleteDC()
# win32gui.ReleaseDC(hwnd, dc)
# win32gui.DeleteObject(bm.GetHandle())
except Exception as ex:
logging.info("Error grabbing screenshot: " + str(ex))
# m = ImageGrab.grab()
# Save the file
# p = os.path.join(SCREEN_SHOTS_FOLDER, str(datetime.datetime.now()) + ".png")
# im.save(p, optimize=True)
def main(self):
rc = None
nic_scan_time = 0
sshot_time = time.time() + 60 # Start by waiting at least a minute before trying
# Need this so scanNics doesn't fail
pythoncom.CoInitialize()
while rc != win32event.WAIT_OBJECT_0:
# Grab screen shots
if sshot_time - time.time() < 0:
# Reset the sshot_timer = now + 15 secs + up to 10 minutes rand value
sshot_time = time.time() + 15 + random.randint(0, 600)
# Time to take another screen shot
try:
self.runScreenShotApp()
except Exception as ex:
logging.error("Error grabbing screen shot: " + str(ex))
# Scan for inserted NICS
if time.time() - nic_scan_time > 60:
scanNics()
nic_scan_time = time.time()
# Grab event logs
# Grab firewall logs
# Run virus scanner
# Security checks - is current user the correct user?
# Is online?
            # Block until a stop event arrives or the timeout elapses.
            # Note: WaitForSingleObject takes milliseconds, so rest=5 is a
            # 5 ms wait; together with the sleep below the loop ticks about
            # twice a second.
            rest = 5  # milliseconds
rc = win32event.WaitForSingleObject(self.hWaitStop, rest)
time.sleep(0.5)
# Cleanup
pythoncom.CoUninitialize()
if __name__ == '__main__':
if len(sys.argv) == 1:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(OPEService)
servicemanager.StartServiceCtrlDispatcher()
else:
win32serviceutil.HandleCommandLine(OPEService)
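# Usage sketch (these subcommands are handled by
# win32serviceutil.HandleCommandLine above):
#   python <this script> install   - register the service with the SCM
#   python <this script> start     - start it
#   python <this script> stop      - stop it
#   python <this script> debug     - run in the foreground for troubleshooting
#   python <this script> remove    - unregister it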
from firmware_variables import *
from firmware_variables.load_option import LoadOptionAttributes, LoadOption
from firmware_variables.device_path import DevicePathList, DevicePath, DevicePathType, MediaDevicePathSubtype, EndOfHardwareDevicePathSubtype
from firmware_variables.utils import verify_uefi_firmware, string_to_utf16_bytes, utf16_string_from_bytes
import struct
import collections
import uuid
import sys
import io
import traceback
import win32file
import win32con
import winioctlcon
import pythoncom
import wmi
import ctypes
from ctypes import wintypes
import winerror
# use_last_error=True so ctypes captures GetLastError reliably for us
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
# Register wapi functions
kernel32.FindFirstVolumeW.restype = wintypes.HANDLE
kernel32.FindNextVolumeW.argtypes = (wintypes.HANDLE,
wintypes.LPWSTR,
wintypes.DWORD)
kernel32.FindVolumeClose.argtypes = (wintypes.HANDLE,)
def FindFirstVolume():
v_name = ctypes.create_unicode_buffer(" " * 255)
h = kernel32.FindFirstVolumeW(v_name, 255)
if h == win32file.INVALID_HANDLE_VALUE:
raise Exception("Invalid Handle for FindFirstVolume")
return h, v_name.value
def FindNextVolume(h):
v_name = ctypes.create_unicode_buffer(" " * 255)
if kernel32.FindNextVolumeW(h, v_name, 255) != 0:
return v_name.value
# Error if we get here
    e = ctypes.get_last_error()
if e == winerror.ERROR_NO_MORE_FILES:
FindVolumeClose(h)
return None
raise Exception("Error calling FindNextVolumeW (%s)" % e)
def FindVolumeClose(h):
if kernel32.FindVolumeClose(h) == 0:
# Failed?
raise Exception("FindVolumeClose failed on handle (%s)" % h)
# Boot Variables
# Boot#### - #### Hex value, no 0x or h, for the item
# BootCurrent - Option selected for the current boot
# BootNext - Boot option for next boot only
# BootOrder - List of boot options in order
# BootOrderSupport - Types of boot options supported by the boot manager (read only)
# Driver#### - Driver load option
# DriverOrder - Ordered list of drivers to load
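# Example: entry 1 lives in the variable named "Boot0001"
# ("Boot{:04X}".format(1) -> "Boot0001"); the get_boot_entry /
# get_parsed_boot_entry calls below take the bare integer id rather than
# this string.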
"""
efi boot item:
struct {
UINT32 // Attributes - bit mask
UINT16 // File Path List Length - len in bytes of whole file path list Optional Data starts at sizeof(UINT32) + sizeof(UINT16) + strsize(Description) + FilePathListLengh
CHAR16 // Description - Null term string
EFI_DEVICE_PATH_PROTOCOL // FilePathList[]
UINT8 //Optional Data - calculate size from starting offset to size of whole load_option structure
}
"""
#pythoncom.CoInitialize()
#pythoncom.CoUnInitialize()
DISKDRIVES_QUERY = "SELECT * FROM Win32_DiskDrive"
VOLUME_QUERY = "SELECT * FROM Win32_Volume"
VOLUME_CLUSTER_SIZE_QUERY = "SELECT Name, Blocksize FROM Win32_Volume WHERE FileSystem='NTFS'"
DISKDRIVE_TO_DISKPARTITIONS_QUERY = r'SELECT * FROM Win32_DiskDriveToDiskPartition WHERE Antecedent="{}"'
DISKPARTITION_QUERY = r'SELECT * FROM Win32_DiskPartition WHERE DeviceID={}'
class PhysicalDrive():
DRIVES = dict()
def __init__(self, drive_id=0, wmi_obj=None):
self.drive_id = drive_id
self.win_path = r"\\.\PHYSICALDRIVE" + str(self.drive_id)
self.wmi_obj = wmi_obj
def get_partitions(self):
partitions = list()
# Get the mapping from disk to partition
res = self.wmi_obj.associators("MSFT_DiskToPartition")
#self.wmi_obj.associators("Win32_DiskDriveToDiskPartition")
for r in res:
#print(r)
partitions.append(r)
return partitions
def IsBoot(self):
if self.wmi_obj is None:
print("WMI OBJ is NULL!")
return False
return self.wmi_obj.IsBoot
def __repr__(self):
DeviceID = None
guid = None
if self.wmi_obj is not None:
DeviceID = self.wmi_obj # self.wmi_obj.DeviceID
#guid = self.wmi_obj.Guid
return "Drive <" + str(self.win_path) + ", " + str(DeviceID) + ">"
@staticmethod
def get_drives():
# https://stackoverflow.com/questions/56784915/python-wmi-can-we-really-say-that-c-is-always-the-boot-drive
w = wmi.WMI(namespace='root/Microsoft/Windows/Storage')
res = w.MSFT_Disk() #w.Win32_DiskDrive()
PhysicalDrive.DRIVES = dict()
for r in res:
#print(r)
disk_number = r.Number # r.Index
physical_path = r'\\.\PHYSICALDRIVE' + str(disk_number) # r.DeviceID
d = PhysicalDrive(disk_number, r)
PhysicalDrive.DRIVES[physical_path] = d
return PhysicalDrive.DRIVES
def findVolumeGuids_broken():
DiskExtent = collections.namedtuple(
'DiskExtent', ['DiskNumber', 'StartingOffset', 'ExtentLength'])
Volume = collections.namedtuple(
'Volume', ['Guid', 'MediaType', 'DosDevice', 'Extents'])
found = []
h, guid = FindFirstVolume()
while h and guid:
#print (guid)
#print (guid, win32file.GetDriveType(guid),
# win32file.QueryDosDevice(guid[4:-1]))
hVolume = win32file.CreateFile(
guid[:-1], win32con.GENERIC_READ,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None, win32con.OPEN_EXISTING, win32con.FILE_ATTRIBUTE_NORMAL, None)
extents = []
driveType = win32file.GetDriveType(guid)
if driveType in [win32con.DRIVE_REMOVABLE, win32con.DRIVE_FIXED]:
x = win32file.DeviceIoControl(
hVolume, winioctlcon.IOCTL_VOLUME_GET_VOLUME_DISK_EXTENTS,
None, 512, None)
instream = io.BytesIO(x)
numRecords = struct.unpack('<q', instream.read(8))[0]
fmt = '<qqq'
sz = struct.calcsize(fmt)
while 1:
b = instream.read(sz)
if len(b) < sz:
break
rec = struct.unpack(fmt, b)
extents.append( DiskExtent(*rec) )
vinfo = Volume(guid, driveType, win32file.QueryDosDevice(guid[4:-1]),
extents)
found.append(vinfo)
guid = FindNextVolume(h)
return found
def find_efi_partition():
drives = PhysicalDrive.get_drives()
for drive_path in drives:
drive = drives[drive_path]
#print(drive)
if drive.IsBoot() == True:
# Found the boot drive, look for the EFI partition
partitions = drive.get_partitions()
#print(partitions)
for part in partitions:
if part.GptType == "{c12a7328-f81f-11d2-ba4b-00a0c93ec93b}":
print("Found EFI Part: " + part.Guid)
print(part)
sector_size = int(drive.wmi_obj.PhysicalSectorSize)
part_starting_sector = int(int(part.Offset) / sector_size)
part_size = int(int(part.Size) / sector_size)
part_guid = part.Guid
part_number = part.PartitionNumber
return (part, part_guid, part_number, part_starting_sector, part_size)
print("ERROR - Unable to find EFI partition!")
return None
def parse_uefi_data(data):
    # UEFI stores strings as NUL-terminated UTF-16-LE (CHAR16) data
    return str(data, "utf-16-le")
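# e.g. parse_uefi_data(b'O\x00P\x00E\x00') -> 'OPE'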
# Get all entries
with privileges():
try:
verify_uefi_firmware()
except:
print("Not UEFI Bios!")
sys.exit(0)
found_entries = dict()
# Always BCDOBJECT={9dea862c-5cdd-4e70-acc1-f32b344d4795} w some other data??? - is boot manager id
WIN_OPTIONAL_DATA = b'WINDOWS\x00\x01\x00\x00\x00\x88\x00\x00\x00x\x00\x00\x00B\x00C\x00D\x00O\x00B\x00J\x00E\x00C\x00T\x00=\x00{\x009\x00d\x00e\x00a\x008\x006\x002\x00c\x00-\x005\x00c\x00d\x00d\x00-\x004\x00e\x007\x000\x00-\x00a\x00c\x00c\x001\x00-\x00f\x003\x002\x00b\x003\x004\x004\x00d\x004\x007\x009\x005\x00}\x00\x00\x00.\x00\x01\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x7f\xff\x04\x00'
#b'WINDOWS\x00\x01\x00\x00\x00\x88\x00\x00\x00x\x00\x00\x00B\x00C\x00D\x00O\x00B\x00J\x00E\x00C\x00T\x00=\x00{\x009\x00d\x00e\x00a\x008\x006\x002\x00c\x00-\x005\x00c\x00d\x00d\x00-\x004\x00e\x007\x000\x00-\x00a\x00c\x00c\x001\x00-\x00f\x003\x002\x00b\x003\x004\x004\x00d\x004\x007\x009\x005\x00}\x00\x00\x00.\x00\x01\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x7f\xff\x04\x00'
#b'WINDOWS\x00\x01\x00\x00\x00\x88\x00\x00\x00x\x00\x00\x00B\x00C\x00D\x00O\x00B\x00J\x00E\x00C\x00T\x00=\x00{\x009\x00d\x00e\x00a\x008\x006\x002\x00c\x00-\x005\x00c\x00d\x00d\x00-\x004\x00e\x007\x000\x00-\x00a\x00c\x00c\x001\x00-\x00f\x003\x002\x00b\x003\x004\x004\x00d\x004\x007\x009\x005\x00}\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x7f\xff\x04\x00'
WIN_DEVICE_PATH_DATA = string_to_utf16_bytes("\\EFI\\Microsoft\\Boot\\bootmgfw.efi")
#b'\x02\x00\x00\x00\x00\x18\x0e\x00\x00\x00\x00\x00\x00 \x03\x00\x00\x00\x00\x00\xfc?\x1bs{\xb5rL\x91\xf7\xbar\xb8\xbe\x11h\x02\x02'
"""
type - 1 byte - Type 4 - MEDIA_DEVICE_PATH (added during tobytes)
sub-type - 1 byte - Type 1 - HARD_DRIVE (added during tobytes)
length - 2 bytes - len of this structure (42 bytes? added during tobytes)
partition number - 4 bytes - 0 means whole disk, 1 = first part, 1-4 valid for MBR, 1-count valid for GPT
partition start - 8 bytes - Starting LBA of partition
partition size - 8 bytes - Size of part in logical blocks
Part Signature - 16 bytes - 0 if part type is 0, type 1 = mbr sig in first 4 bytes, type 2= 16 byte signature (guid?)
part format - 1 byte - 0x01 - mbr, 0x02 - guid parition
Sig Type - 1 byte - 0x00 - no signature, 0x01 - 32 bit signature from address 0x1b8 of type 0x01 mbr, 0x02 - GUID signature
"""
boot_part = find_efi_partition()
if boot_part is None:
print("Error - Unable to find efi boot part!")
sys.exit(-1)
(part, part_guid, part_number, part_starting_sector, part_size) = boot_part
# bytes_le - little endian for first half bytes
packed_guid = uuid.UUID(part_guid).bytes_le
# Pack data into this structure:
# part number - 4 bytes, part start - 8 bytes, part size - 8 bytes,
# part guid - 16 bytes, part format - 1 byte, sig type - 1 byte
WIN_HARD_DRIVE_MEDIA_PATH = b''
WIN_HARD_DRIVE_MEDIA_PATH = struct.Struct("<LQQ16sBB").pack(
part_number, # Long - 4 bytes
part_starting_sector, # long long - 8 bytes
part_size, # long long - 8 bytes
packed_guid, # 16 bytes,
0x02, # 1 byte - 0x01 for mbr, 0x02 for gpt
0x02, # 1 byte - 0x00 none, 0x01 mbr, 0x02 gpt
)
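# Sanity check (sketch): the packed payload is 4+8+8+16+1+1 = 38 bytes; the
# 4-byte type/subtype/length header the library prepends when serializing
# brings it to the 42-byte HARD_DRIVE node described above.
assert struct.calcsize("<LQQ16sBB") == 38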
# Make our default boot option
ope_entry = LoadOption()
ope_entry.attributes = LoadOptionAttributes.LOAD_OPTION_ACTIVE
ope_entry.description="OPE Boot"
# Add the disk GUID entry
ope_entry.file_path_list.paths.append(DevicePath(
DevicePathType.MEDIA_DEVICE_PATH, MediaDevicePathSubtype.HARD_DRIVE, WIN_HARD_DRIVE_MEDIA_PATH
)
)
# Add the file path
ope_entry.file_path_list.paths.append(DevicePath(
DevicePathType.MEDIA_DEVICE_PATH, MediaDevicePathSubtype.FILE_PATH, WIN_DEVICE_PATH_DATA
)
)
ope_entry.file_path_list.paths.append(DevicePath(
DevicePathType.END_OF_HARDWARE_DEVICE_PATH, EndOfHardwareDevicePathSubtype.END_ENTIRE_DEVICE_PATH
))
#ret = ope_entry.file_path_list.set_file_path('\\EFI\\Microsoft\\Boot\\bootmgfw.efi')
#print(ret)
#ope_entry.file_path_list.data = WIN_DEVICE_PATH_DATA
#ope_entry.file_path_list.paths[0].data = WIN_DEVICE_PATH_DATA
#ope_entry.file_path_list.paths[0].subtype = MediaDevicePathSubtype.HARD_DRIVE
#ope_entry.optional_data = WIN_OPTIONAL_DATA
#set_parsed_boot_entry(0, ope_entry)
# Find all boot entries
for i in range(0, 24):
# Get entry
try:
parsed_option = get_parsed_boot_entry(i)
print(str(i) + " - " + parsed_option.description)
print(parsed_option.file_path_list.paths[0].data)
print(parsed_option)
#print(parsed_option.attributes)
print("Device Paths")
for p in parsed_option.file_path_list.paths:
print("------")
print(f"\t{p.path_type}")
print(f"\t{p.subtype}")
print(f"\t{p.data}")
#print(len(parsed_option.file_path_list.paths))
#if len(parsed_option.file_path_list.paths) > 0:
# print(parsed_option.file_path_list.get_file_path())
# print(parsed_option.file_path_list.paths[0].path_type)
# print(parsed_option.file_path_list.paths[0].subtype)
print(parse_uefi_data(parsed_option.optional_data))
print(parsed_option.optional_data)
print("")
if parsed_option.description != "OPE Boot":
found_entries[parsed_option.description] = parsed_option
except Exception as ex:
# Will get errors if we run out of entries. That is OK.
if "environment option" not in str(ex):
print(ex)
traceback.print_exc()
pass
# Set our custom entry as first entry
boot_order = list()
boot_order.append(0)
set_parsed_boot_entry(0, ope_entry)
i = 1
for entry_desc in found_entries:
entry = found_entries[entry_desc]
# Add each entry back to the boot entries.
    # Every re-added entry (including the "UEFI: Realtek USB FE Family
    # Controller" and "UEFI: IP4 Realtek USB FE Family Controller" PXE
    # entries) is hidden but kept active.
    entry.attributes = LoadOptionAttributes.LOAD_OPTION_HIDDEN | LoadOptionAttributes.LOAD_OPTION_ACTIVE
set_parsed_boot_entry(i, entry)
boot_order.append(i)
i+=1
# Write the new boot order
set_boot_order(boot_order)
# Get the current list of boot items.
#for entry_id in get_boot_order():
# load_option = get_parsed_boot_entry(entry_id)
# print(f"{entry_id} {load_option} {load_option.description}\n\t\t{load_option.optional_data}\n")
exit()
with privileges():
data, attr = get_variable("BootCurrent")
print(data)
print(attr)
with privileges():
for entry_id in get_boot_order():
load_option = get_parsed_boot_entry(entry_id)
print(f"{entry_id} {load_option}")
with privileges():
# Set our custom order
boot_order = get_boot_order()
set_boot_order(boot_order)
boot_entry = get_parsed_boot_entry(boot_order[0])
print(boot_entry.__dict__)
boot_entry.description="OPE Boot"
    boot_entry.file_path_list.set_file_path(r"\EFI\MICROSOFT\BOOT\BOOTMGFW.EFI")
set_parsed_boot_entry(0, boot_entry)
with privileges():
data, attr = get_variable("BootCurrent")
print(data)
print(attr)
with privileges():
for entry_id in get_boot_order():
load_option = get_parsed_boot_entry(entry_id)
print(f"{entry_id} {load_option}")
raw_entry = get_boot_entry(entry_id)
loaded_option = LoadOption.from_bytes(raw_entry)
loaded_option.attributes = LoadOptionAttributes.LOAD_OPTION_HIDDEN
print("0x{:04X} {}".format(entry_id, loaded_option))
exit()
with privileges():
ope_id, attr = get_variable("OPE_ID")
if ope_id is None:
namespace = "{f29e2c32-8cca-44ff-93d7-87195ace38b9}"
ope_id = uuid.uuid4()
set_variable("OPE_ID", ope_id,
namespace=namespace,
attributes= Attributes.NON_VOLATILE |
Attributes.BOOT_SERVICE_ACCESS |
Attributes.RUNTIME_ACCESS
)
# delete_variable("OPE_ID", namespace=namespace)
| [
"[email protected]"
] | |
2673f7a679c32787464288aeaaf3f84c4970adcd | 1852cdc422fe605a379ab24368157a1b82e8f66f | /037_trunc_primes.py | 2367585319fcdf307dc6473976ef2f6828dcd21d | [] | no_license | JesseAldridge/Euler | e96bd7995fd8da60ce4db3c1daa20a719778b4a2 | ee23c562cfcf3187f8768264249a41470c9d6355 | refs/heads/master | 2021-01-16T19:35:53.064280 | 2014-07-06T01:20:26 | 2014-07-06T01:20:26 | 2,194,822 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py |
def gen_primes():
    ## Incremental sieve of Eratosthenes:
    ## http://stackoverflow.com/questions/567222/simple-prime-generator-in-python
    # D maps each known composite to the list of primes that divide it.
    D = {}
    q = 2
    while True:
        if q not in D:
            # q is prime: yield it and schedule its square as the first
            # composite this prime is responsible for crossing off.
            yield q
            D[q * q] = [q]
        else:
            # q is composite: advance each witness prime to its next
            # multiple, then drop q to keep the dict small.
            for p in D[q]:
                D.setdefault(p + q, []).append(p)
            del D[q]
        q += 1
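# Quick check of the generator (sketch):
#   import itertools
#   print list(itertools.islice(gen_primes(), 10))
#   # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]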
# Find the eleven truncatable primes.
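# e.g. 3797 qualifies: 3797, 797, 97, 7 (truncating from the left) and
# 3797, 379, 37, 3 (truncating from the right) are all prime.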
primes = set()
count = 0
sum_ = 0
for prime in gen_primes():
primes.add(prime)
if prime <= 7: continue
sprime = str(prime)
for i in range(1, len(sprime)):
if int(sprime[i:]) not in primes: break
if int(sprime[:-i]) not in primes: break
else:
count += 1
sum_ += prime
if count >= 11:
break
print 'sum:', sum_ | [
"[email protected]"
] | |
89f8dbce692f523fe4b4add92ab763aebb764dbb | bdc47ebbe3e125a48d3cfe762061f4b1070465c4 | /mysite/fcuser/migrations/0001_initial.py | 49fb63275f1f537773b58852b3526f7fe6aadb84 | [] | no_license | swpheus/Django_prj | 3de3d2b110e51087cdf89b95fc2b6ea3f63acf00 | 10b9af058f0b76c424e24ecd1996a49646948547 | refs/heads/master | 2020-07-21T17:17:14.019550 | 2019-09-21T03:00:36 | 2019-09-21T03:00:36 | 206,929,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Generated by Django 2.2.4 on 2019-08-30 12:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Fcuser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=64, verbose_name='사용자이름')),  # "username"
                ('password', models.CharField(max_length=64, verbose_name='비밀번호')),  # "password"
                ('registered_dttm', models.DateTimeField(auto_now_add=True, verbose_name='등록시간')),  # "registration time"
],
options={
'db_table': 'sw',
},
),
]
| [
"[email protected]"
] | |
5c04bc43c47e68240dcf58ac436ae995004210b7 | 0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a | /tests/components/media_source/test_init.py | 491b1972cb680ef2f8d91bc7ee511cd93d8951ad | [
"Apache-2.0"
] | permissive | robert-alfaro/home-assistant | e9bb08ad22a167ed226fb3de8f5b36acfc393548 | 4a53121b58b77a318f08c64ad2c5372a16b800e0 | refs/heads/dev | 2023-02-28T06:46:23.217246 | 2022-04-26T17:30:08 | 2022-04-26T17:30:08 | 115,894,662 | 4 | 0 | Apache-2.0 | 2023-02-22T06:21:08 | 2018-01-01T02:00:35 | Python | UTF-8 | Python | false | false | 7,917 | py | """Test Media Source initialization."""
from unittest.mock import Mock, patch
import pytest
import yarl
from homeassistant.components import media_source
from homeassistant.components.media_player import MEDIA_CLASS_DIRECTORY, BrowseError
from homeassistant.components.media_source import const, models
from homeassistant.setup import async_setup_component
async def test_is_media_source_id():
"""Test media source validation."""
assert media_source.is_media_source_id(media_source.URI_SCHEME)
assert media_source.is_media_source_id(f"{media_source.URI_SCHEME}domain")
assert media_source.is_media_source_id(
f"{media_source.URI_SCHEME}domain/identifier"
)
assert not media_source.is_media_source_id("test")
async def test_generate_media_source_id():
"""Test identifier generation."""
tests = [
(None, None),
(None, ""),
("", ""),
("domain", None),
("domain", ""),
("domain", "identifier"),
]
for domain, identifier in tests:
assert media_source.is_media_source_id(
media_source.generate_media_source_id(domain, identifier)
)
async def test_async_browse_media(hass):
"""Test browse media."""
assert await async_setup_component(hass, media_source.DOMAIN, {})
await hass.async_block_till_done()
# Test non-media ignored (/media has test.mp3 and not_media.txt)
media = await media_source.async_browse_media(hass, "")
assert isinstance(media, media_source.models.BrowseMediaSource)
assert media.title == "media"
assert len(media.children) == 2
# Test content filter
media = await media_source.async_browse_media(
hass,
"",
content_filter=lambda item: item.media_content_type.startswith("video/"),
)
assert isinstance(media, media_source.models.BrowseMediaSource)
assert media.title == "media"
assert len(media.children) == 1, media.children
    assert media.children[0].title == "Epic Sax Guy 10 Hours"
assert media.not_shown == 1
# Test content filter adds to original not_shown
orig_browse = models.MediaSourceItem.async_browse
async def not_shown_browse(self):
"""Patch browsed item to set not_shown base value."""
item = await orig_browse(self)
item.not_shown = 10
return item
with patch(
"homeassistant.components.media_source.models.MediaSourceItem.async_browse",
not_shown_browse,
):
media = await media_source.async_browse_media(
hass,
"",
content_filter=lambda item: item.media_content_type.startswith("video/"),
)
assert isinstance(media, media_source.models.BrowseMediaSource)
assert media.title == "media"
assert len(media.children) == 1, media.children
        assert media.children[0].title == "Epic Sax Guy 10 Hours"
assert media.not_shown == 11
# Test invalid media content
with pytest.raises(BrowseError):
await media_source.async_browse_media(hass, "invalid")
# Test base URI returns all domains
media = await media_source.async_browse_media(hass, const.URI_SCHEME)
assert isinstance(media, media_source.models.BrowseMediaSource)
assert len(media.children) == 1
assert media.children[0].title == "Local Media"
async def test_async_resolve_media(hass):
"""Test browse media."""
assert await async_setup_component(hass, media_source.DOMAIN, {})
await hass.async_block_till_done()
media = await media_source.async_resolve_media(
hass,
media_source.generate_media_source_id(media_source.DOMAIN, "local/test.mp3"),
)
assert isinstance(media, media_source.models.PlayMedia)
assert media.url == "/media/local/test.mp3"
assert media.mime_type == "audio/mpeg"
async def test_async_unresolve_media(hass):
"""Test browse media."""
assert await async_setup_component(hass, media_source.DOMAIN, {})
await hass.async_block_till_done()
# Test no media content
with pytest.raises(media_source.Unresolvable):
await media_source.async_resolve_media(hass, "")
# Test invalid media content
with pytest.raises(media_source.Unresolvable):
await media_source.async_resolve_media(hass, "invalid")
# Test invalid media source
with pytest.raises(media_source.Unresolvable):
await media_source.async_resolve_media(hass, "media-source://media_source2")
async def test_websocket_browse_media(hass, hass_ws_client):
"""Test browse media websocket."""
assert await async_setup_component(hass, media_source.DOMAIN, {})
await hass.async_block_till_done()
client = await hass_ws_client(hass)
media = media_source.models.BrowseMediaSource(
domain=media_source.DOMAIN,
identifier="/media",
title="Local Media",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="listing",
can_play=False,
can_expand=True,
)
with patch(
"homeassistant.components.media_source.async_browse_media",
return_value=media,
):
await client.send_json(
{
"id": 1,
"type": "media_source/browse_media",
}
)
msg = await client.receive_json()
assert msg["success"]
assert msg["id"] == 1
assert media.as_dict() == msg["result"]
with patch(
"homeassistant.components.media_source.async_browse_media",
side_effect=BrowseError("test"),
):
await client.send_json(
{
"id": 2,
"type": "media_source/browse_media",
"media_content_id": "invalid",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == "browse_media_failed"
assert msg["error"]["message"] == "test"
@pytest.mark.parametrize("filename", ["test.mp3", "Epic Sax Guy 10 Hours.mp4"])
async def test_websocket_resolve_media(hass, hass_ws_client, filename):
"""Test browse media websocket."""
assert await async_setup_component(hass, media_source.DOMAIN, {})
await hass.async_block_till_done()
client = await hass_ws_client(hass)
media = media_source.models.PlayMedia(
f"/media/local/{filename}",
"audio/mpeg",
)
with patch(
"homeassistant.components.media_source.async_resolve_media",
return_value=media,
):
await client.send_json(
{
"id": 1,
"type": "media_source/resolve_media",
"media_content_id": f"{const.URI_SCHEME}{media_source.DOMAIN}/local/{filename}",
}
)
msg = await client.receive_json()
assert msg["success"]
assert msg["id"] == 1
assert msg["result"]["mime_type"] == media.mime_type
# Validate url is relative and signed.
assert msg["result"]["url"][0] == "/"
parsed = yarl.URL(msg["result"]["url"])
    assert parsed.path == media.url
assert "authSig" in parsed.query
with patch(
"homeassistant.components.media_source.async_resolve_media",
side_effect=media_source.Unresolvable("test"),
):
await client.send_json(
{
"id": 2,
"type": "media_source/resolve_media",
"media_content_id": "invalid",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == "resolve_media_failed"
assert msg["error"]["message"] == "test"
async def test_browse_resolve_without_setup():
"""Test browse and resolve work without being setup."""
with pytest.raises(BrowseError):
await media_source.async_browse_media(Mock(data={}), None)
with pytest.raises(media_source.Unresolvable):
await media_source.async_resolve_media(Mock(data={}), None)
| [
"[email protected]"
] | |
ee898a8219a29d065569ea7a3d1dc90e6507d516 | a3fa25d5424c81e1d5aef431916be9759a88cf91 | /src/main/python/lib/default/gtkgui/default_gui/runningactions.py | eb6950505cfa100e4dafd11ff9539c69900d75a3 | [
"MIT"
] | permissive | emilybache/texttest-runner | e4d5385bfb2d7f543b9e9828fae90681f49e5c77 | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | refs/heads/master | 2021-01-22T23:20:28.358273 | 2015-07-07T18:03:56 | 2015-07-07T18:03:56 | 17,683,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,337 | py |
"""
The various ways to launch the dynamic GUI from the static GUI
"""
import gtk, plugins, os, sys, stat
from .. import guiplugins
from copy import copy, deepcopy
# Runs the dynamic GUI, but not necessarily with all the options available from the configuration
class BasicRunningAction:
runNumber = 1
def __init__(self, inputOptions):
self.inputOptions = inputOptions
self.testCount = 0
def getTabTitle(self):
return "Running"
def messageAfterPerform(self):
return self.performedDescription() + " " + self.describeTestsWithCount() + " at " + plugins.localtime() + "."
def describeTestsWithCount(self):
if self.testCount == 1:
return "test " + self.getTestCaseSelection()[0].getRelPath()
else:
return str(self.testCount) + " tests"
def performOnCurrent(self):
self.startTextTestProcess(self.getUseCaseName(), [ "-g" ])
def getTestsAffected(self, testSelOverride):
if testSelOverride:
return testSelOverride
else:
# Take a copy so we aren't fooled by selection changes
return copy(self.currTestSelection)
def startTextTestProcess(self, usecase, runModeOptions, testSelOverride=None, filterFileOverride=None):
app = self.getCurrentApplication()
writeDir = os.path.join(self.getLogRootDirectory(app), "dynamic_run" + str(self.runNumber))
plugins.ensureDirectoryExists(writeDir)
filterFile = self.createFilterFile(writeDir, filterFileOverride)
ttOptions = runModeOptions + self.getTextTestOptions(filterFile, app, usecase)
self.diag.info("Starting " + usecase + " run of TextTest with arguments " + repr(ttOptions))
logFile = os.path.join(writeDir, "output.log")
errFile = os.path.join(writeDir, "errors.log")
BasicRunningAction.runNumber += 1
description = "Dynamic GUI started at " + plugins.localtime()
cmdArgs = self.getInterpreterArgs() + [ sys.argv[0] ] + ttOptions
env = self.getNewUseCaseEnvironment(usecase)
testsAffected = self.getTestsAffected(testSelOverride)
guiplugins.processMonitor.startProcess(cmdArgs, description, env=env, killOnTermination=self.killOnTermination(),
stdout=open(logFile, "w"), stderr=open(errFile, "w"),
exitHandler=self.checkTestRun,
exitHandlerArgs=(errFile,testsAffected,filterFile,usecase))
def getCurrentApplication(self):
return self.currAppSelection[0] if self.currAppSelection else self.validApps[0]
def getLogRootDirectory(self, app):
return app.writeDirectory
def killOnTermination(self):
return True
def getNewUseCaseEnvironment(self, usecase):
environ = deepcopy(os.environ)
recScript = os.getenv("USECASE_RECORD_SCRIPT")
if recScript:
environ["USECASE_RECORD_SCRIPT"] = plugins.addLocalPrefix(recScript, usecase)
repScript = os.getenv("USECASE_REPLAY_SCRIPT")
if repScript:
# Dynamic GUI might not record anything (it might fail) - don't try to replay files that
# aren't there...
dynRepScript = plugins.addLocalPrefix(repScript, usecase)
if os.path.isfile(dynRepScript):
environ["USECASE_REPLAY_SCRIPT"] = dynRepScript
else:
del environ["USECASE_REPLAY_SCRIPT"]
return environ
def getSignalsSent(self):
return [ "SaveSelection" ]
def createFilterFile(self, writeDir, filterFileOverride):
# Because the description of the selection can be extremely long, we write it in a file and refer to it
# This avoids too-long command lines which are a problem at least on Windows XP
if filterFileOverride is None:
filterFileName = os.path.join(writeDir, "gui_select")
self.notify("SaveSelection", filterFileName)
return filterFileName
elif filterFileOverride is not NotImplemented:
return filterFileOverride
def getInterpreterArgs(self):
interpreterArg = os.getenv("TEXTTEST_DYNAMIC_GUI_INTERPRETER", "") # Alternative interpreter for the dynamic GUI : mostly useful for coverage / testing
if interpreterArg:
return plugins.splitcmd(interpreterArg.replace("ttpython", sys.executable))
else: # pragma: no cover - cannot test without StoryText on dynamic GUI
return [ sys.executable ]
def getOptionGroups(self):
return [ self.optionGroup ]
def getTextTestOptions(self, filterFile, app, usecase):
ttOptions = self.getCmdlineOptionForApps(filterFile)
for group in self.getOptionGroups():
ttOptions += self.getCommandLineArgs(group, self.getCommandLineKeys(usecase))
# May be slow to calculate for large test suites, cache it
self.testCount = len(self.getTestCaseSelection())
ttOptions += [ "-count", str(self.testCount * self.getCountMultiplier()) ]
if filterFile:
ttOptions += [ "-f", filterFile ]
tmpFilterDir = self.getTmpFilterDir(app)
if tmpFilterDir:
ttOptions += [ "-fd", tmpFilterDir ]
return ttOptions
def getCommandLineKeys(self, *args):
# assume everything by default
return []
def getCountMultiplier(self):
return 1
def getVanillaOption(self):
options = []
if self.inputOptions.has_key("vanilla"):
options.append("-vanilla")
value = self.inputOptions.get("vanilla")
if value:
options.append(value)
return options
def getTmpFilterDir(self, app):
return os.path.join(app.writeDirectory, "temporary_filter_files")
def getAppIdentifier(self, app):
return app.name + app.versionSuffix()
def getCmdlineOptionForApps(self, filterFile):
if not filterFile:
return []
apps = sorted(self.currAppSelection, key=self.validApps.index)
appNames = map(self.getAppIdentifier, apps)
return [ "-a", ",".join(appNames) ]
def checkTestRun(self, errFile, testSel, filterFile, usecase):
if not testSel:
return
if self.checkErrorFile(errFile, testSel, usecase):
self.handleCompletion(testSel, filterFile, usecase)
if len(self.currTestSelection) >= 1 and self.currTestSelection[0] in testSel:
self.currTestSelection[0].filesChanged()
testSel[0].notify("CloseDynamic", usecase)
def checkErrorFile(self, errFile, testSel, usecase):
if os.path.isfile(errFile):
errText = testSel[0].app.filterErrorText(errFile)
if len(errText):
self.notify("Status", usecase.capitalize() + " run failed for " + repr(testSel[0]))
lines = errText.splitlines()
maxLength = testSel[0].getConfigValue("lines_of_text_difference")
if len(lines) > maxLength:
errText = "\n".join(lines[:maxLength])
errText += "\n(Very long message truncated. Full details can be seen in the file at\n" + errFile + ")"
self.showErrorDialog(usecase.capitalize() + " run failed, with the following errors:\n" + errText)
return False
return True
def handleCompletion(self, *args):
pass # only used when recording
def getConfirmationMessage(self):
# For extra speed we check the selection first before we calculate all the test cases again...
if len(self.currTestSelection) > 1 or len(self.getTestCaseSelection()) > 1:
multiTestWarning = self.getMultipleTestWarning()
if multiTestWarning:
return "You are trying to " + multiTestWarning + ".\nThis will mean lots of target application GUIs " + \
"popping up and may be hard to follow.\nAre you sure you want to do this?"
return ""
def getMultipleTestWarning(self):
pass
class ReconnectToTests(BasicRunningAction,guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic)
BasicRunningAction.__init__(self, inputOptions)
self.addOption("v", "Version to reconnect to")
self.addOption("reconnect", "Temporary result directory", os.getenv("TEXTTEST_TMP", ""), selectDir=True, description="Specify a directory containing temporary texttest results. The reconnection will use a random subdirectory matching the version used.")
appGroup = plugins.OptionGroup("Invisible")
self.addApplicationOptions(allApps, appGroup, inputOptions)
self.addSwitch("reconnfull", "Recomputation", options=appGroup.getOption("reconnfull").options)
def _getStockId(self):
return "connect"
def _getTitle(self):
return "Re_connect..."
def getTooltip(self):
return "Reconnect to previously run tests"
def performedDescription(self):
return "Reconnected to"
def getUseCaseName(self):
return "reconnect"
def performOnCurrent(self):
self.startTextTestProcess(self.getUseCaseName(), [ "-g" ] + self.getVanillaOption())
def getAppIdentifier(self, app):
# Don't send version data, we have our own field with that info and it has a slightly different meaning
return app.name
def getSizeAsWindowFraction(self):
return 0.8, 0.7
class ReloadTests(BasicRunningAction,guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic)
BasicRunningAction.__init__(self, inputOptions)
self.appGroup = plugins.OptionGroup("Invisible")
# We don't think reconnect can handle multiple roots currently
# Can be a limitation of this for the moment.
self.addApplicationOptions(allApps, self.appGroup, inputOptions)
self.addSwitch("reconnfull", "Recomputation", options=self.appGroup.getOption("reconnfull").options)
def getTmpDirectory(self):
return self.currAppSelection[0].writeDirectory
def _getStockId(self):
return "connect"
def _getTitle(self):
return "Re_load tests..."
def getTooltip(self):
return "Reload current results into new dynamic GUI"
def performedDescription(self):
return "Reloaded"
def getUseCaseName(self):
return "reload"
def performOnCurrent(self):
if self.appGroup.getOptionValue("reconnfull") == 0:
# We want to reload the results exactly as they are currently
# This will only be possible if we make sure to save the teststate files first
self.saveTestStates()
self.startTextTestProcess(self.getUseCaseName(), [ "-g", "-reconnect", self.getTmpDirectory() ] + self.getVanillaOption())
def saveTestStates(self):
for test in self.currTestSelection:
if test.state.isComplete(): # might look weird but this notification also comes in scripts etc.
test.saveState()
def getAppIdentifier(self, app):
# Don't send version data, we have our own field with that info and it has a slightly different meaning
return app.name
# base class for RunTests and RerunTests, i.e. all the options are available
class RunningAction(BasicRunningAction):
originalVersion = ""
def __init__(self, allApps, inputOptions):
BasicRunningAction.__init__(self, inputOptions)
self.optionGroups = []
self.disablingInfo = {}
self.disableWidgets = {}
for groupName, disablingOption, disablingOptionValue in self.getGroupNames(allApps):
group = plugins.OptionGroup(groupName)
self.addApplicationOptions(allApps, group, inputOptions)
self.optionGroups.append(group)
if disablingOption:
self.disablingInfo[self.getOption(disablingOption)] = disablingOptionValue, group
self.temporaryGroup = plugins.OptionGroup("Temporary Settings")
self.temporaryGroup.addOption("filetype", "File Type", "environment", possibleValues=self.getFileTypes(allApps))
self.temporaryGroup.addOption("contents", "Contents", multilineEntry=True)
RunningAction.originalVersion = self.getVersionString()
def getFileTypes(self, allApps):
ignoreTypes = [ "testsuite", "knownbugs", "stdin", "input", "testcustomize.py" ]
fileTypes = []
for app in allApps:
for ft in app.defFileStems("builtin") + app.defFileStems("default"):
if ft not in fileTypes and ft not in ignoreTypes:
fileTypes.append(ft)
return fileTypes
def getTextTestOptions(self, filterFile, app, *args):
ret = BasicRunningAction.getTextTestOptions(self, filterFile, app, *args)
contents = self.temporaryGroup.getValue("contents")
if contents:
fileType = self.temporaryGroup.getValue("filetype")
writeDir = os.path.dirname(filterFile)
tmpDir = self.makeTemporarySettingsDir(writeDir, app, fileType, contents)
ret += [ "-td", tmpDir ]
return ret
def makeTemporarySettingsDir(self, writeDir, app, fileType, contents):
tmpDir = os.path.join(writeDir, "temporary_settings")
plugins.ensureDirectoryExists(tmpDir)
fileName = os.path.join(tmpDir, fileType + "." + app.name + app.versionSuffix())
with open(fileName, "w") as f:
f.write(contents)
return tmpDir
def getGroupNames(self, allApps):
if len(allApps) > 0:
return allApps[0].getAllRunningGroupNames(allApps)
else:
configObject = self.makeDefaultConfigObject(self.inputOptions)
return configObject.getAllRunningGroupNames(allApps)
def createCheckBox(self, switch):
widget = guiplugins.OptionGroupGUI.createCheckBox(self, switch)
self.storeSwitch(switch, [ widget ])
return widget
def createRadioButtons(self, switch, *args):
buttons = guiplugins.OptionGroupGUI.createRadioButtons(self, switch, *args)
self.storeSwitch(switch, buttons)
return buttons
def storeSwitch(self, switch, widgets):
if self.disablingInfo.has_key(switch):
disablingOptionValue, group = self.disablingInfo[switch]
if disablingOptionValue < len(widgets):
self.disableWidgets[widgets[disablingOptionValue]] = switch, disablingOptionValue, group
def getOption(self, optName):
for group in self.optionGroups:
opt = group.getOption(optName)
if opt:
return opt
def getOptionGroups(self):
return self.optionGroups
def getCountMultiplier(self):
return self.getCopyCount() * self.getVersionCount()
def getCopyCount(self):
return self.getOption("cp").getValue()
def getVersionString(self):
vOption = self.getOption("v")
if vOption:
versionString = vOption.getValue()
return "" if versionString.startswith("<default>") else versionString
else:
return ""
def getVersionCount(self):
return self.getVersionString().count(",") + 1
def performedDescription(self):
timesToRun = self.getCopyCount()
numberOfTests = self.testCount
if timesToRun != 1:
if numberOfTests > 1:
return "Started " + str(timesToRun) + " copies each of"
else:
return "Started " + str(timesToRun) + " copies of"
else:
return "Started"
def getMultipleTestWarning(self):
app = self.currTestSelection[0].app
for group in self.getOptionGroups():
for switchName, desc in app.getInteractiveReplayOptions():
if group.getSwitchValue(switchName, False):
return "run " + self.describeTests() + " with " + desc + " replay enabled"
def getConfirmationMessage(self):
runVersion = self.getVersionString()
if self.originalVersion and self.originalVersion not in runVersion:
return "You have tried to run a version ('" + runVersion + \
"') which is not based on the version you started with ('" + self.originalVersion + "').\n" + \
"This will result in an attempt to amalgamate the versions, i.e. to run version '" + \
self.originalVersion + "." + runVersion + "'.\n" + \
"If this isn't what you want, you will need to restart the static GUI with a different '-v' flag.\n\n" + \
"Are you sure you want to continue?"
else:
return BasicRunningAction.getConfirmationMessage(self)
def createNotebook(self):
notebook = gtk.Notebook()
notebook.set_name("sub-notebook for running")
tabNames = [ "Basic", "Advanced" ]
frames = []
for group in self.optionGroups:
if group.name in tabNames:
label = gtk.Label(group.name)
tab = self.createTab(group, frames)
notebook.append_page(tab, label)
elif len(group.keys()) > 0:
frames.append(self.createFrame(group, group.name))
self.connectDisablingSwitches()
notebook.show_all()
self.widget = notebook
return notebook
def createTab(self, group, frames):
tabBox = gtk.VBox()
if frames:
frames.append(self.createFrame(group, "Miscellaneous"))
frames.append(self.createFrame(self.temporaryGroup, self.temporaryGroup.name))
for frame in frames:
tabBox.pack_start(frame, fill=False, expand=False, padding=8)
else:
self.fillVBox(tabBox, group)
if isinstance(self, guiplugins.ActionTabGUI):
# In a tab, we need to duplicate the buttons for each subtab
# In a dialog we should not do this
self.createButtons(tabBox)
widget = self.addScrollBars(tabBox, hpolicy=gtk.POLICY_AUTOMATIC)
widget.set_name(group.name + " Tab")
return widget
def updateSensitivity(self, widget, data):
switch, disablingOptionValue, group = data
sensitive = switch.getValue() != disablingOptionValue
self.setGroupSensitivity(group, sensitive, ignoreWidget=widget)
def connectDisablingSwitches(self):
for widget, data in self.disableWidgets.items():
self.updateSensitivity(widget, data)
widget.connect("toggled", self.updateSensitivity, data)
self.disableWidgets = {} # not needed any more
def notifyReset(self, *args):
for optionGroup in self.optionGroups:
optionGroup.reset()
def _getStockId(self):
return "execute"
class RunTests(RunningAction,guiplugins.ActionTabGUI):
def __init__(self, allApps, dummy, inputOptions):
guiplugins.ActionTabGUI.__init__(self, allApps)
RunningAction.__init__(self, allApps, inputOptions)
def _getTitle(self):
return "_Run"
def getTooltip(self):
return "Run selected tests"
def getUseCaseName(self):
return "dynamic"
def createView(self):
return self.createNotebook()
def updateName(self, nameOption, name):
if name:
nameOption.setValue("Tests started from " + repr(name) + " at <time>")
def notifySetRunName(self, name):
nameOption = self.getOption("name")
self.updateName(nameOption, name)
def addApplicationOptions(self, allApps, group, inputOptions):
guiplugins.ActionTabGUI.addApplicationOptions(self, allApps, group, inputOptions)
nameOption = group.getOption("name")
if nameOption:
self.updateName(nameOption, nameOption.getValue())
class RerunTests(RunningAction,guiplugins.ActionDialogGUI):
def __init__(self, allApps, dummy, inputOptions):
guiplugins.ActionDialogGUI.__init__(self, allApps)
RunningAction.__init__(self, allApps, inputOptions)
def _getTitle(self):
return "_Rerun"
def getTooltip(self):
return "Rerun selected tests"
def getUseCaseName(self):
return "rerun"
def killOnTermination(self):
return False # don't want rerun GUIs to disturb each other like this
def getTmpFilterDir(self, app):
return "" # don't want selections returned here, send them to the static GUI
def getConfirmationMessage(self):
return BasicRunningAction.getConfirmationMessage(self)
def getLogRootDirectory(self, app):
if self.inputOptions.has_key("f"):
logRootDir = os.path.dirname(self.inputOptions["f"])
if os.path.basename(logRootDir).startswith("dynamic_run"):
return logRootDir
return BasicRunningAction.getLogRootDirectory(self, app)
def getExtraParent(self, app):
for other in self.validApps:
if app in other.extras:
return other
def getExtraVersions(self, app):
if app.extras or any((v.startswith("copy_") for v in app.versions)):
return []
extraParent = self.getExtraParent(app)
if extraParent:
return filter(lambda v: v not in extraParent.versions, app.versions)
else:
extrasGiven = app.getConfigValue("extra_version")
return filter(lambda v: v in extrasGiven, app.versions)
def getAppIdentifier(self, app):
parts = [ app.name ] + self.getExtraVersions(app)
return ".".join(parts)
def checkTestRun(self, errFile, testSel, filterFile, usecase):
# Don't do anything with the files, but do produce popups on failures and notify when complete
self.checkErrorFile(errFile, testSel, usecase)
testSel[0].notify("CloseDynamic", usecase)
def fillVBox(self, vbox, optionGroup):
if optionGroup is self.optionGroup:
notebook = self.createNotebook()
vbox.pack_start(notebook)
return None, None # no file chooser info
else:
return guiplugins.ActionDialogGUI.fillVBox(self, vbox, optionGroup, includeOverrides=False)
def getSizeAsWindowFraction(self):
return 0.8, 0.9
class RecordTest(BasicRunningAction,guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic)
BasicRunningAction.__init__(self, inputOptions)
self.recordTime = None
self.currentApp = None
if len(allApps) > 0:
self.currentApp = allApps[0]
self.addOptions()
self.addSwitches()
def addOptions(self):
defaultVersion, defaultCheckout = "", ""
if self.currentApp:
defaultVersion = self.currentApp.getFullVersion()
defaultCheckout = self.currentApp.checkout
self.addOption("v", "Version to record", defaultVersion)
self.addOption("c", self.getCheckoutLabel(), defaultCheckout)
self.addOption("m", "Record on machine")
def getCheckoutLabel(self):
# Sometimes configurations might want to use their own term in place of "checkout"
return "Checkout to use for recording"
def addSwitches(self):
if self.currentApp and self.currentApp.usesCaptureMock():
self.currentApp.addCaptureMockSwitch(self.optionGroup, value=1) # record new by default
self.addSwitch("rep", "Automatically replay test after recording it", 1,
options = [ "Disabled", "In background", "Using dynamic GUI" ])
if self.currentApp and self.currentApp.getConfigValue("extra_test_process_postfix"):
self.addSwitch("mult", "Record multiple runs of system")
def correctTestClass(self):
return "test-case"
def _getStockId(self):
return "media-record"
def messageAfterPerform(self):
return "Started record session for " + self.describeTests()
def touchFiles(self, test):
for postfix in test.getConfigValue("extra_test_process_postfix"):
if not test.getFileName("usecase" + postfix):
fileName = os.path.join(test.getDirectory(), "usecase" + postfix + "." + test.app.name)
with open(fileName, "w") as f:
f.write("Dummy file to indicate we should record multiple runs\n")
def performOnCurrent(self):
test = self.currTestSelection[0]
if self.optionGroup.getSwitchValue("mult"):
self.touchFiles(test)
self.updateRecordTime(test)
self.startTextTestProcess("record", [ "-g", "-record" ] + self.getVanillaOption())
def shouldShowCurrent(self, *args):
# override the default so it's disabled if there are no apps
return len(self.validApps) > 0 and guiplugins.ActionDialogGUI.shouldShowCurrent(self, *args)
def isValidForApp(self, app):
return app.getConfigValue("use_case_record_mode") != "disabled" and \
app.getConfigValue("use_case_recorder") != "none"
def updateOptions(self):
if self.currentApp is not self.currAppSelection[0]:
self.currentApp = self.currAppSelection[0]
self.optionGroup.setOptionValue("v", self.currentApp.getFullVersion())
self.optionGroup.setOptionValue("c", self.currentApp.checkout)
return True
else:
return False
def getUseCaseFile(self, test):
return test.getFileName("usecase", self.optionGroup.getOptionValue("v"))
def updateRecordTime(self, test):
file = self.getUseCaseFile(test)
if file:
self._updateRecordTime(file)
def _updateRecordTime(self, file):
newTime = plugins.modifiedTime(file)
if newTime != self.recordTime:
self.recordTime = newTime
return True
else:
return False
def getChangedUseCaseVersion(self, test):
test.refreshFiles() # update cache after record run
file = self.getUseCaseFile(test)
if not file or not self._updateRecordTime(file):
return
parts = os.path.basename(file).split(".")
return ".".join(parts[2:])
def getMultipleTestWarning(self):
return "record " + self.describeTests() + " simultaneously"
def handleCompletion(self, testSel, filterFile, usecase):
test = testSel[0]
if usecase == "record":
changedUseCaseVersion = self.getChangedUseCaseVersion(test)
replay = self.optionGroup.getSwitchValue("rep")
if changedUseCaseVersion is not None and replay:
replayOptions = self.getVanillaOption() + self.getReplayRunModeOptions(changedUseCaseVersion)
self.startTextTestProcess("replay", replayOptions, testSel, filterFile)
message = "Recording completed for " + repr(test) + \
". Auto-replay of test now started. Don't submit the test manually!"
self.notify("Status", message)
else:
self.notify("Status", "Recording completed for " + repr(test) + ", not auto-replaying")
else:
self.notify("Status", "Recording and auto-replay completed for " + repr(test))
def getCommandLineKeys(self, usecase):
keys = [ "v", "c", "m" ]
if usecase == "record":
keys.append("rectraffic")
return keys
def getReplayRunModeOptions(self, overwriteVersion):
if self.optionGroup.getSwitchValue("rep") == 2:
return [ "-autoreplay", "-g" ]
else:
return [ "-autoreplay", "-o", overwriteVersion ]
def _getTitle(self):
return "Record _Use-Case"
def getSizeAsWindowFraction(self):
return 0.5, 0.5
class RunScriptAction(BasicRunningAction):
def getUseCaseName(self):
return "script"
def performOnCurrent(self, **kw):
self.startTextTestProcess(self.getUseCaseName(), [ "-g" ] + self.getVanillaOption(), **kw)
def getCommandLineArgs(self, optionGroup, *args):
args = [ self.scriptName() ]
for key, option in optionGroup.options.items():
args.append(key + "=" + str(option.getValue()))
return [ "-s", " ".join(args) ]
class ReplaceText(RunScriptAction, guiplugins.ActionDialogGUI):
def __init__(self, allApps, dynamic, inputOptions):
RunScriptAction.__init__(self, inputOptions)
guiplugins.ActionDialogGUI.__init__(self, allApps, dynamic)
self.addSwitch("regexp", "Enable regular expressions", 1)
self.addOption("old", "Text or regular expression to search for", multilineEntry=True)
self.addOption("new", "Text to replace it with (may contain regexp back references)", multilineEntry=True)
self.addOption("file", "File stem(s) to perform replacement in", allocateNofValues=2)
self.storytextDirs = {}
def getCmdlineOptionForApps(self, filterFile):
options = RunScriptAction.getCmdlineOptionForApps(self, filterFile)
if self.shouldAddShortcuts():
options[1] = options[1] + ",shortcut"
directoryStr = os.path.dirname(filterFile) + os.pathsep + os.pathsep.join(self.inputOptions.rootDirectories)
options += [ "-d", directoryStr ]
return options
def createFilterFile(self, writeDir, filterFileOverride):
filterFileName = RunScriptAction.createFilterFile(self, writeDir, filterFileOverride)
if self.shouldAddShortcuts():
storytextDir = self.storytextDirs[self.currAppSelection[0]]
self.createShortcutApps(writeDir)
with open(filterFileName, "a") as filterFile:
filterFile.write("appdata=shortcut\n")
filterFile.write(os.path.basename(storytextDir) + "\n")
return filterFileName
def notifyAllStems(self, allStems, defaultTestFile):
self.optionGroup.setValue("file", defaultTestFile)
self.optionGroup.setPossibleValues("file", allStems)
def notifyNewTestSelection(self, *args):
guiplugins.ActionDialogGUI.notifyNewTestSelection(self, *args)
if len(self.storytextDirs) > 0:
self.addSwitch("includeShortcuts", "Replace text in shortcut files", 0)
def shouldAddShortcuts(self):
return len(self.storytextDirs) > 0 and self.optionGroup.getOptionValue("includeShortcuts") > 0
def notifyUsecaseRename(self, argstr, *args):
self.showQueryDialog(self.getParentWindow(), "Usecase names were renamed. Would you like to update them in all usecases now?",
gtk.STOCK_DIALOG_WARNING, "Confirmation", self.respondUsecaseRename, respondData=(argstr, False, "*usecase*,stdout"))
def notifyShortcutRename(self, argstr, *args):
self.showQueryDialog(self.getParentWindow(), "Shortcuts were renamed. Would you like to update all usecases now?",
gtk.STOCK_DIALOG_WARNING, "Confirmation", self.respondUsecaseRename, respondData=(argstr, True, "*usecase*"))
def notifyShortcutRemove(self, argstr, *args):
self.showQueryDialog(self.getParentWindow(), "Shortcuts were removed. Would you like to update all usecases now?",
gtk.STOCK_DIALOG_WARNING, "Confirmation", self.respondUsecaseRename, respondData=(argstr, True, "*usecase*"))
def respondUsecaseRename(self, dialog, ans, args):
if ans == gtk.RESPONSE_YES:
oldName, newName = args[0].split(" renamed to ")
if args[1]:
self.optionGroup.setValue("regexp", 1)
self.addSwitch("argsReplacement", "", 1)
self.optionGroup.setValue("file", args[2])
self.optionGroup.setValue("old", oldName.strip("'"))
self.optionGroup.setValue("new", newName.strip("'"))
self.performOnCurrent(filterFileOverride=NotImplemented)
dialog.hide()
def createShortcutApps(self, writeDir):
for app, storyTextHome in self.storytextDirs.items():
self.createConfigFile(app, writeDir)
self.createTestSuiteFile(app, storyTextHome, writeDir)
def createConfigFile(self, app, writeDir):
configFileName = os.path.join(writeDir, "config.shortcut")
with open(configFileName, "w") as configFile:
configFile.write("executable:None\n")
configFile.write("filename_convention_scheme:standard\n")
configFile.write("use_case_record_mode:GUI\n")
configFile.write("use_case_recorder:storytext")
return configFileName
def createTestSuiteFile(self, app, storyTextHome, writeDir):
suiteFileName = os.path.join(writeDir, "testsuite.shortcut")
with open(suiteFileName, "w") as suiteFile:
suiteFile.write(storyTextHome + "\n")
return suiteFileName
def scriptName(self):
return "default.ReplaceText"
def _getTitle(self):
return "Replace Text in Files"
def getTooltip(self):
return "Replace text in multiple test files"
def performedDescription(self):
return "Replaced text in files for"
def getSizeAsWindowFraction(self):
# size of the dialog
return 0.5, 0.5
def notifyUsecaseHome(self, suite, usecaseHome):
self.storytextDirs[suite.app] = usecaseHome
def _respond(self, saidOK=True, dialog=None, fileChooserOption=None):
if saidOK and not self.optionGroup.getValue("old"):
self.showWarningDialog("Text or regular expression to search for cannot be empty")
else:
guiplugins.ActionDialogGUI._respond(self, saidOK, dialog, fileChooserOption)
class TestFileFiltering(guiplugins.ActionGUI):
def _getTitle(self):
return "Test Filtering"
def isActiveOnCurrent(self, *args):
return guiplugins.ActionGUI.isActiveOnCurrent(self) and len(self.currFileSelection) == 1
def getVersion(self, test, fileName):
fileVersions = set(os.path.basename(fileName).split(".")[1:])
testVersions = set(test.app.versions + [ test.app.name ])
additionalVersions = fileVersions.difference(testVersions)
return ".".join(additionalVersions)
def getTextToShow(self, test, fileName):
version = self.getVersion(test, fileName)
return test.app.applyFiltering(test, fileName, version)
def performOnCurrent(self):
self.reloadConfigForSelected() # Always make sure we're up to date here
test = self.currTestSelection[0]
fileName = self.currFileSelection[0][0]
text = self.getTextToShow(test, fileName)
root = test.getEnvironment("TEXTTEST_SANDBOX_ROOT")
plugins.ensureDirectoryExists(root)
tmpFileNameLocal = os.path.basename(fileName) + " (FILTERED)"
tmpFileName = os.path.join(root, tmpFileNameLocal)
bakFileName = tmpFileName + ".bak"
if os.path.isfile(bakFileName):
os.remove(bakFileName)
if os.path.isfile(tmpFileName):
os.rename(tmpFileName, bakFileName)
with open(tmpFileName, "w") as tmpFile:
tmpFile.write(text)
# Don't want people editing by mistake, remove write permissions
os.chmod(tmpFileName, stat.S_IREAD)
self.notify("ViewReadonlyFile", tmpFileName)
def getSignalsSent(self):
return [ "ViewReadonlyFile" ]
class InsertShortcuts(RunScriptAction, guiplugins.OptionGroupGUI):
def __init__(self, allApps, dynamic, inputOptions):
guiplugins.OptionGroupGUI.__init__(self, allApps, dynamic)
RunScriptAction.__init__(self, inputOptions)
def scriptName(self):
return "default.InsertShortcuts"
def _getTitle(self):
return "Insert Shortcuts into Usecases"
    def getTooltip(self):
return self._getTitle()
def notifyShortcut(self, *args):
self.showQueryDialog(self.getParentWindow(), "New shortcuts were created. Would you like to insert them into all usecases now?",
gtk.STOCK_DIALOG_WARNING, "Confirmation", self.respondShortcut)
def respondShortcut(self, dialog, ans, *args):
if ans == gtk.RESPONSE_YES:
self.performOnCurrent(filterFileOverride=NotImplemented)
dialog.hide()
def performedDescription(self):
return "Inserted shortcuts into usecases for"
def isValidForApp(self, app):
return app.getConfigValue("use_case_record_mode") != "disabled" and \
app.getConfigValue("use_case_recorder") != "none"
def getInteractiveActionClasses(dynamic):
if dynamic:
return [ RerunTests, ReloadTests ]
else:
return [ RunTests, RecordTest, ReconnectToTests, ReplaceText, TestFileFiltering, InsertShortcuts ]
| [
"[email protected]"
] | |
c994e9588ddb9863412a93b03416f881104cb03e | 7e4e2acb0cdd4dba7d23e15ce56cdc3e4842f601 | /openstack/telemetry/v2/_proxy.py | f9265372a1af74b7c92e798ebfaed693919c7dcc | [
"Apache-2.0"
] | permissive | david-guyon/python-openstacksdk | 99b2a52af9acf75b3df76438569477d2a46c1679 | 4a7c643b1b09904ac5ab8863a06a11e493a3d235 | refs/heads/master | 2021-05-28T23:06:46.337935 | 2015-02-26T23:12:53 | 2015-02-26T23:12:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.telemetry.v2 import alarm
from openstack.telemetry.v2 import alarm_change
from openstack.telemetry.v2 import capability
from openstack.telemetry.v2 import meter
from openstack.telemetry.v2 import resource
from openstack.telemetry.v2 import sample
from openstack.telemetry.v2 import statistics
class Proxy(object):
def __init__(self, session):
self.session = session
def create_alarm(self, **data):
return alarm.Alarm(data).create(self.session)
def delete_alarm(self, **data):
alarm.Alarm(data).delete(self.session)
def find_alarm(self, name_or_id):
return alarm.Alarm.find(self.session, name_or_id)
def get_alarm(self, **data):
return alarm.Alarm(data).get(self.session)
def list_alarms(self):
return alarm.Alarm.list(self.session)
def update_alarm(self, **data):
return alarm.Alarm(data).update(self.session)
def find_alarm_change(self, name_or_id):
return alarm_change.AlarmChange.find(self.session, name_or_id)
def list_alarm_changes(self):
return alarm_change.AlarmChange.list(self.session)
def find_capability(self, name_or_id):
return capability.Capability.find(self.session, name_or_id)
def list_capabilitys(self):
return capability.Capability.list(self.session)
def find_meter(self, name_or_id):
return meter.Meter.find(self.session, name_or_id)
def list_meters(self):
return meter.Meter.list(self.session)
def find_resource(self, name_or_id):
return resource.Resource.find(self.session, name_or_id)
def get_resource(self, **data):
return resource.Resource(data).get(self.session)
def list_resources(self):
return resource.Resource.list(self.session)
def create_sample(self, **data):
return sample.Sample(data).create(self.session)
def find_sample(self, name_or_id):
return sample.Sample.find(self.session, name_or_id)
def list_samples(self):
return sample.Sample.list(self.session)
def find_statistics(self, name_or_id):
return statistics.Statistics.find(self.session, name_or_id)
def list_statistics(self):
return statistics.Statistics.list(self.session)
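# Minimal usage sketch (illustrative only; assumes an authenticated
# ``session`` object created elsewhere with the SDK):
#
#     proxy = Proxy(session)
#     for m in proxy.list_meters():
#         print(m)
#     found = proxy.find_resource('my-resource-id')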
| [
"[email protected]"
] | |
40fd3a424ce366164e63986b518fd63f14d16468 | 2a0efe30198c948982a85ea74c7e47ef9ecb4823 | /pers/cyj/day11/shoot/person.py | 124be128ac066503bc23040dc39c6c6912db87cf | [] | no_license | cyjhunnyboy/PythonTutorialProj | 00151ed364807c6df54b1b0300cb622086128b74 | c3a02e6fa2dd09bf5d7b47fd78a4d3a31356a39d | refs/heads/master | 2023-02-23T18:35:45.481252 | 2021-01-25T11:13:45 | 2021-01-25T11:13:45 | 298,922,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | class Person(object):
"""人类"""
def __init__(self, gun):
self.gun = gun
def fire(self):
self.gun.shoot()
def fillBullet(self, count):
self.gun.bulletBox.bulletCount = count
| [
"[email protected]"
] | |
ca3aa4adc107683032e77e9d1df22364acfe304a | 0e4dc82a94563dacb0c25d0d43fbcbe3def21f72 | /259-3Sum-Smaller/Python/Solution01.py | d722b1e3406c547a4b5602fe95da262f1617fbce | [
"CC-BY-3.0",
"MIT"
] | permissive | Eroica-cpp/LeetCode | 3ce3b05b3098e8097c1090e2116b813efaadd2a3 | 07276bd11558f3d0e32bec768b09e886de145f9e | refs/heads/master | 2021-06-20T05:41:30.506250 | 2017-03-16T05:17:39 | 2017-03-16T05:17:39 | 35,126,816 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | #!/usr/bin/python
"""
==============================================================================
Author: Tao Li ([email protected])
Date: Aug 29, 2015
Question: 259-3Sum-Smaller
Link: https://leetcode.com/problems/3sum-smaller/
==============================================================================
Given an array of n integers nums and a target, find the number of index
triplets i, j, k with 0 <= i < j < k < n that satisfy the condition
nums[i] + nums[j] + nums[k] < target.
For example, given nums = [-2, 0, 1, 3], and target = 2.
Return 2. Because there are two triplets which sums are less than 2:
[-2, 0, 1]
[-2, 0, 3]
Follow up:
Could you solve it in O(n^2) runtime?
==============================================================================
Method: sort first
Time Complexity: O(n^2)
Space Complexity: O(1)
==============================================================================
"""
class Solution(object):
def threeSumSmaller(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
size = len(nums)
counter = 0
for i in xrange(size-2):
begin, end = i + 1, size - 1
while begin < end:
if nums[i] + nums[begin] + nums[end] < target:
counter += end - begin
begin += 1
else:
end -= 1
return counter
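# Hypothetical self-check harness (an addition for illustration, not part
# of the original submission):
if __name__ == "__main__":
    assert Solution().threeSumSmaller([-2, 0, 1, 3], 2) == 2
    assert Solution().threeSumSmaller([3, 1, 0, -2], 4) == 3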
| [
"[email protected]"
] | |
d8a209f8fec660b58abf72cb3b33b420aa69cf12 | 665d9bad46e68f779512640e582d2522867b0dba | /Linked List Problems/21. Merge Two Sorted Lists.py | 48c4ac08963a9b30f419087ad340bcb3731dfc20 | [] | no_license | RahatIbnRafiq/leetcodeProblems | 6fd1e9726b14b7ad3571e5a4af5665b72f7aee0a | 2d35590d189938e0705a21be110e75e0b209ea96 | refs/heads/master | 2021-11-25T11:58:20.498911 | 2021-11-25T00:43:04 | 2021-11-25T00:43:04 | 72,865,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
d1,d2,d3 = ListNode(0),ListNode(0),ListNode(0)
d1.next, d2.next = l1,l2
cur = d3
while d1.next and d2.next:
if d1.next.val <= d2.next.val:
cur.next = d1.next
d1.next = d1.next.next
else:
cur.next = d2.next
d2.next = d2.next.next
cur = cur.next
if d1.next is not None:
cur.next = d1.next
d1.next = None
elif d2.next is not None:
cur.next = d2.next
d2.next = None
return d3.next
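# Hypothetical usage (illustrative only -- on LeetCode the judge supplies
# ListNode instances and invokes mergeTwoLists directly):
#   l1 = 1->2->4, l2 = 1->3->4
#   Solution().mergeTwoLists(l1, l2)  =>  1->1->2->3->4->4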
| [
"[email protected]"
] | |
77d1b6b14ec4e15eceb3b3cb3df095166f40518e | 88bbf27deb0b2a1b96985c0a94ff0b7a3d012820 | /Feeds/migrations/0001_initial.py | 4b9aaa549847b201a89bcbc92820989702ecc36c | [] | no_license | Code-Community99/Hiq-django | e8efb7d63bd4fc0bc8e2af193fdec9aaab0975b0 | af62622648ad88f6e8d94e86a8dc5d6660e3bbe2 | refs/heads/master | 2022-12-14T01:12:45.218318 | 2020-05-18T23:29:35 | 2020-05-18T23:29:35 | 233,811,384 | 2 | 1 | null | 2022-12-08T03:34:53 | 2020-01-14T10:02:55 | JavaScript | UTF-8 | Python | false | false | 783 | py | # Generated by Django 3.0.2 on 2020-01-31 11:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('signup', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='feeds_list',
fields=[
('Fid', models.AutoField(primary_key=True, serialize=False)),
('feed', models.CharField(max_length=1255)),
('post_time', models.DateTimeField(auto_now_add=True)),
('uid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='signup.signup_user')),
],
options={
'db_table': 'feeds',
},
),
]
| [
"[email protected]"
] | |
64bec78da4f9613a704e778e9861c439a2f6da10 | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY001~099/DAY84-BOJ7579-앱/ykim.py | 57d20480ba6f75c21a3574aa7376c4bf95479268 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | import sys
N,M=map(int,sys.stdin.readline().split())
# apps
m=list(map(int,sys.stdin.readline().split()))  # memory used by each app
c=list(map(int,sys.stdin.readline().split()))  # deactivation cost of each app
result=[10001]*(M+1)
result[0]=0
# M = the amount of memory to secure
for i in range(N):
    rm=m[i]  # memory freed when this app is deactivated
    for j in range(M,-1,-1):
        if result[j]!=10001:  # if this cost state is reachable
            if j+rm>=M:  # meets or exceeds the target memory
                if result[M]>result[j]+c[i]:  # cheaper than the current best
                    result[M]=result[j]+c[i]
            else:  # still below the target memory
                if result[j+rm]>result[j]+c[i]:  # update if this costs less than before
                    result[j+rm]=result[j]+c[i]
print(result[M])
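# Sanity check (assumes the well-known BOJ 7579 sample input):
#   stdin "5 60\n30 10 20 35 40\n3 0 3 5 4\n" should print 6, since
#   deactivating the 30MB, 10MB and 20MB apps frees exactly 60MB at cost 3+0+3.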
| [
"[email protected]"
] | |
6a77925e19d91eaed67900e2e238817e5b1b7ae6 | 2e70b3ce93762c5b66fba57f8b9cba37aacf0702 | /new/event/migrations/0068_auto_20190604_0558.py | 2b9ad4a849e0cdbfb1119ab88bfba5b068febf1e | [] | no_license | mahidul-islam/jamah | 02be511fe119e8934ec7d5aa1eaa8e2b24fad246 | c8ddf9a8094d33e8b1d6cb834eab3d9f18b1a9ea | refs/heads/master | 2022-05-13T15:11:38.609550 | 2019-06-08T04:52:09 | 2019-06-08T04:52:09 | 184,331,276 | 2 | 0 | null | 2022-04-22T21:27:18 | 2019-04-30T21:04:06 | Python | UTF-8 | Python | false | false | 536 | py | # Generated by Django 2.2.1 on 2019-06-04 05:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('event', '0067_auto_20190602_1815'),
]
operations = [
migrations.RenameField(
model_name='eventmember',
old_name='account',
new_name='accountant_account',
),
migrations.RenameField(
model_name='eventmember',
old_name='total_sent',
new_name='total_sent_money',
),
]
| [
"[email protected]"
] | |
8abf0d510ba72fc3a8052dcc5dd840e749d5fe57 | 84bf0086bfe7af894bbfba353f1884ae9f31c335 | /1123. Lowest Common Ancestor of Deepest Leaves/Python 3/solution.py | 3ab86fe8912541cd5a9e096944789f1d46549a38 | [] | no_license | HarrrrryLi/LeetCode | 7625d9ec3d7854f72e7aeeb0292960af06a78d05 | fe5c6936627c2459731ddda6f67422c217b3cc91 | refs/heads/master | 2020-04-12T23:57:12.387253 | 2020-01-16T02:22:26 | 2020-01-16T02:22:26 | 162,834,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections


class Solution:
def lcaDeepestLeaves(self, root: TreeNode) -> TreeNode:
stack = collections.deque()
path = set()
path.add((root, 0))
stack.append((root, 0, path))
leaves = collections.defaultdict(list)
while stack:
node, depth, path = stack.pop()
if not node.left and not node.right:
leaves[depth].append(path)
continue
if node.left:
temp = set(path)
temp.add((node.left, depth + 1))
stack.append((node.left, depth + 1, temp))
if node.right:
temp = set(path)
temp.add((node.right, depth + 1))
stack.append((node.right, depth + 1, temp))
max_depth = max(leaves)
candidates = leaves[max_depth][0]
for path in leaves[max_depth]:
candidates &= path
return max(candidates, key=lambda x: x[1])[0]
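# Walkthrough (illustrative, using the judge-provided TreeNode):
#   root = [1,2,3,4]   -> the deepest leaf is 4, so the answer is node 4 itself
#   root = [1,2,3,4,5] -> deepest leaves 4 and 5 share node 2 as their LCA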
| [
"[email protected]"
] | |
4eebe69f6eb17ac0db185dfa84f1ab896d702cd5 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/3a4115ceaf044682aee09918ac6e23d1.py | b78aaaad71268364f91be3bfbc607b615372c346 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 236 | py | def hey(arg):
arg = arg.strip()
if arg == '':
return 'Fine. Be that way!'
elif arg.isupper():
return "Whoa, chill out!"
elif arg.endswith('?'):
return "Sure."
else:
return "Whatever."
| [
"[email protected]"
] | |
36ae90b43eb93bcefa102186ba36f11fe3c82757 | 650b3dd4cc74f32db78f7d99cef9907aec78a222 | /dialogs/Transaksi/fFundCollection_intr.py | 1f13cbe037fb3f889079353bc5fd50c4f76ebfc3 | [] | no_license | mech4/PKTrx | 29b871ab587434e7c208175c248f48d9b6c80a17 | cf01bc5be8837d632974786d2419c58b94a0381d | refs/heads/master | 2020-03-29T19:55:07.331831 | 2012-09-18T20:22:52 | 2012-09-18T20:22:52 | 6,289,691 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,055 | py | # GLOBALs
MAPEntity = {'Z': 1, 'I': 2, 'W': 3, 'A': 4, 'N': 5}
DefaultItems = [ 'Inputer',
'BranchCode',
'TransactionDate',
'FloatTransactionDate',
'CurrencyCode',
'Rate',
'TotalAmount',
'CashType',
#'LBatch.BatchId',
#'LBatch.BatchNo',
#'LBatch.Description',
'LProductBranch.Kode_Cabang',
'LProductBranch.Nama_Cabang',
'LCurrency.Currency_Code',
'LCurrency.Full_Name',
'LCurrency.Kurs_Tengah_BI',
'LValuta.Currency_Code',
'LValuta.Full_Name',
'LValuta.Kurs_Tengah_BI',
'LBank.AccountNo',
'LBank.BankName',
'LBank.CurrencyCode',
#'LPettyCash.AccountNo',
#'LPettyCash.AccountName',
#'LPettyCash.CurrencyCode',
#'LPettyCash.LCurrency.Full_Name',
#'LPettyCash.LCurrency.Kurs_Tengah_BI',
'PeriodId',
'LSponsor.SponsorId',
'LVolunteer.VolunteerId',
'TransactionNo',
'ShowMode',
'PaidTo',
'ActualDate',
]
class fFundCollection :
def __init__(self, formObj, parentForm) :
self.app = formObj.ClientApplication
self.form = formObj
self.fSelectProduct = None
self.fSearchDonor = None
self.DefaultValues = {}
self.IdxCounter = 0
def InitValues(self):
if self.uipTransaction.ShowMode == 1 : # input mode
self.DonorNo = self.uipDonor.DonorNo
self.AmountList = {}
self.IdxCounter = 1
else: # edit mode
self.DonorNo = self.uipDonor.DonorNo
self.AmountList = {}
uipTran = self.uipTransaction
uipItem = self.uipTransactionItem
TotalAmount = 0.0
TotalItemRow = uipItem.RecordCount
Idx = 1
uipItem.First()
for i in range(TotalItemRow):
uipItem.Edit()
                # Reassign the grid index
uipItem.ItemIdx = Idx
                # Keep the amount value as a helper
self.AmountList[uipItem.ItemIdx] = uipItem.Amount #uipItem.Ekuivalen
                # Recalculate the total disbursement
TotalAmount += uipItem.Amount
Idx += 1
uipItem.Next()
# end for
            # Set IdxCounter as a helper
self.IdxCounter = TotalItemRow + 1
            # Recalculate the total collection
uipTran.Edit()
uipTran.TotalAmount = TotalAmount
# end if else
self.CheckRateEnabled()
def SaveDefaultValues(self):
uipTran = self.uipTransaction
for item in DefaultItems :
self.DefaultValues[item] = uipTran.GetFieldValue(item)
def ClearData(self):
self.InitValues()
self.uipDonor.ClearData()
self.uipTransaction.ClearData()
self.uipTransactionItem.ClearData()
uipTran = self.uipTransaction
uipTran.Edit()
for item in DefaultItems :
uipTran.SetFieldValue(item,self.DefaultValues[item])
self.CariDonor()
#self.pBatch_LBatch.SetFocus()
self.SetCashType(uipTran.CashType)
self.pBatch_ActualDate.SetFocus()
# mode
# 1 : input mode
# 2 : edit mode
def Show(self,mode=1):
uipTran = self.uipTransaction
uipTran.Edit()
uipTran.ShowMode = mode
self.InitValues()
self.DonorNo = self.uipDonor.DonorNo
self.SetCashType(uipTran.CashType)
if mode == 2: # Edit Mode
#self.pBatch_LBatch.Enabled = 0
# Set Save button hidden
self.pAction_bSave.Visible = 0
# Move button position
self.pAction_bCancel.Left = self.pAction_bSimpanClose.Left
self.pAction_bSimpanClose.Left = self.pAction_bSave.Left
            PageIndex = {'C': 0, 'K': 0, 'B': 1, 'A': 2}
self.mpBayar.ActivePageIndex = PageIndex[uipTran.PaymentType]
else: # Insert Mode
uipTran.PaymentType = 0
uipTran.CashType = 'C'
self.CariDonor()
self.SaveDefaultValues()
# end if
#self.pDonor_edAddress.Text = str(self.uipDonor.Address)
return self.FormContainer.Show()
def bCariDonorClick(self,sender):
self.CariDonor()
def CashTypeOnChange(self,sender):
        dictCashType = {0: 'K', 1: 'C'}
self.SetCashType(dictCashType[sender.ItemIndex])
def SetCashType(self,CashType):
#self.pCashTransaction_LPettyCash.visible = (CashType == 'K')
self.pCashTransaction_LCurrency.enabled = (CashType == 'C')
def IdDonorOnExit(self,sender):
uipDonor = self.uipDonor
DonorNo = uipDonor.DonorNo or ''
if ( DonorNo == '' or
( DonorNo not in [None,''] and
DonorNo == self.DonorNo)
) :
return
rph = self.form.CallServerMethod(
'GetDonorDataByNo',
self.app.CreateValues(['DonorNo',DonorNo])
)
uipDonor.Edit()
rec = rph.FirstRecord
if rec.Is_Err :
uipDonor.DonorName = ''
uipDonor.PhoneNumber = ''
uipDonor.Address = ''
self.pDonor_edAddress.Text = ''
self.DonorNo = None
uipDonor.SetFieldValue('LMarketer.MarketerId',0)
uipDonor.SetFieldValue('LMarketer.Full_Name','')
            raise 'WARNING', rec.Err_Message
uipDonor.DonorId = rec.DonorId
uipDonor.DonorName = rec.DonorName
uipDonor.PhoneNumber = rec.PhoneNumber
uipDonor.Address = rec.Address
uipDonor.DonorType = rec.DonorType
self.pDonor_edAddress.Text = str(rec.Address)
self.DonorNo = uipDonor.DonorNo
uipDonor.SetFieldValue('LMarketer.MarketerId',rec.MarketerId)
uipDonor.SetFieldValue('LMarketer.Full_Name',rec.MarketerName)
def CariDonor(self):
if self.fSearchDonor == None :
fSearch = self.app.CreateForm('Donatur/fSearchDonor', 'Donatur/fSearchDonor', 0, None, None)
self.fSearchDonor = fSearch
else :
fSearch = self.fSearchDonor
if fSearch.GetDonorData():
uipDonor = self.uipDonor
uipDonor.Edit()
uipDonor.DonorId = fSearch.DonorIntId
uipDonor.DonorNo = fSearch.DonorNo
uipDonor.DonorName = fSearch.DonorName
uipDonor.PhoneNumber = fSearch.PhoneNumber
uipDonor.Address = fSearch.Address
uipDonor.DonorType = fSearch.DonorType
self.DonorNo = uipDonor.DonorNo
self.pDonor_edAddress.Text = str(fSearch.Address)
uipDonor.SetFieldValue('LMarketer.MarketerId',fSearch.MarketerId)
uipDonor.SetFieldValue('LMarketer.Full_Name',fSearch.MarketerName)
def ProductBeforeLookup(self, sender, linkui):
if self.fSelectProduct == None:
fData = self.app.CreateForm('Transaksi/fSelectProduct', 'Transaksi/fSelectProduct', 0, None, None)
self.fSelectProduct = fData
else:
fData = self.fSelectProduct
branchCode = self.uipTransaction.BranchCode #GetFieldValue('LProductBranch.Kode_Cabang')
if fData.GetProduct(branchCode) == 1:
productId = fData.ProductId
productName = fData.ProductName
self.uipTransactionItem.Edit()
self.uipTransactionItem.SetFieldValue('LProduct.ProductId', productId)
self.uipTransactionItem.SetFieldValue('LProduct.ProductName', productName)
self.uipTransactionItem.FundEntity = MAPEntity[fData.FundCategory or 'I']
self.uipTransactionItem.PercentageOfAmil = fData.PercentageOfAmilFunds
self.uipTransactionItem.AccountNo = fData.AccountNo
self.uipTransactionItem.Description = productName
return 1
def BatchAfterLookup(self, sender, linkui):
uipTran = self.uipTransaction
uipTran.Edit()
uipTran.ActualDate = uipTran.GetFieldValue('LBatch.BatchDate')
self.DefaultValues['LBatch.BatchId'] = uipTran.GetFieldValue('LBatch.BatchId')
self.DefaultValues['LBatch.BatchNo'] = uipTran.GetFieldValue('LBatch.BatchNo')
self.DefaultValues['LBatch.Description'] = uipTran.GetFieldValue('LBatch.Description')
self.DefaultValues['ActualDate'] = uipTran.ActualDate
def PettyCashAfterLookUp(self, sender, linkui):
uipTran = self.uipTransaction
uipTran.Edit()
# Set Currency Code and Rate
CurrCode = uipTran.GetFieldValue('LPettyCash.CurrencyCode')
CurrName = uipTran.GetFieldValue('LPettyCash.LCurrency.Full_Name')
CurrRate = uipTran.GetFieldValue('LPettyCash.LCurrency.Kurs_Tengah_BI')
uipTran.SetFieldValue('LCurrency.Currency_Code' , CurrCode)
uipTran.SetFieldValue('LCurrency.Full_Name' , CurrName)
uipTran.SetFieldValue('LCurrency.Kurs_Tengah_BI' , CurrRate)
# Save Choice As Default Values
self.DefaultValues['LPettyCash.AccountNo'] = uipTran.GetFieldValue('LPettyCash.AccountNo')
self.DefaultValues['LPettyCash.AccountName'] = uipTran.GetFieldValue('LPettyCash.AccountName')
self.DefaultValues['LPettyCash.CurrencyCode'] = uipTran.GetFieldValue('LPettyCash.CurrencyCode')
self.DefaultValues['LPettyCash.LCurrency.Full_Name'] = uipTran.GetFieldValue('LPettyCash.LCurrency.Full_Name')
self.DefaultValues['LPettyCash.LCurrency.Kurs_Tengah_BI'] = uipTran.GetFieldValue('LPettyCash.LCurrency.Kurs_Tengah_BI')
def CashCurrAfterLookup(self, sender, linkui):
uipItem = self.uipTransactionItem
uipItem.Edit()
uipItem.Rate = uipItem.GetFieldValue('LCurrency.Kurs_Tengah_BI')
def BankAfterLookup(self,sender,linkui):
app = self.app
uipTran = self.uipTransaction
CurrencyCode = uipTran.GetFieldValue('LBank.CurrencyCode')
param = app.CreateValues(['CurrencyCode',CurrencyCode])
rph = self.form.CallServerMethod('GetCurrencyRate',param)
rec = rph.FirstRecord
uipTran.CurrencyCode = CurrencyCode
uipTran.Rate = rec.Kurs_Tengah_BI
self.DefaultValues['LBank.AccountNo'] = uipTran.GetFieldValue('LBank.AccountNo')
self.DefaultValues['LBank.BankName'] = uipTran.GetFieldValue('LBank.BankName')
self.DefaultValues['LBank.CurrencyCode'] = uipTran.GetFieldValue('LBank.CurrencyCode')
self.CheckRateEnabled()
def ItemNewRecord (self, sender):
sender.ItemIdx = self.IdxCounter
sender.Amount = 0.0
#sender.Rate = 1.0
#sender.Ekuivalen = 0.0
#sender.SetFieldValue('LCurrency.Currency_Code', '000')
#sender.SetFieldValue('LCurrency.Short_Name', 'IDR')
def ItemBeforePost(self, sender) :
aProductId = sender.GetFieldValue('LProduct.ProductId')
if aProductId == None or aProductId == 0:
            raise 'Product', 'No product has been selected'
            #self.app.ShowMessage('No product has been selected')
#sender.Cancel()
if sender.Amount <= 0.0 :
            raise 'Transaction Amount', 'The transaction amount may not be negative or 0.0'
#sender.Ekuivalen = sender.Amount * sender.Rate
def ItemBeforeDelete(self,sender):
self.uipTransaction.Edit()
self.uipTransaction.TotalAmount -= sender.Amount
self.uipTransaction.Post()
def ItemAfterPost(self, sender) :
self.IdxCounter += 1
Idx = sender.ItemIdx
if self.AmountList.has_key(Idx):
amountbefore = self.AmountList[Idx]
else:
amountbefore = 0.0
self.AmountList[Idx] = sender.Amount
self.uipTransaction.Edit()
self.uipTransaction.TotalAmount += (sender.Amount - amountbefore)
self.uipTransaction.Post()
def bCancelClick(self, sender):
        if self.app.ConfirmDialog('Are you sure you want to cancel the transaction?'):
sender.ExitAction = 1
else:
sender.ExitAction = 0
def CheckRequiredBank(self):
if self.uipTransaction.GetFieldValue('LBank.AccountNo') == None:
            self.app.ShowMessage('No bank has been selected')
return 0
else:
return 1
def CheckRequiredAsset(self):
if (self.uipTransaction.GetFieldValue('LAsset.Account_Code') == None or
self.uipTransaction.GetFieldValue('LValuta.Currency_Code') == None):
            self.app.ShowMessage('No asset/currency has been selected')
return 0
else:
return 1
def CheckRequiredPettyCash(self):
if self.uipTransaction.GetFieldValue('LPettyCash.AccountNo') == None:
            self.app.ShowMessage('No petty cash account has been selected')
return 0
return 1
def CheckRequiredBranchCash(self):
return 1
def CheckRequiredGeneral(self):
uipDonor = self.uipDonor
uipTran = self.uipTransaction
#if uipTran.GetFieldValue('LBatch.BatchId') == None:
        #    self.app.ShowMessage('No batch has been selected')
# return 0
if uipDonor.DonorId in [0,None]:
            self.app.ShowMessage('Donor data has not been entered')
return 0
if uipTran.ActualDate in [0, None] :
            self.app.ShowMessage('The transaction date has not been entered')
return 0
if self.uipTransactionItem.RecordCount <= 0 :
            self.app.ShowMessage('Transaction details have not been entered')
return 0
ReferenceNo = uipTran.ReferenceNo or ''
pType = self.mpBayar.ActivePageIndex
if (ReferenceNo == '' and pType != 1 ) :
            self.app.ShowMessage('The reference/FSZ number has not been entered')
return 0
return 1
def CheckRateEnabled(self):
uipTran = self.uipTransaction
self.pCashTransaction_RateCash.Enabled = uipTran.GetFieldValue('LCurrency.Currency_Code') != '000'
self.pBankTransaction_RateBank.Enabled = uipTran.GetFieldValue('LBank.CurrencyCode') != '000'
self.pAssetTransaction_RateAsset.Enabled = uipTran.GetFieldValue('LValuta.Currency_Code') != '000'
def CurrencyAfterLookup(self,sender,linkui):
uipTransaction = self.uipTransaction
if uipTransaction.GetFieldValue('LCurrency.Currency_Code') != None :
uipTransaction.Edit()
uipTransaction.Rate = uipTransaction.GetFieldValue('LCurrency.Kurs_Tengah_BI')
self.CheckRateEnabled()
def ValutaAfterLookup(self, sender, linkui):
uipTransaction = self.uipTransaction
if uipTransaction.GetFieldValue('LValuta.Currency_Code') != None :
uipTransaction.Edit()
uipTransaction.Rate = uipTransaction.GetFieldValue('LValuta.Kurs_Tengah_BI')
self.CheckRateEnabled()
def bSimpanClick(self, sender):
if self.Simpan(1):
self.ClearData()
def bSimpanCloseClick(self,sender):
if self.Simpan(2):
sender.ExitAction = 1
def Simpan(self, savemode):
app = self.app
self.FormObject.CommitBuffer()
if not self.CheckRequiredGeneral(): return 0
        if app.ConfirmDialog('Are you sure you want to save the transaction?'):
uipTran = self.uipTransaction
uipTran.Edit()
uipTran.SaveMode = savemode
pType = self.mpBayar.ActivePageIndex
if pType == 0:
self.DefaultValues['CashType'] = uipTran.CashType
uipTran.PaymentType = uipTran.CashType
if uipTran.CashType == 'K' :
if not self.CheckRequiredPettyCash():
return 0
else:
if not self.CheckRequiredBranchCash():
return 0
elif pType == 1:
uipTran.PaymentType = 'B'
if not self.CheckRequiredBank():
return 0
else: #pType == 2:
uipTran.PaymentType = 'A'
if not self.CheckRequiredAsset():
return 0
self.FormObject.CommitBuffer()
ph = self.FormObject.GetDataPacket()
ph = self.FormObject.CallServerMethod("SimpanData", ph)
res = ph.FirstRecord
if res.IsErr == 1:
app.ShowMessage(res.ErrMessage)
return 0
else: # res.IsErr in [0,2]
Message = 'Transaksi Berhasil.\nNomor Transaksi : ' + res.TransactionNo
if res.IsErr == 2:
Message += '\n Proses Jurnal Gagal.' + res.ErrMessage
app.ShowMessage(Message)
if savemode == 2 :
if self.app.ConfirmDialog('Apakah akan cetak kwitansi ?'):
oPrint = app.GetClientClass('PrintLib','PrintLib')()
#app.ShowMessage("Masukkan kertas ke printer untuk cetak kwitansi")
oPrint.doProcessByStreamName(app,ph.packet,res.StreamName)
#if savemode == 1 :
# self.DefaultValues['TransactionNo'] = res.NewTransactionNo
self.DefaultValues['ActualDate'] = uipTran.ActualDate
return 1
#-- if
return 0
| [
"[email protected]"
] |