Dataset schema (one row per source file; ⌀ = nullable):

| column | dtype | range / cardinality |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
ad6818b1cb0641df553b96ea5a6a81a2964f6ddf | /paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/euare-instanceprofilelistbypath | ["MIT"] | permissive | cirobessa/receitas-aws | Python | 229 bytes |

#!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
import euca2ools.commands.iam.listinstanceprofiles
if __name__ == '__main__':
euca2ools.commands.iam.listinstanceprofiles.ListInstanceProfiles.run()
| ["[email protected]"] |

4d137f2610e281cad85b0440573d0513db16ccdb | /server/src/oscarbundles/migrations/0009_auto_20180319_1116.py | ["ISC"] | permissive | thelabnyc/django-oscar-bundles | Python | 1,077 bytes | py |

# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 15:16
from __future__ import unicode_literals
from django.db import migrations
def make_group_trigger_data_unique(apps, schema_editor):
Bundle = apps.get_model("oscarbundles", "Bundle")
for bundle in Bundle.objects.order_by("id").all():
conflicts = (
Bundle.objects.filter(bundle_group=bundle.bundle_group)
.filter(triggering_product=bundle.triggering_product)
.exclude(pk=bundle.pk)
.order_by("id")
.all()
)
for conflict in conflicts:
            # Materialize the queryset before iterating: rows are removed from
            # the same m2m table inside the loop.
            for suggested_product in list(conflict.suggested_products.all()):
                bundle.suggested_products.add(suggested_product)
                bundle.save()
                conflict.suggested_products.remove(suggested_product)
                conflict.save()
class Migration(migrations.Migration):
dependencies = [
("oscarbundles", "0008_auto_20180318_1933"),
]
operations = [
migrations.RunPython(make_group_trigger_data_unique),
]
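
# Note: the RunPython above declares no reverse function, so this migration is
# irreversible. A no-op reverse (supported by the Django 1.11 that generated
# this file) would look like:
#
#     migrations.RunPython(make_group_trigger_data_unique, migrations.RunPython.noop)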
| ["[email protected]"] |

d82b63b927f20bd2f9ea34dd627297fedd1bd24d | /lobbyapp/dbmangr/root.py | ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | piotrmaslanka/Ninja-Tower | Python | 1,775 bytes | py |

import MySQLdb
from satella.instrumentation.counters import PulseCounter
from satella.instrumentation import CounterCollection
from satella.db.pool import DatabaseDefinition, ConnectionPool
from lobbyapp.selectlayer.api import PDBHelperInterface as SelectLayerInterface
from lobbyapp.dbmangr.proxies import SelectLayerProxy, PlayerDBProxy
from lobbyapp.playerdb.api import PDBHelperInterface as PlayerDBInterface
class DatabaseManager(object):
def __init__(self, host, username, password, dbname, rootcc, dbtype='mysql'):
"""@type rootcc: L{satella.instrumentation.CounterCollection}"""
assert dbtype == 'mysql', 'I cannot support other databases!'
dd = DatabaseDefinition(MySQLdb.connect,
(MySQLdb.OperationalError, MySQLdb.InterfaceError),
(host, username, password, dbname))
self.cp = ConnectionPool(dd)
# Set up instrumentation
insmgr = CounterCollection('database')
self.cursors_counter = PulseCounter('cursors', resolution=60,
units=u'cursors per minute',
description='SQL cursors created')
insmgr.add(self.cursors_counter)
rootcc.add(insmgr)
def query_interface(self, ifc):
if ifc == SelectLayerInterface:
return SelectLayerProxy(self)
elif ifc == PlayerDBInterface:
return PlayerDBProxy(self)
else:
raise ValueError, 'Unknown interface'
def __call__(self):
"""
Use as in:
with database_manager() as cur:
cur.execute('I CAN DO SQL')
"""
self.cursors_counter.update()
return self.cp.cursor()
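
# Illustrative wiring, following the docstring above (hypothetical host and
# credentials; requires MySQLdb and satella):
#
#     rootcc = CounterCollection('root')
#     dbm = DatabaseManager('localhost', 'ninja', 'secret', 'lobby', rootcc)
#     playerdb = dbm.query_interface(PlayerDBInterface)
#     with dbm() as cur:
#         cur.execute('SELECT 1')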
| ["[email protected]"] |

0f039a2e0f17e114b92b8c5b5137e444e7ab0e25 | /eve-8.51.857815/eve/client/script/ui/services/viewStateSvc.py | [] | no_license | nanxijw/Clara-Pretty-One-Dick | Python | 28,220 bytes | py |

#Embedded file name: eve/client/script/ui/services\viewStateSvc.py
"""
The view manager is tasked with controlling the transitions between fullscreen views
"""
from service import Service
import uicls
import carbonui.const as uiconst
import blue
import uthread
import localization
import memorySnapshot
import log
class ViewStateError(Exception):
"""
A generic error wrapper for view state related errors
"""
__guid__ = 'viewstate.ViewStateError'
class View(object):
"""
The base class for a view. It consists of a UI root container and a scene.
    The view is registered for notify events by the view manager and will receive them while active or visible.
"""
__guid__ = 'viewstate.View'
__notifyevents__ = []
__dependencies__ = []
__layerClass__ = uicls.LayerCore
__progressMessageLabel__ = None
__subLayers__ = None
__overlays__ = set()
__suppressedOverlays__ = set()
__exclusiveOverlay__ = set()
def __init__(self):
self.name = None
self.layer = None
self.scene = None
self._dynamicViewType = None
def GetDynamicViewType(self):
"""
Override in views that are able to exist as primary or secondary views.
"""
if self._dynamicViewType is None:
raise RuntimeError('View %s was activated without being set to Primary or Secondary' % self.name)
return self._dynamicViewType
def SetDynamicViewType(self, viewType):
self._dynamicViewType = viewType
def LoadView(self, **kwargs):
"""Called when the view is loaded"""
self.LogInfo('LoadView called on view', self.name, 'kwargs', kwargs)
def UnloadView(self):
"""Used for cleaning up after the view has served its purpose"""
self.LogInfo('UnloadView called on view', self.name)
def ShowView(self, **kwargs):
"""
Only called on a Primary views. Called after LoadView has been called.
This allows the primary view to stay loaded while still responding to view switch from secondary back to primary view.
"""
self.LogInfo('ShowView called on view', self.name, 'with', kwargs)
def HideView(self):
"""
Only called on a Primary views after LoadView has been called when a secondary view is activated.
This allows the primary view to stay loaded while still responding to view switch from primary back to secondary view.
"""
self.LogInfo('HideView called on view', self.name)
def ZoomBy(self, amount):
if self.layer:
self.layer.ZoomBy(amount)
def IsActive(self):
        return sm.GetService('viewState').IsViewActive(self.name)
def GetProgressText(self, **kwargs):
"""Override this if you has complicated needs with respect to progress text"""
if self.__progressMessageLabel__:
return localization.GetByLabel(self.__progressMessageLabel__)
def CanEnter(self, **kwargs):
"""
Indicate if it is safe to enter the view.
        arguments:
- kwargs: named input arguments to the view activation
"""
return True
def CanExit(self):
"""
        Indicate if it is safe to exit the view. If we are in the middle of something, bad stuff can happen.
"""
return True
def CheckShouldReopen(self, newKwargs, cachedKwargs):
"""
This method gets to evaluate the opening arguments and decide if we want to reopen or recreate the view.
Only evaluated for primary views
Override if naive dict equality does not cut it.
Returns True to reopen and false to recreate.
"""
return newKwargs == cachedKwargs
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
def LogInfo(self, *args, **kwargs):
sm.GetService('viewState').LogInfo(self, *args, **kwargs)
def LogWarn(self, *args, **kwargs):
sm.GetService('viewState').LogWarn(self, *args, **kwargs)
def LogError(self, *args, **kwargs):
sm.GetService('viewState').LogError(self, *args, **kwargs)
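
# Minimal illustrative subclass (hypothetical; not part of this file):
#
#     class HangarView(View):
#         __notifyevents__ = ['OnSessionChanged']
#
#         def LoadView(self, **kwargs):
#             View.LoadView(self, **kwargs)
#             # build UI under self.layer / set up self.scene here
#
#         def UnloadView(self):
#             # tear down UI and scene here
#             View.UnloadView(self)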
class Transition(object):
"""
A transition defines graphical behavior while switching between any two views.
Graphical effects for masking the switch belong here
"""
__guid__ = 'viewstate.Transition'
def __init__(self, allowReopen = True, fallbackView = None):
self.allowReopen = allowReopen
self.fallbackView = fallbackView
self.transitionReason = None
self.animatedOut = set()
def StartTransition(self, fromView, toView):
"""Called when the view is activated"""
sm.GetService('viewState').LogInfo('Transition starting for', fromView, 'to', toView)
def EndTransition(self, fromView, toView):
"""Used for cleaning up after the view"""
sm.GetService('viewState').LogInfo('Transition ending for', fromView, 'to', toView)
self.transitionReason = None
def IsActive(self):
"""Query if a transition is currently in progress"""
        # Note: `self.active` is never assigned anywhere in this class; callers
        # should prefer ViewStateSvc.HasActiveTransition().
        return self.active
def SetTransitionReason(self, reason, allowOverwrite = False):
if reason is None or self.transitionReason is not None and not allowOverwrite:
return
self.transitionReason = reason
def AnimateUIIn(self, duration = 2):
uthread.new(self._AnimateUIIn, duration)
def _AnimateUIIn(self, duration = 2):
"""
display the layers if they have been animated by us, and then fade them in
"""
curveSet = None
for layer, doSleep in ((uicore.layer.main, False), (uicore.layer.viewstate, True)):
if layer in self.animatedOut:
layer.display = True
self.animatedOut.remove(layer)
uicore.animations.FadeIn(layer, curveSet=curveSet, duration=duration, sleep=doSleep)
self.animatedOut = set()
def AnimateUIOut(self, duration = 0.5):
uthread.new(self._AnimateUIOut, duration)
def _AnimateUIOut(self, duration = 0.5):
curveSet = None
myCallback = lambda : self.FadeOutEndCallback(uicore.layer.main)
uicore.animations.FadeOut(uicore.layer.main, duration=duration, curveSet=curveSet, callback=myCallback)
myCallback = lambda : self.FadeOutEndCallback(uicore.layer.viewstate)
uicore.animations.FadeOut(uicore.layer.viewstate, duration=duration, sleep=True, curveSet=curveSet, callback=myCallback)
def FadeOutEndCallback(self, layer, *args):
"""
set the display of the layers to False so they are not active while hidden
also record that we did something to this layer, so when animating in we are not chaning display
of something we are not responsible for hiding (someone else might have been doing it)
"""
if layer.display:
self.animatedOut.add(layer)
layer.display = False
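
# Illustrative custom transition (hypothetical): fade the UI out when the
# switch starts and back in when it ends, using the helpers above.
#
#     class FadeTransition(Transition):
#         def StartTransition(self, fromView, toView):
#             Transition.StartTransition(self, fromView, toView)
#             self.AnimateUIOut(duration=0.5)
#
#         def EndTransition(self, fromView, toView):
#             self.AnimateUIIn(duration=2)
#             return Transition.EndTransition(self, fromView, toView)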
class ViewType:
"""
    Enumerates the different types of view templates available.
Also defines the precedence of different types.
"""
__guid__ = 'viewstate.ViewType'
Primary = 0
Secondary = 1
Dynamic = 2
class ViewInfo(object):
"""
Meta data about a view. This is used internally by the viewState service for accounting
purposes. Stores info like name, type and statistics. Also caches the opening arguments last
used to open the view to use when re-entering primary views.
"""
__guid__ = 'viewstate.ViewInfo'
def __init__(self, name, view, viewType = ViewType.Primary):
self.name = name
self.view = view
self.viewType = viewType
self.viewCount = 0
self.viewTime = 0
self.entryArguments = None
def GetViewType(self):
if self.viewType == ViewType.Dynamic:
return self.view.GetDynamicViewType()
else:
return self.viewType
def __repr__(self):
return 'ViewInfo(view=%s type=%d)' % (self.view, self.viewType)
class ViewStateSvc(Service):
"""
Manages a set of view state and transitions between them.
Views come in two flavors: Primary and Secondary
Primary view:
        These are the important ones. They usually represent game state and are usually dictated by the server.
        The classic way is to respond to session changes and change using ChangePrimaryView.
    Secondary view:
        These are for ingame tools. Maps, inventory, character customization etc. They are most often invoked
        by the players themselves via links, buttons and whatnot.
    Transitions:
        These define what view state can lead to what other view state. Only the declared transitions are valid
        and others will result in errors. There is always a transition class instance associated with the mapping.
        The instance implements any kind of effect meant to be executed WHILE the state switch takes place.
"""
__guid__ = 'svc.viewState'
__servicename__ = 'view state manager'
__displayname__ = 'View State Manager'
__notifyevents__ = ['OnShowUI']
__dependencies__ = ['loading']
def Initialize(self, viewLayerParent):
"""
Initialize the view state service and prepare for configuration
arguments:
viewLayerParent: this is the ui layer where all the view navigation layers will reside
in along with the overlay parent layer (ie. uicore.layer.viewstate)
"""
self.viewLayerParent = viewLayerParent
self.viewInfosByName = {}
self.transitionsByNames = {}
self.overlaysByName = {}
self.overlayLayerParent = self.viewLayerParent.AddLayer('l_view_overlays', uicls.LayerCore)
self.primaryInfo = None
self.secondaryInfo = None
self.activeViewInfo = None
self.activeTransition = None
self.isOpeningView = None
self.lastViewOpenTime = blue.os.GetWallclockTime()
self.logUsageHandler = None
self.logStorage = []
def LogUsage(self, viewName, time):
"""
We start trying to log before we have logged in so we need to work around that
"""
if self.logUsageHandler is None:
if sm.GetService('machoNet').IsConnected() and session.charid is not None:
self.logUsageHandler = sm.GetService('infoGatheringSvc').GetEventIGSHandle(const.infoEventViewStateUsage)
for viewName, time in self.logStorage:
self.LogUsage(viewName, time)
del self.logStorage
else:
self.logStorage.append((viewName, time))
else:
self.logUsageHandler(char_1=viewName, itemID=session.charid, int_1=1, float_1=float(time) / const.SEC)
def ActivateView(self, name, **kwargs):
"""
makes the selected view active
"""
self.LogInfo('Activating view', name, 'with key words', kwargs)
transitionFailed = False
if self.isOpeningView is not None:
self.LogInfo("Can't activate view", name, '. already busy opening view', self.isOpeningView)
return
self.isOpeningView = name
error = None
try:
newInfo = self.GetViewInfo(name)
oldInfo = self.secondaryInfo or self.primaryInfo
if newInfo.viewType == ViewType.Dynamic:
if self.primaryInfo is None:
newInfo.view.SetDynamicViewType(ViewType.Primary)
else:
newInfo.view.SetDynamicViewType(ViewType.Secondary)
transition = self.GetTransition(oldInfo, newInfo)
if transition is None and oldInfo is not None and newInfo.name == oldInfo.name:
self.LogInfo('No valid transition found for view', name, 'to view', name, '. Skipping since it is is already active')
else:
if oldInfo:
try:
if not oldInfo.view.CanExit():
oldInfo.view.LogInfo('Unable to exit view at present')
return
except:
log.LogException()
try:
if not newInfo.view.CanEnter(**kwargs):
newInfo.view.LogInfo('Unable to enter view now. Arguments:', kwargs)
return
except:
log.LogException()
viewOpenTime = blue.os.GetWallclockTime()
self.activeTransition = transition
try:
self.activeTransition.StartTransition(oldInfo.view if oldInfo else None, newInfo.view)
except:
log.LogException()
progressText = newInfo.view.GetProgressText(**kwargs)
if progressText:
sm.GetService('loading').ProgressWnd(progressText, '', 1, 2)
reopen = False
if newInfo.GetViewType() == ViewType.Secondary:
if self.secondaryInfo:
reopen = self.activeTransition.allowReopen and newInfo == self.secondaryInfo
if reopen:
try:
reopen = newInfo.view.CheckShouldReopen(kwargs, newInfo.entryArguments)
except:
log.LogException()
reopen = False
self._CloseView(self.secondaryInfo, unload=not reopen)
else:
self._CloseView(self.primaryInfo, unload=False)
else:
if self.secondaryInfo:
self._CloseView(self.secondaryInfo)
if self.primaryInfo:
if self.activeTransition.allowReopen and newInfo == self.primaryInfo:
try:
                            # Unlike the secondary-view branch above, this
                            # (decompiled) source ignores CheckShouldReopen's
                            # return value and always reopens on success.
                            self.primaryInfo.view.CheckShouldReopen(kwargs, newInfo.entryArguments)
                            reopen = True
except:
log.LogException()
self._CloseView(self.primaryInfo, unload=False)
else:
self._CloseView(self.primaryInfo)
self.activeViewInfo = newInfo
if newInfo.GetViewType() == ViewType.Primary:
self._OpenPrimaryView(newInfo, reopen=reopen, **kwargs)
else:
self._OpenView(newInfo, reopen=reopen, **kwargs)
self.UpdateOverlays()
if progressText is not None:
sm.GetService('loading').ProgressWnd(progressText, '', 2, 2)
try:
transitionFailed = self.activeTransition.EndTransition(oldInfo, newInfo)
except:
log.LogException()
timeInView = viewOpenTime - self.lastViewOpenTime
if oldInfo:
oldInfo.viewTime += timeInView
self.LogUsage(oldInfo.name, timeInView)
self.activeViewInfo.viewCount += 1
self.lastViewOpenTime = viewOpenTime
if newInfo.GetViewType() == ViewType.Primary:
sm.ScatterEvent('OnClientReady', newInfo.name)
self.LogInfo('View', name, 'was activated')
sm.ScatterEvent('OnViewStateChanged', oldInfo.name if oldInfo else None, newInfo.name)
except UserError as e:
self.LogInfo('UserError raised while making a transition. UserError', e)
if newInfo.GetViewType() == ViewType.Secondary:
error = e
else:
raise RuntimeError('UserError raised while transitioning from %s to %s UserError: %s' % (oldInfo, newInfo, e))
finally:
self.isOpeningView = None
if transitionFailed:
self.ActivateView(self.activeTransition.fallbackView, **kwargs)
self.activeTransition = None
sm.GetService('loading').HideAllLoad()
if error:
self.LogInfo('Trying to re-enter primary view', self.primaryInfo.name, 'using cached entry arguments', self.primaryInfo.entryArguments)
uthread.new(self.ActivateView, self.primaryInfo.name, **self.primaryInfo.entryArguments).context = 'viewStateSvc::AttemptToRecoverFromUserError'
raise error
def StartDependantServices(self, viewInfo):
"""make sure all the dependent services have started before we fully activate the view"""
for serviceName in viewInfo.view.__dependencies__:
setattr(viewInfo.view, serviceName, sm.StartServiceAndWaitForRunningState(serviceName))
self.LogInfo('Dependant service', serviceName, 'has started')
self.LogInfo('All dependant services started for view', viewInfo.name)
def _OpenPrimaryView(self, viewInfo, reopen = False, **kwargs):
"""
takes care of primary view specific functionality that needs to happen when opening
"""
blue.SetCrashKeyValues(u'ViewState', unicode(viewInfo.name))
blue.statistics.SetTimelineSectionName(viewInfo.name)
memorySnapshot.AutoMemorySnapshotIfEnabled(viewInfo.name)
self._OpenView(viewInfo, reopen=reopen, **kwargs)
def _OpenView(self, viewInfo, reopen = False, **kwargs):
self.LogInfo('Re-open view' if reopen else 'Opening view', viewInfo, 'with kwargs', kwargs)
self.StartDependantServices(viewInfo)
showView = True
if viewInfo.GetViewType() == ViewType.Primary:
if self.activeViewInfo.GetViewType() == ViewType.Secondary:
showView = False
sm.ScatterEvent('OnPrimaryViewChanged', self.primaryInfo, viewInfo)
self.primaryInfo = viewInfo
else:
self.secondaryInfo = viewInfo
try:
if showView:
self.LogInfo('Opening layer', viewInfo.view.layer.name)
viewInfo.view.layer.OpenView()
viewInfo.view.layer.pickState = uiconst.TR2_SPS_ON
viewInfo.view.layer.display = True
else:
self.LogInfo('Changing the primary layer while a secondary view', self.activeViewInfo.name, 'is active')
except:
log.LogException()
try:
if reopen:
self.LogInfo('View', viewInfo.name, 'is being re-opened')
else:
self.LogInfo('View', viewInfo.name, 'is being loaded.')
viewInfo.view.LoadView(**kwargs)
if showView:
self.LogInfo('Showing view', viewInfo.name)
viewInfo.view.ShowView(**kwargs)
except:
log.LogException()
sm.RegisterNotify(viewInfo.view)
viewInfo.entryArguments = kwargs
self.LogInfo('view', viewInfo, 'opened')
def _CloseView(self, viewInfo, unload = True):
sm.UnregisterNotify(viewInfo.view)
try:
viewInfo.view.layer.CloseView(recreate=False)
except:
log.LogException()
viewInfo.view.layer.display = False
try:
viewInfo.view.HideView()
if unload:
viewInfo.view.UnloadView()
self.LogInfo('Unloading view', viewInfo.name)
except:
log.LogException()
if viewInfo.GetViewType() == ViewType.Primary:
if unload:
viewInfo.entryArguments = None
else:
self.secondaryInfo = None
sm.ScatterEvent('OnViewClosed', viewInfo.name)
def ChangePrimaryView(self, name, **kwargs):
"""
        Change the primary view without forcing the secondary view to change.
NOTE: if this would make the current secondary invalid we should close it
"""
self.LogInfo('ChangePrimaryView', name)
while self.isOpeningView:
blue.pyos.synchro.Yield()
if self.secondaryInfo:
if (self.secondaryInfo.name, name) not in self.transitionsByNames:
raise ViewStateError('Changing primary view to %s from current active secondary view %s will leave the viewStateSvc in an undefined state' % (name, self.secondaryInfo.name))
viewInfo = self.GetViewInfo(name)
self._CloseView(self.primaryInfo)
self._OpenView(viewInfo, **kwargs)
self.UpdateOverlays()
else:
self.ActivateView(name, **kwargs)
def GetTransition(self, oldInfo, newInfo):
oldViewName = oldInfo.name if oldInfo else None
transition = self.transitionsByNames.get((oldViewName, newInfo.name))
if transition is None:
transition = self.transitionsByNames.get((None, newInfo.name))
if transition is None:
raise ViewStateError('There is not a valid transition from %s to %s' % (oldViewName, newInfo.name))
self.LogInfo('Found transition from', oldViewName, 'to', newInfo.name)
return transition
def GetTransitionByName(self, fromName, toName):
if (fromName, toName) in self.transitionsByNames:
return self.transitionsByNames[fromName, toName]
def GetView(self, name):
"""return a named view"""
return self.GetViewInfo(name).view
def HasView(self, name):
return name in self.viewInfosByName
def GetViewInfo(self, name):
"""return a named view info"""
try:
return self.viewInfosByName[name]
except KeyError:
raise ViewStateError('There is no view registered by the name %s' % name)
def GetCurrentViewInfo(self):
"""get the current view"""
return self.activeViewInfo
def GetCurrentView(self):
"""get the current view. None if no view is active."""
return getattr(self.activeViewInfo, 'view', None)
def IsViewActive(self, *names):
return getattr(self.activeViewInfo, 'name', None) in names
def GetActiveViewName(self):
return getattr(self.activeViewInfo, 'name', None)
def HasActiveTransition(self):
"""
        Queries whether there is a transition currently occurring.
NOTE: This should be temporary and used very sparingly as this is not a paradigm we want to follow.
Refactoring is needed on the VSM and the use of transitions to avoid it though.
"""
if self.activeTransition is not None:
return True
else:
return False
def AddView(self, name, view, viewType = ViewType.Primary):
"""
add a new view
"""
self.LogInfo('Adding view', name, view, viewType)
view.name = name
info = ViewInfo(name, view, viewType)
view.layer = self.viewLayerParent.AddLayer('l_%s' % name, view.__layerClass__, view.__subLayers__)
view.layer.state = uiconst.UI_HIDDEN
self.viewInfosByName[name] = info
def AddTransition(self, fromName, toName, transition = Transition()):
"""
define a transition from one view to another.
This will allow special effects to take place implemented by the view
"""
self.LogInfo('Adding transition', fromName or '[All]', toName, transition)
self.transitionsByNames[fromName, toName] = transition
def AddTransitions(self, fromNames, toNames, transition = Transition()):
"""
define many to many transitions that share a single transition implementation
arguments:
fromNames is a list of view names that appear in the from clause of a transition
            toNames is a list of view names that appear in the to clause of a transition
transition that is initiated for all the transitions generated
"""
for fromName in fromNames:
for toName in toNames:
self.AddTransition(fromName, toName, transition)
def GetPrimaryView(self):
try:
return self.primaryInfo.view
except AttributeError:
raise ViewStateError('There is no primary view set')
def CloseSecondaryView(self, name = None):
"""
        Close a secondary view. It is safe to call even if it is not active.
        If called with no arguments or None we will close whatever secondary view is open.
You can call this if you just want to make sure no secondary view is open.
"""
while self.isOpeningView:
blue.pyos.synchro.Yield()
if self.secondaryInfo is None:
self.LogInfo("Can't close secondary view since none is active")
elif name is None or self.activeViewInfo.name == name:
self.LogInfo('closing secondary view', self.secondaryInfo.name)
self.ActivateView(self.primaryInfo.name, **self.primaryInfo.entryArguments)
else:
self.LogInfo('The secondary view', name, 'was not closed as is not active')
def ToggleSecondaryView(self, name):
"""Toggle the state of a secondary view"""
self.LogInfo('Toggling view', name)
while self.isOpeningView:
blue.pyos.synchro.Yield()
info = self.GetViewInfo(name)
if info.GetViewType() != ViewType.Secondary:
raise RuntimeError('You can only toggle secondary views (tools)')
if self.IsViewActive(name):
self.CloseSecondaryView(name)
else:
self.ActivateView(name)
def IsCurrentViewPrimary(self):
return self.activeViewInfo.GetViewType() == ViewType.Primary
def IsCurrentViewSecondary(self):
activeViewInfo = getattr(self, 'activeViewInfo', None)
if activeViewInfo:
return activeViewInfo.GetViewType() == ViewType.Secondary
else:
return False
def AddOverlay(self, name, overlayClass, subLayers = None):
if name not in self.overlaysByName:
overlay = self.overlayLayerParent.AddLayer('l_%s' % name, overlayClass, subLayers)
overlay.display = False
self.overlaysByName[name] = overlay
def UpdateOverlays(self):
"""
compiles a list of all overlays to activate and then
        trims the list by removing all suppressed overlays
then walks all overlays and displays according to the compiled list
"""
activeOverlays = self.primaryInfo.view.__overlays__.copy()
if self.secondaryInfo:
activeOverlays.update(self.secondaryInfo.view.__overlays__)
activeOverlays.difference_update(self.primaryInfo.view.__suppressedOverlays__)
if self.secondaryInfo:
activeOverlays.difference_update(self.secondaryInfo.view.__suppressedOverlays__)
self.LogInfo('Overlays to enable', activeOverlays)
for name, overlay in self.overlaysByName.items():
try:
if name in activeOverlays or name in self.activeViewInfo.view.__exclusiveOverlay__:
overlay.OpenView()
overlay.display = True
sm.ScatterEvent('OnOverlayActivated', name)
self.LogInfo('Overlay', name, 'activated')
else:
overlay.display = False
overlay.CloseView(recreate=False)
self.overlaysByName[name] = uicore.layer.Get(name)
sm.ScatterEvent('OnOverlayClosed', name)
self.LogInfo('Overlay', name, 'closed')
except:
log.LogException()
if uicore.cmd.IsUIHidden():
uicore.cmd.HideUI()
def SetTransitionReason(self, fromName, toName, reason):
self.LogInfo('Adding transition reason ', fromName or '[All]', toName, reason)
self.transitionsByNames[fromName, toName].SetTransitionReason(reason)
def GetActiveTransitionReason(self):
if self.activeTransition is None:
return
return self.activeTransition.transitionReason
def OnShowUI(self):
self.UpdateOverlays()
| ["[email protected]"] |

67685bc853b72f28dfc50d9e13c6874b050911f5 | /setup.py | ["BSD-3-Clause"] | permissive | mrusoff/sos | Python | 7,404 bytes | py |

#!/usr/bin/env python
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import sys
from setuptools import find_packages, setup
from setuptools.command.bdist_egg import bdist_egg
_py_ver = sys.version_info
if _py_ver.major == 2 or (_py_ver.major == 3 and
(_py_ver.minor, _py_ver.micro) < (6, 0)):
raise SystemError(
'sos requires Python 3.6 or higher. Please upgrade your Python {}.{}.{}.'
.format(_py_ver.major, _py_ver.minor, _py_ver.micro))
# obtain version of SoS
with open('src/sos/_version.py') as version:
for line in version:
if line.startswith('__version__'):
__version__ = eval(line.split('=')[1])
break
description = '''\
Computationally intensive disciplines such as computational biology often
requires one to exploit a variety of tools implemented in different programming
languages, and to analyze large datasets on high performance computing systems.
Although scientific workflow systems are powerful in organizing and executing
large-scale data analysis processes, there are usually non-trivial learning
curve and engineering overhead in creating and maintaining such workflows,
making them unsuitable for data exploration and prototyping. To bridge the
gap between interactive analysis and workflow systems, we developed Script
of Scripts (SoS), a system with strong emphases on readability, practicality,
and reproducibility for daily computational research. For exploratory analysis
SoS provides a multi-language file format and scripting engine that centralizes
all computations, and creates dynamic report documents for publishing and
sharing. As a workflow engine, SoS provides an intuitive syntax to create
workflows in process-oriented, outcome-oriented and mixed styles, as well as
a unified interface to executing and managing tasks on a variety of computing
platforms with automatic synchronization of files between isolated systems.
In this paper we illustrate with real-world examples the use of SoS as both
interactive analysis tool and pipeline platform for all stages of methods
development and data analysis projects. In particular we demonstrate how SoS
can easily be adopted based on existing scripts and pipelines, yet resulting
in substantial improvement in terms of organization, readability and
cross-platform computation management.
Please refer to http://vatlab.github.io/SOS/ for more details on SoS.
'''
class bdist_egg_disabled(bdist_egg):
"""Disabled version of bdist_egg
Prevents setup.py install performing setuptools' default easy_install,
which it should never ever do.
"""
def run(self):
sys.exit(
"Aborting implicit building of eggs. Use `pip install -U --upgrade-strategy only-if-needed .` to install from source."
)
cmdclass = {
'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled
}
setup(
name="sos",
version=__version__,
description='Script of Scripts (SoS): an interactive, cross-platform, and cross-language workflow system for reproducible data analysis',
long_description=description,
author='Bo Peng',
url='https://github.com/vatlab/SoS',
author_email='[email protected]',
maintainer='Bo Peng',
maintainer_email='[email protected]',
license='3-clause BSD',
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=find_packages('src'),
cmdclass=cmdclass,
package_dir={'': 'src'},
python_requires='>=3.6',
install_requires=[
'psutil',
# progress bar
'tqdm',
# for file lock
'fasteners',
'pyyaml',
'pygments',
# for DAG, some version requires pydot, some requires pydotplus
'networkx',
'pydot',
'pydotplus',
'pexpect',
# for report regeneration
'jinja2',
# to execute workflow embedded in .ipynb files
'nbformat',
# zeromq for IPC
'pyzmq',
],
entry_points='''
[console_scripts]
sos = sos.__main__:main
sos-runner = sos.__main__:sosrunner
[pygments.lexers]
sos = sos.converter:SoS_Lexer
[sos_targets]
file_target = sos.targets:file_target
dynamic = sos.targets:dynamic
remote = sos.targets:remote
executable = sos.targets:executable
sos_variable = sos.targets:sos_variable
sos_step = sos.targets:sos_step
env_variable = sos.targets:env_variable
sos_targets = sos.targets:sos_targets
system_resource = sos.targets:system_resource
Py_Module = sos.targets_python:Py_Module
R_library = sos.targets_r:R_library
[sos_actions]
script = sos.actions:script
sos_run = sos.actions:sos_run
fail_if = sos.actions:fail_if
warn_if = sos.actions:warn_if
stop_if = sos.actions:stop_if
done_if = sos.actions:done_if
skip_if = sos.actions:skip_if
download = sos.actions:download
run = sos.actions:run
bash = sos.actions_bash:bash
csh = sos.actions_bash:csh
tcsh = sos.actions_bash:tcsh
zsh = sos.actions_bash:zsh
sh = sos.actions_bash:sh
node = sos.actions_javascript:node
julia = sos.actions_julia:julia
matlab = sos.actions_matlab:matlab
octave = sos.actions_matlab:octave
python = sos.actions_python:python
python2 = sos.actions_python:python2
python3 = sos.actions_python:python3
R = sos.actions_r:R
Rmarkdown = sos.actions_r:Rmarkdown
ruby = sos.actions_ruby:ruby
perl = sos.actions:perl
report = sos.actions:report
pandoc = sos.actions:pandoc
docker_build = sos.docker.actions:docker_build
singularity_build = sos.singularity.actions:singularity_build
[sos_taskengines]
process = sos.tasks:BackgroundProcess_TaskEngine
[sos_previewers]
*.pdf,1 = sos.preview:preview_pdf
*.html,1 = sos.preview:preview_html
*.csv,1 = sos.preview:preview_csv
*.xls,1 = sos.preview:preview_xls
*.xlsx,1 = sos.preview:preview_xls
*.gz,1 = sos.preview:preview_gz
*.txt,1 = sos.preview:preview_txt
*.md,1 = sos.preview:preview_md
*.dot,1 = sos.preview:preview_dot [dot]
*.svg,1 = sos.preview:preview_svg
imghdr:what,1 = sos.preview:preview_img
zipfile:is_zipfile,1 = sos.preview:preview_zip
tarfile:is_tarfile,1 = sos.preview:preview_tar
*,0 = sos.preview:preview_txt
[sos_converters]
sos-html.parser = sos.converter:get_script_to_html_parser
sos-html.func = sos.converter:script_to_html
''',
# [sos_installers]
# vim-syntax.parser = sos.install:get_install_vim_syntax_parser
# vim-syntax.func = sos.install:install_vim_syntax
extras_require={
':sys_platform=="win32"': ['colorama'],
# faster hashlib
':sys_platform!="win32"': ['xxhash'],
'dot': ['graphviz', 'pillow'],
})
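
# After installing from source (pip install -U --upgrade-strategy only-if-needed .),
# the console scripts declared above become available, e.g. (the workflow file
# name is illustrative):
#
#     sos run my_workflow.sos
#     sos-runner my_workflow.sos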
| ["[email protected]"] |

fc03332dcbf0200d0f9e90b5bfe070525ec87bf7 | /Tag02/dozent_pool.py | [] | no_license | anna-s-dotcom/python01-python08 | Python | 468 bytes | py |

import os
from multiprocessing import Pool
import time
def quadfunc(n):
time.sleep(0.2)
return n*n
if __name__ == '__main__':
print(os.cpu_count())
t = time.time()
p = Pool(processes = 5)
result = p.map(quadfunc, [1, 2, 3, 4, 5])
p.close()
print('Pool time:', time.time()-t)
t = time.time()
result2 = list(map(quadfunc, [1, 2, 3, 4, 5]))
print('Serial time:', time.time()-t)
# print(result)
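    # Rough expectation (timings indicative only): the five 0.2 s calls run on
    # up to five workers, so 'Pool time' is about 0.2 s plus process start-up,
    # while the serial map costs roughly 5 * 0.2 = 1.0 s.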
| ["[email protected]"] |

a49b401854cc2d03d3f273fcc6b393095b8453c6 | /ml_jobcontrol/ml_jobcontrol/migrations/0001_initial.py | ["BSD-2-Clause"] | permissive | ephes/ml_jobcontrol | Python | 9,491 bytes | py |

# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MLDataSet'
db.create_table(u'ml_jobcontrol_mldataset', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=200)),
))
db.send_create_signal(u'ml_jobcontrol', ['MLDataSet'])
# Adding model 'MLClassificationTestSet'
db.create_table(u'ml_jobcontrol_mlclassificationtestset', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('mldataset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLDataSet'])),
('train_num', self.gf('django.db.models.fields.IntegerField')()),
('test_num', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'ml_jobcontrol', ['MLClassificationTestSet'])
# Adding model 'MLModel'
db.create_table(u'ml_jobcontrol_mlmodel', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('import_path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
))
db.send_create_signal(u'ml_jobcontrol', ['MLModel'])
# Adding model 'MLModelConfig'
db.create_table(u'ml_jobcontrol_mlmodelconfig', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('mlmodel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLModel'])),
('json_config', self.gf('django.db.models.fields.TextField')(unique=True)),
))
db.send_create_signal(u'ml_jobcontrol', ['MLModelConfig'])
# Adding model 'MLScore'
db.create_table(u'ml_jobcontrol_mlscore', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'ml_jobcontrol', ['MLScore'])
# Adding model 'MLResult'
db.create_table(u'ml_jobcontrol_mlresult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('mlmodel_config', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLModelConfig'])),
('mlclassification_testset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLClassificationTestSet'])),
))
db.send_create_signal(u'ml_jobcontrol', ['MLResult'])
# Adding model 'MLResultScore'
db.create_table(u'ml_jobcontrol_mlresultscore', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('mlresult', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLResult'])),
('mlscore', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ml_jobcontrol.MLScore'])),
('score', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'ml_jobcontrol', ['MLResultScore'])
def backwards(self, orm):
# Deleting model 'MLDataSet'
db.delete_table(u'ml_jobcontrol_mldataset')
# Deleting model 'MLClassificationTestSet'
db.delete_table(u'ml_jobcontrol_mlclassificationtestset')
# Deleting model 'MLModel'
db.delete_table(u'ml_jobcontrol_mlmodel')
# Deleting model 'MLModelConfig'
db.delete_table(u'ml_jobcontrol_mlmodelconfig')
# Deleting model 'MLScore'
db.delete_table(u'ml_jobcontrol_mlscore')
# Deleting model 'MLResult'
db.delete_table(u'ml_jobcontrol_mlresult')
# Deleting model 'MLResultScore'
db.delete_table(u'ml_jobcontrol_mlresultscore')
models = {
u'ml_jobcontrol.mlclassificationtestset': {
'Meta': {'object_name': 'MLClassificationTestSet'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mldataset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLDataSet']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'test_num': ('django.db.models.fields.IntegerField', [], {}),
'train_num': ('django.db.models.fields.IntegerField', [], {})
},
u'ml_jobcontrol.mldataset': {
'Meta': {'object_name': 'MLDataSet'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
u'ml_jobcontrol.mlmodel': {
'Meta': {'object_name': 'MLModel'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ml_jobcontrol.mlmodelconfig': {
'Meta': {'object_name': 'MLModelConfig'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json_config': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'mlmodel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLModel']"})
},
u'ml_jobcontrol.mlresult': {
'Meta': {'object_name': 'MLResult'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mlclassification_testset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLClassificationTestSet']"}),
'mlmodel_config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLModelConfig']"}),
'scores': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ml_jobcontrol.MLScore']", 'through': u"orm['ml_jobcontrol.MLResultScore']", 'symmetrical': 'False'})
},
u'ml_jobcontrol.mlresultscore': {
'Meta': {'object_name': 'MLResultScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mlresult': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLResult']"}),
'mlscore': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ml_jobcontrol.MLScore']"}),
'score': ('django.db.models.fields.FloatField', [], {})
},
u'ml_jobcontrol.mlscore': {
'Meta': {'object_name': 'MLScore'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
    complete_apps = ['ml_jobcontrol']

| ["[email protected]"] |

cf67b46f033723c3e6c4b4a18b3594092ee467a7 | /数据结构/quick_sort.py | [] | no_license | 17764591637/jianzhi_offer | Python | 1,309 bytes | py |

'''
Quicksort uses divide and conquer to split a list into two sub-lists.
The algorithm works as follows:
1. Pick an element from the sequence; this is called the "pivot".
2. Reorder the sequence so that every element smaller than the pivot comes before it
   and every element larger than the pivot comes after it (equal elements may go to
   either side). When this partition step finishes, the pivot sits at its final
   position in the sequence. This is called the partition operation.
3. Recursively sort the sub-sequence of smaller elements and the sub-sequence of
   larger elements.
Time complexity: O(n log n) on average; the sort is not stable.
'''
def quick_sort(nums,start,end):
    # base case: a sub-sequence of length 0 or 1 is already sorted
if start >= end:
return
mid = nums[start]
left = start
right = end
while left < right:
while left < right and nums[right] >= mid:
right -= 1
nums[left] = nums[right]
while left < right and nums[left] < mid:
left += 1
nums[right] = nums[left]
nums[left] = mid
    # quicksort the sub-sequence to the left of the pivot
quick_sort(nums, start, left - 1)
    # quicksort the sub-sequence to the right of the pivot
quick_sort(nums, left + 1, end)
alist = [54,26,93,17,77,31,44,55,20]
quick_sort(alist,0,len(alist)-1)
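# expected output: [17, 20, 26, 31, 44, 54, 55, 77, 93]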
print(alist)

| ["[email protected]"] |

72b224e612831d78026a2b1d1c2e5fa5338f40d2 | /mixins_views/views.py | [] | no_license | jayednahain/Django-Rest_api-with-mixins. | Python | 1,136 bytes | py |

from django.shortcuts import render
# Create your views here.
from mixins_views.models import Student
from mixins_views.serializers import StudentSerializer
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView  # used for class-based views
from rest_framework import generics,mixins
class StudentListView(mixins.ListModelMixin,mixins.CreateModelMixin,generics.GenericAPIView):
queryset = Student.objects.all()
serializer_class = StudentSerializer
def get(self,request):
return self.list(request)
def post(self,request):
return self.create(request)
# primary-key based operations
class StudentDetailView(mixins.RetrieveModelMixin,mixins.UpdateModelMixin,mixins.DestroyModelMixin,generics.GenericAPIView):
queryset = Student.objects.all()
serializer_class = StudentSerializer
def get(self,request,pk):
return self.retrieve(request,pk)
def put(self,request,pk):
return self.update(request,pk)
def delete(self,request,pk):
return self.destroy(request,pk)
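
# Hypothetical URL wiring for these views (module path illustrative):
#
#     from django.urls import path
#     from mixins_views import views
#
#     urlpatterns = [
#         path('students/', views.StudentListView.as_view()),
#         path('students/<int:pk>/', views.StudentDetailView.as_view()),
#     ]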
| ["[email protected]"] |

46e14b52afac31c58275483a6c07f10b3a257261 | /Contents/Code/common.py | [] | no_license | coder-alpha/DesiTelly.bundle | Python | 9,541 bytes | py |

################################################################################
TITLE = L('Title')
VERSION = '0.13' # Release notation (x.y - where x is major and y is minor)
GITHUB_REPOSITORY = 'coder-alpha/DesiTelly.bundle'
PREFIX = "/video/desitelly"
################################################################################
NAME = L('Title')
ART = 'art-default.jpg'
ICON = 'icon-default.png'
ICON_PIN = "icon-pin.png"
ICON_PREFS = "icon-prefs.png"
ICON_UPDATE = "icon-update.png"
ICON_UPDATE_NEW = "icon-update-new.png"
STARPLUS = 'Star Plus'
ZEETV = 'Zee Tv'
SONYTV = 'Sony Tv'
LIFEOK = 'Life OK'
SAHARAONE = 'Sahara One'
STARJALSHA = 'Star Jalsha'
COLORS = 'Colors Channel'
SABTV = 'Sab TV'
STARPRAVAH = 'Star Pravah'
MTV = 'MTV (India/Pakistan)'
CHANNELV = 'Channel [V]'
BINDASSTV = 'Bindass TV'
UTVSTARS = 'UTV Stars'
NEWS = 'News Channels'
STARONE = 'Star One'
XINXMEDIA = '9X INX Media'
NDTV = 'NDTV Imagine'
COLORSTV = 'Colors'
MTVIN = 'MTV India'
CHANNELVV = 'Channel V'
BINDASS = 'Bindass'
COLORSTVCHAN = 'Colors Tv Channel'
COLORS_TV = 'Colors Tv'
SABTVCHAN = 'SabTv Channel'
SONYPAL = 'Sony Pal'
DDNATIONAL = 'DD National'
ZINDAGITV = 'Zindagi Tv'
BIGMAGIC = 'Big Magic'
BIGTHRILL = 'Big Thrill'
RISHTEYTV = 'Rishtey Tv'
ZEEANMOL = 'Zee Anmol'
STARUTSAV = 'Star Utsav'
MASTI = 'Masti'
ZINGTV = 'Zing Tv'
ZOOMTV = 'Zoom Tv'
ABPNEWS = 'ABP News'
AAJTAKNEWS = 'Aaj Tak News Channel'
ZEENEWS = 'Zee News Channel'
IBN7 = 'IBN7'
NDTVINDIA = 'NDTV India'
E24 = 'E24/News 24'
UTVSTARSNEWS = 'UtV Stars (News)'
NEWSEXPRESS = 'News Express'
SAHARASAMAY = 'Sahara Samay'
ZEEMARATHI = 'Zee Marathi'
ETVBANGLA = 'ETv Bangla'
ETVMARATHI = 'ETV Marathi'
ZEEBANGLA = 'Zee Bangla'
STARVIJAY = 'Star Vijay'
MAHUAATV = 'Mahuaa Tv'
PTCPUNJABI = 'PTC Punjabi'
STARWORLDHD = 'Star World Premiere HD'
STARWORLD = 'Star World'
STARBHARAT = 'Star Bharat'
ZEECAFE = 'Zee Cafe'
PAKCHAN = 'Pak Channels'
ZEENEXT = 'Zee-Next'
REALTV = 'Real Tv'
FIRANGI = 'FIRANGI'
ZEEMUZIC = 'Zee Muzic'
# Added by Coder-Alpha
ANDTV = '& Tv'
EPIC = 'EPIC'
# Yo-Desi
COLORSMARATHI = 'Colors Marathi'
COLORSBANGLA = 'Colors Bangla'
ZEEYUVA = 'Zee Yuva'
# DesiBoxTv
ANDTV2 = 'And TV'
SONYPAL2 = 'Pal'
MTVIN2 = 'MTV'
ZINDAGITV2 = 'Zindagi'
#DesiTashan
TV_NEWS = 'TV News'
VALID_SOURCES_DOMAIN = ['dailymotion.','playwire.','vidshare.','openload.','playu.', 'cloudy.', 'vmg.','watchvideo','tvlogy','google','mediatv.','vidwatch','speedwatch.us','tune.pk','vidoza.','dailytv.','thevideobee','videobee','irshare','vkprime']
VALID_SOURCES = ['Dailymotion','Flash Player','Flash','Playwire','Letwatch','Openload','PlayU','StreamHD','HDStream','Watchvideo','TvLogy','Google','VidWatch','Vid Watch','Vidwatch','SpeedWatch','Speedwatch','Speed','TunePK','Tunepk','Tune','ViDoza','DailyTV','TheVideoBee','Videobee','VK Prime']
VALID_SOURCES_ICONS = ['dailymotion','flashplayer','flashplayer','flashplayer','letwatchus','openload','playu','vmg','vmg','source-watchvideo','tvlogy','google','vidwatch','vidwatch','vidwatch','speedwatch','speedwatch','speedwatch','tunepk','tunepk','tunepk','vidoza','dailytv','thevideobee','videobee','vkprime']
DISABLED_SOURCES = ['domain-defined']
####################################################################################################
def GetSupportedChannels():
return [
STARPLUS.lower(),
STARBHARAT.lower(),
ZEETV.lower(),
SONYTV.lower(),
LIFEOK.lower(),
SAHARAONE.lower(),
STARJALSHA.lower(),
COLORS.lower(),
SABTV.lower(),
STARPRAVAH.lower(),
MTV.lower(),
CHANNELV.lower(),
BINDASSTV.lower(),
UTVSTARS.lower(),
STARONE.lower(),
XINXMEDIA.lower(),
NDTV.lower(),
COLORSTV.lower(),
COLORS_TV.lower(),
MTVIN.lower(),
CHANNELVV.lower(),
BINDASS.lower(),
COLORSTVCHAN.lower(),
SABTVCHAN.lower(),
SONYPAL.lower(),
DDNATIONAL.lower(),
ZINDAGITV.lower(),
BIGMAGIC.lower(),
BIGTHRILL.lower(),
RISHTEYTV.lower(),
ZEEANMOL.lower(),
STARUTSAV.lower(),
MASTI.lower(),
ZINGTV.lower(),
ZOOMTV.lower(),
ABPNEWS.lower(),
AAJTAKNEWS.lower(),
ZEENEWS.lower(),
IBN7.lower(),
NDTVINDIA.lower(),
E24.lower(),
UTVSTARSNEWS.lower(),
NEWSEXPRESS.lower(),
SAHARASAMAY.lower(),
ZEEMARATHI.lower(),
ETVBANGLA.lower(),
ETVMARATHI.lower(),
ZEEBANGLA.lower(),
STARVIJAY.lower(),
MAHUAATV.lower(),
PTCPUNJABI.lower(),
STARWORLD.lower(),
ZEECAFE.lower(),
PAKCHAN.lower(),
ZEENEXT.lower(),
REALTV.lower(),
FIRANGI.lower(),
ZEEMUZIC.lower(),
ANDTV.lower(),
ANDTV2.lower(),
SONYPAL2.lower(),
MTVIN2.lower(),
ZINDAGITV2.lower(),
EPIC.lower(),
COLORSMARATHI.lower(),
COLORSBANGLA.lower(),
ZEEYUVA.lower(),
TV_NEWS.lower()
]
####################################################################################################
def GetThumb(channel):
icon = R('icon-no-thumb.png')
if channel == STARPLUS.lower():
icon = R('icon-starplus.png')
elif channel == ZEETV.lower():
icon = R('icon-zeetv.png')
elif channel == SONYTV.lower():
icon = R('icon-sonytv.png')
elif channel == LIFEOK.lower():
icon = R('icon-lifeok.png')
elif channel == SAHARAONE.lower():
icon = R('icon-saharaone.png')
elif channel == STARJALSHA.lower():
icon = R('icon-starjalsha.png')
elif channel == COLORS.lower() or channel == COLORSTV.lower() or channel == COLORSTVCHAN.lower() or channel == COLORS_TV.lower():
icon = R('icon-colors.png')
elif channel == SABTV.lower() or channel == SABTVCHAN.lower():
icon = R('icon-sabtv.png')
elif channel == STARPRAVAH.lower():
icon = R('icon-starpravah.png')
elif channel == MTV.lower() or channel == MTVIN.lower() or channel == MTVIN2.lower():
icon = R('icon-mtv.png')
elif channel == CHANNELV.lower() or channel == CHANNELVV.lower():
icon = R('icon-channelv.png')
elif channel == BINDASSTV.lower() or channel == BINDASS.lower():
icon = R('icon-bindasstv.png')
elif channel == UTVSTARS.lower() or channel == UTVSTARSNEWS.lower():
icon = R('icon-utvstars.png')
elif channel == NEWS.lower():
icon = R('icon-indianews.png')
elif channel == STARONE.lower():
icon = R('icon-starone.png')
elif channel == XINXMEDIA.lower():
icon = R('icon-9xinxmedia.png')
elif channel == NDTV.lower():
icon = R('icon-ndtv.png')
elif channel == SONYPAL.lower() or channel == SONYPAL2.lower():
icon = R('icon-sonypal.png')
elif channel == DDNATIONAL.lower():
icon = R('icon-ddnational.png')
elif channel == ZINDAGITV.lower() or channel == ZINDAGITV2.lower():
icon = R('icon-zindagitv.png')
elif channel == BIGMAGIC.lower():
icon = R('icon-bigmagic.png')
elif channel == BIGTHRILL.lower():
icon = R('icon-bigthrill.png')
elif channel == RISHTEYTV.lower():
icon = R('icon-rishteytv.png')
elif channel == ZEEANMOL.lower():
icon = R('icon-zeeanmol.png')
elif channel == STARUTSAV.lower():
icon = R('icon-starutsav.png')
elif channel == MASTI.lower():
icon = R('icon-masti.png')
elif channel == ZINGTV.lower():
icon = R('icon-zingtv.png')
elif channel == ZOOMTV.lower():
icon = R('icon-zoomtv.png')
elif channel == ABPNEWS.lower():
icon = R('icon-abpnews.png')
elif channel == AAJTAKNEWS.lower():
icon = R('icon-aajtaknews.png')
elif channel == ZEENEWS.lower():
icon = R('icon-zeenews.png')
elif channel == IBN7.lower():
icon = R('icon-ibn7.png')
elif channel == NDTVINDIA.lower():
icon = R('icon-ndtvindia.png')
elif channel == E24.lower():
icon = R('icon-e24.png')
elif channel == NEWSEXPRESS.lower():
icon = R('icon-newsexpress.png')
elif channel == SAHARASAMAY.lower():
icon = R('icon-saharasamay.png')
elif channel == ZEEMARATHI.lower():
icon = R('icon-zeemarathi.png')
elif channel == ETVBANGLA.lower():
icon = R('icon-etvbangla.png')
elif channel == ETVMARATHI.lower():
icon = R('icon-etvmarathi.png')
elif channel == ZEEBANGLA.lower():
icon = R('icon-zeebangla.png')
elif channel == STARVIJAY.lower():
icon = R('icon-starvijay.png')
elif channel == MAHUAATV.lower():
icon = R('icon-mahuaatv.png')
elif channel == PTCPUNJABI.lower():
icon = R('icon-ptcpunjabi.png')
elif channel == STARWORLDHD.lower():
icon = R('icon-starworldpremierehd.png')
elif channel == STARWORLD.lower():
icon = R('icon-starworld.png')
elif channel == ZEECAFE.lower():
icon = R('icon-zeecafe.png')
elif channel == PAKCHAN.lower():
icon = R('icon-pakchannels.png')
elif channel == ZEENEXT.lower():
icon = R('icon-zeenext.png')
elif channel == REALTV.lower():
icon = R('icon-realtv.png')
elif channel == FIRANGI.lower():
icon = R('icon-firangi.png')
elif channel == ZEEMUZIC.lower():
icon = R('icon-zeemuzic.png')
elif channel == ANDTV.lower() or channel == ANDTV2.lower():
icon = R('icon-&TV.png')
elif channel == EPIC.lower():
icon = R('icon-epic.png')
elif channel == STARBHARAT.lower():
icon = R('icon-starbharat.png')
return icon
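
# Editor's sketch (not part of the original plugin): the elif chain above can
# collapse into a lookup table; entries are illustrative, a real table would
# list every constant handled above, and 'icon-default.png' is an assumed
# fallback resource.
#
#   CHANNEL_ICONS = {
#       ZEETV.lower(): 'icon-zeetv.png',
#       SONYTV.lower(): 'icon-sonytv.png',
#       COLORS.lower(): 'icon-colors.png',
#   }
#
#   def GetChannelIcon(channel):
#       return R(CHANNEL_ICONS.get(channel, 'icon-default.png'))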
# author: Twoure
# source: https://github.com/Twoure/HindiMoviesOnline.bundle/blob/master/Contents/Code/messages.py
#
class NewMessageContainer(object):
def __init__(self, prefix, title):
self.title = title
Route.Connect(prefix + '/message', self.message_container)
def message_container(self, header, message):
"""Setup MessageContainer depending on Platform"""
if Client.Platform in ['Plex Home Theater', 'OpenPHT']:
oc = ObjectContainer(
title1=self.title, title2=header, no_cache=True,
no_history=True, replace_parent=True
)
oc.add(PopupDirectoryObject(title=header, summary=message))
return oc
else:
return MessageContainer(header, message) | [
"[email protected]"
] | |
35b3a29158681480060f348b5446d99387856c6b | 998c2105908e0a4463075a84f9e3f1678ffcdfb3 | /keras_video_object_detector/library/yolo_utils.py | ca0e5e348b8fcb14721858d241185efece4ba0ac | [
"MIT"
] | permissive | chen0040/keras-video-object-detector | a4bb2a080d62c0ecb56c12096ffe1f161b6d2c71 | 52f07ff4047dcc8732015c3debba1fa3eb7f2c56 | refs/heads/master | 2021-09-03T09:38:01.520006 | 2018-01-08T03:22:42 | 2018-01-08T03:22:42 | 116,548,809 | 15 | 9 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | import colorsys
import imghdr
import os
import random
from keras import backend as K
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def read_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def read_anchors(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
return anchors
def generate_colors(class_names):
hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
return colors
def scale_boxes(boxes, image_shape):
""" Scales the predicted boxes in order to be drawable on the image"""
height = image_shape[0]
width = image_shape[1]
image_dims = K.stack([height, width, height, width])
image_dims = K.reshape(image_dims, [1, 4])
boxes = boxes * image_dims
return boxes
def preprocess_image(img_path, model_image_size):
image_type = imghdr.what(img_path)
image = Image.open(img_path)
resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
return image, image_data
def preprocess_image_data(image):
image_data = np.array(image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
return image, image_data
def draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors):
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
        # use a separate loop variable; reusing `i` would shadow the enumerate index above
        for t in range(thickness):
            draw.rectangle([left + t, top + t, right - t, bottom - t], outline=colors[c])
draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
        del draw
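
# Hedged usage sketch (editor's addition): exercises the helpers above with
# dummy detections. The file paths are assumptions, and the out_* arrays
# stand in for real YOLO outputs.
if __name__ == '__main__':
    class_names = read_classes('model_data/coco_classes.txt')
    colors = generate_colors(class_names)
    image, image_data = preprocess_image('test.jpg', model_image_size=(608, 608))
    out_scores = np.array([0.9])
    out_boxes = np.array([[50., 50., 200., 200.]])  # top, left, bottom, right
    out_classes = np.array([0])
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    image.save('annotated.jpg')
| [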
"[email protected]"
] | |
2adc6545b8d6dcc6760907fe667129c5002aca2a | c14debb710242571769587d9e8fb1d0ecfbcd15b | /query_csv/utils.py | ea9bf421ec92f261c090af6872c3e1b44db13cf2 | [] | no_license | jayrbolton/query_csv | f464a6ad714372c41390b15f838c99c1e4c250ec | 4299f9e179d9fcaf41560c30372cb65b57d1756f | refs/heads/master | 2022-10-03T18:23:33.052523 | 2020-06-06T14:42:36 | 2020-06-06T14:42:36 | 270,006,356 | 0 | 0 | null | 2020-06-06T14:42:37 | 2020-06-06T14:29:59 | Python | UTF-8 | Python | false | false | 2,809 | py | import gzip
import shutil
import tempfile
import csv
import os
from typing import Union, List, Generator
# yielded by the CSV parsers below. Generator of lists of column values for
# every row in a CSV
Rows = Generator[List[str], None, None]
def convert_col_type(val: str) -> Union[str, float, int]:
"""
Convert a CSV column into an integer, a float, or keep as a string based on
its format.
Args:
val: column value
Returns:
Int if numeric without decimal, float if numeric with decimal, and
string otherwise
Examples:
"hi" -> "hi"
"10" -> 10 (int)
"10.0" -> 10.0 (float)
"""
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return val
def iter_csv_rows(path: str, delim: str) -> Rows:
"""
Only loads one row at a time into memory and yields it.
Args:
path: path to a .csv file
delim: string column delimiter
Yields:
List of string values for every column.
"""
with open(path) as fd:
reader = csv.reader(fd, delimiter=delim)
for row in reader:
yield row
def iter_gzip_csv_rows(path: str, delim: str) -> Rows:
"""
Args:
path: path to a .csv.gz file
delim: string column delimiter
Yields:
List of string values for every column.
"""
    # Decompress the gzip contents into a tempfile without loading into memory
    with gzip.open(path, 'rb') as src:
        with tempfile.NamedTemporaryFile('w+b') as tmp:
            # Copy by chunks from the gzip stream into the tempfile
            shutil.copyfileobj(src, tmp)
            # Flush buffer to disk
            tmp.flush()
            for row in iter_csv_rows(tmp.name, delim):
                yield row
    # Tempfile is deleted at the end of the context
def dict_is_subset(subset: dict, superset: dict) -> bool:
"""
Check that all keys in `subset` are present in `superset` and have all the
same values by `==`.
Args:
subset: All keys and values in the dict must match those in `superset`
superset: Must contain all keys/vals from subset
Returns:
boolean result
Examples:
dict_is_subset({'x': 1}, {'x': 1, 'y': 2}) -> True
dict_is_subset({'x': 1, 'z': 2}, {'x': 1, 'y': 2}) -> False
"""
return all(
key in superset and superset[key] == subset[key]
for key in subset.keys()
)
def get_extension(path):
"""
Get the file extension of a given path. Returns double extensions, such as
'.csv.gz'
"""
(name, ext) = os.path.splitext(path)
(_, subext) = os.path.splitext(name)
# Get the double extension as '.csv.gz'
# `subext` will be '' if not present
ext = subext + ext
return ext
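
# Hedged usage sketch (editor's addition): stream a CSV and convert column
# types row by row. 'data.csv' and its column layout are assumptions.
if __name__ == '__main__':
    for row in iter_csv_rows('data.csv', delim=','):
        print([convert_col_type(val) for val in row])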
| [
"[email protected]"
] | |
220ec570d9a918a4ca8be2737dd79bd8c314fc0d | 810305a5f4d9592e81381c252ab24be43d33817e | /aishack/migrations/0023_auto__del_field_category_slug.py | a2dbc477883840398a3834247c0b3cb65afee002 | [] | no_license | awal123/aishack | 9dbfcbb329b35674c6f0a15c6dfc9de39ba34d05 | 5a16efca42899f3ec1495a509fe801348f3933ac | refs/heads/master | 2021-01-22T21:23:11.168117 | 2014-08-31T05:02:36 | 2014-08-31T05:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,364 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Category.slug'
db.delete_column(u'aishack_category', 'slug')
def backwards(self, orm):
# Adding field 'Category.slug'
db.add_column(u'aishack_category', 'slug',
self.gf('django.db.models.fields.SlugField')(default='2014-08-29 22:06:48.127270+00:00', max_length=50),
keep_default=False)
models = {
u'aishack.aishackuser': {
'Meta': {'object_name': 'AishackUser'},
'bio': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'blank': 'True'}),
'short_bio': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'tracks_following': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['aishack.Track']", 'through': u"orm['aishack.UserTrack']", 'symmetrical': 'False'}),
'tutorials_read': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['aishack.Tutorial']", 'through': u"orm['aishack.TutorialRead']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'aishack.category': {
'Meta': {'object_name': 'Category'},
'desc': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
},
u'aishack.quiz': {
'Meta': {'object_name': 'Quiz'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'aishack.track': {
'Meta': {'object_name': 'Track'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['aishack.Tutorial']", 'through': u"orm['aishack.TrackTutorials']", 'symmetrical': 'False'})
},
u'aishack.tracktutorials': {
'Meta': {'object_name': 'TrackTutorials'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Track']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Tutorial']"})
},
u'aishack.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Category']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_md': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateField', [], {}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256'}),
'read_count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_rel_+'", 'blank': 'True', 'to': u"orm['aishack.Tutorial']"}),
'series': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['aishack.TutorialSeries']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'aishack.tutorialread': {
'Meta': {'object_name': 'TutorialRead'},
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.AishackUser']"})
},
u'aishack.tutorialseries': {
'Meta': {'object_name': 'TutorialSeries'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['aishack.Tutorial']", 'through': u"orm['aishack.TutorialSeriesOrder']", 'symmetrical': 'False'})
},
u'aishack.tutorialseriesorder': {
'Meta': {'ordering': "('order',)", 'object_name': 'TutorialSeriesOrder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'series': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.TutorialSeries']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Tutorial']"})
},
u'aishack.usertrack': {
'Meta': {'object_name': 'UserTrack'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signup_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.Track']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aishack.AishackUser']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['aishack'] | [
"[email protected]"
] | |
ada3a03be028e3b915389cada419c859da69736d | eb42558f56fdb41526cc31ac4ef3a6937bf39e96 | /ConfigDefinitions/UserConfigs/SMHTT_2018_Configs_Deep/ST_tW_antitopConfig.py | 378774e807e5da7f892c2a679902bc63f061b479 | [] | no_license | samhiggie/Jesterworks | 6906b042d3e200efb9bd10b70284ccd30661aa53 | 562e8cbb20d7e4b1d5b9bdba3715578cc66f097d | refs/heads/master | 2020-09-11T19:35:59.770456 | 2019-11-16T12:37:35 | 2019-11-16T12:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | from ConfigDefinitions.JesterworksConfigurations import JesterworksConfiguration as Config
from ConfigDefinitions.BranchAdditions.UserDefinedCollections.SMHTT_2018_MC_Collection import MCCollection as BranchCollection
from ConfigDefinitions.CuttingDefinitions.UserCutConfigs.SMHTT2018Cuts_MC_NoEmbeddedOverlap_wDeep import SMHTT2018Cuts as CutConfig
from ConfigDefinitions.EndActionDefinitions.UserConfigs.GrabHistograms import HistogramGrabber as HistogramGrabber
DataConfig = Config()
DataConfig.Path = "/data/ccaillol/smhmt2018_svfitted_12oct/"
DataConfig.Files = ["ST_tW_antitop.root"]
DataConfig.InputTreeName = "mutau_tree"
DataConfig.SampleName = "ST_tW_antitop"
DataConfig.OutputPath = "/data/aloeliger/SMHTT_Selected_2018_Deep/"
DataConfig.OutputFile = "ST_tW_antitop.root"
DataConfig.OutputTreeName = "mt_Selected"
DataConfig.BranchCollection = BranchCollection
DataConfig.CutConfig = CutConfig
DataConfig.EndAction = HistogramGrabber
| [
"[email protected]"
] | |
cdd87b5b84d7dc7c907de04cbd185430dfb253e2 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/Taustar/Taustar_TauG_L10000_m4000_13TeV_pythia8.py | 9cdc43d01bf5f29ac69ce08fdf5c53e3f219175d | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,257 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExcitedFermion:qqbar2tauStartau = on',
'ExcitedFermion:Lambda= 10000',
'4000015:onMode = off',
'4000015:onIfMatch = 15 22',
'4000015:m0 = 4000'),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
| [
"[email protected]"
] | |
cc3756f9d748169d46b374c902c181e512a382fa | eafabc5e332f5fc0153e166d992ac0711cf90cd6 | /BOJ/11021/11021.py | 196aea58fa768861cb7e5f8f574957c8f0801695 | [] | no_license | PARKINHYO/Algorithm | 96038ce21bd9f66208af0886208ef6ed925c23e2 | 0ed8687fe971fc2b05e2f50f62c0d0e47c368a6c | refs/heads/master | 2021-12-23T23:48:25.247979 | 2021-08-20T01:52:50 | 2021-08-20T01:52:50 | 196,219,508 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | T = int(input())
AB = [[int(x) for x in input().split()] for y in range(T)]
for i in range(T):
C = AB[i][0] + AB[i][1]
print("Case #%d: %d" % (i+1, C)) | [
"[email protected]"
] | |
323135bcedfd94ec7cd2eae4703e33dde6537de0 | ab1c920583995f372748ff69d38a823edd9a06af | /shultais_courses/data_types/type_conversion/type_conversion.py | 1adc77061b934ef1b1a664bba675429f0fe1b226 | [] | no_license | adyadyat/pyprojects | 5e15f4e33892f9581b8ebe518b82806f0cd019dc | c8f79c4249c22eb9e3e19998d5b504153faae31f | refs/heads/master | 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | salary = "50000"
salary1 = "50000.5"
year_salary = int(salary) * 12
year_salary1 = float(salary1) * 12
print(year_salary, year_salary1)
print("Ваша годовая зарплата: " + str(year_salary))
# Преобразование типов | [
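
# Editor's note (addition): int() rejects strings that contain a decimal
# point, so a value like salary1 has to go through float() first.
try:
    int(salary1)
except ValueError:
    print(int(float(salary1)))  # -> 50000
| [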
"[email protected]"
] | |
632df10af90453376bd5a9c07308d6d702f9eab6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_318/ch23_2019_03_31_22_15_03_222154.py | ababaf32eacb836c1ef4fd1dcb5fe1051412ae6a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | def verifica_idade(x):
    if x > 20:
        print("Allowed in the USA and BRAZIL")
    elif x > 17:  # covers 18-20, since the first branch already took x > 20
        print("Allowed in BRAZIL")
    else:
        print("Not allowed")
    return x
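
# Editor's usage sketch (not in the original exercise): one call per branch.
verifica_idade(25)  # Allowed in the USA and BRAZIL
verifica_idade(19)  # Allowed in BRAZIL
verifica_idade(15)  # Not allowed
| [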
"[email protected]"
] | |
e731f34764d4a0c183cb174840d6cc907ce618bd | b4a58df63b7e42085d7b4a90cce184bab4039e97 | /src/config_29.py | 0b783d5a2b9d9b38f8b373fc503a67e5a2acd268 | [] | no_license | shinglyu/MusicPupil | 4f82a2240b99c98ec7eb8db1017cfa232cf21bb9 | edfc6da085e9433f347301d7f6ccc49eab45d14f | refs/heads/master | 2021-01-10T03:50:32.670628 | 2013-08-14T08:52:37 | 2013-08-14T08:52:37 | 51,300,212 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | import os.path
#DEBUG = True
DEBUG = False
defaultTrainSampleList= "../training_samples/trainSampleList.txt"
unittestTrainSampleList="../training_samples/trainSampleList.txt"
defaultGenScore= "../testing_scores/chop_nc_phrase001"
#defaultTrainFeatsFilename="../output/trainFeats.json" #may need to prepend file name
#defaultGenFeatFilename="../output/genFeat.json"
#defaultModelFilename= "../output/model.bin"
defaultOutputDir= "../output/"
scoreFeatsList = [ "PosInPhrasePercent",
"PitchMidiNum",
"PitchDiffNextMidiNum",
"PitchDiffPrevMidiNum",
"Beat",
"BeatStrength",
"DurationQNote",
"DurationRatioNextPercent",
"DurationRatioPrevPercent", ]
perfFeatsList = [ "OnsetDiffQNote",
"DurationPercent",
"VelocityMidiScale",
]
modelFuncName = [ #"modelMultiLinearRegress",
"modelSVMStruct",
#"ha",
]
quantizerName= [ "quantizerLinear",
#"ha",
]
musicOutputFormat= [ "Midi",
#"ha",
]
#SVM^HMM related parameters
#svmhmm_c = None
svmhmm_c = 0.00000000001
def printDebug(string):
if DEBUG:
print("[DEBUG]"),
print(string)
def sanitizeDirPath(dirPath):
if not (dirPath.endswith("/")):
return dirPath + "/";
else:
return dirPath;
def getTrainSampleName(trainSampleFilename):
return os.path.splitext(os.path.basename(trainSampleFilename))[0]
def getTrainInFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getTrainSampleName(args.inputList)
trainFeatsFilename += ".train.allFeats.json"
return trainFeatsFilename
def getGenSampleName(genSampleFilename):
return os.path.basename(genSampleFilename)
def getGenInFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getGenSampleName(args.input)
trainFeatsFilename += ".gen.scoreFeats.json"
return trainFeatsFilename
def getGenOutFeatFilename(args):
trainFeatsFilename = sanitizeDirPath(args.outputDir)
trainFeatsFilename += getGenSampleName(args.input)
trainFeatsFilename += ".gen.perfFeats.json"
return trainFeatsFilename
def getModelFilename(args):
modelFilename = sanitizeDirPath(args.outputDir)
modelFilename += getTrainSampleName(args.inputList) + "."
modelFilename += modelFuncName[0] + ".model"
return modelFilename
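
# Editor's usage sketch: deriving a run's file names from an argparse-like
# namespace; the attribute names follow the helpers above.
if __name__ == "__main__":
    from types import SimpleNamespace
    args = SimpleNamespace(inputList=defaultTrainSampleList, outputDir=defaultOutputDir)
    print(getTrainInFeatFilename(args))  # ../output/trainSampleList.train.allFeats.json
    print(getModelFilename(args))        # ../output/trainSampleList.modelSVMStruct.model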
| [
"[email protected]"
] | |
8c9f827f7dd01ae5a14d2a256505ffc43d563601 | 605c10db2f950a506af60d57a2074f97ebcf89ab | /code/MODULE/img_processing/record.py | 224137ab62610a7abcbd7067dcc47e6f658b24e3 | [] | no_license | MulongXie/Research-ReverselyGeneratingWebCode | 928f90d6b4f80ebff40a9a3a48f8b564277a0987 | 2c1598a765166f30786b0e6a22c485358ca2e98d | refs/heads/master | 2020-05-17T18:14:02.241209 | 2020-04-10T00:19:16 | 2020-04-10T00:19:16 | 183,857,077 | 0 | 3 | null | 2020-02-03T04:31:34 | 2019-04-28T04:51:24 | Python | UTF-8 | Python | false | false | 2,173 | py | import cv2
import numpy as np
def find_contour():
img = cv2.imread('0.png')
img = cv2.blur(img, (3,3))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
cv2.imwrite('bb.png', binary)
    binary, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x returns three values
cv2.drawContours(img, contours, -1, (0, 0, 255), 3)
cv2.imshow("img", img)
cv2.imwrite('bc.png', img)
cv2.waitKey(0)
def gradient():
img = cv2.imread("1.png", 0)
row, column = img.shape
    # Cast to float before differencing; uint8 subtraction would wrap around.
    img_f = np.copy(img).astype("float")
gradient = np.zeros((row, column))
for x in range(row - 1):
for y in range(column - 1):
gx = abs(img_f[x + 1, y] - img_f[x, y])
gy = abs(img_f[x, y + 1] - img_f[x, y])
gradient[x, y] = gx + gy
cv2.imshow("gradient", gradient)
cv2.imwrite('ab.png', gradient)
cv2.waitKey(0)
def hough():
img = cv2.imread('x.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
for line in lines:
for rho, theta in line:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000 * (-b))
y1 = int(y0 + 2000 * (a))
x2 = int(x0 - 2000 * (-b))
y2 = int(y0 - 2000 * (a))
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('houghlines', img)
cv2.imshow('edg', edges)
cv2.waitKey(0)
def houghp():
img = cv2.imread('x.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
minLineLength = 100
maxLineGap = 10
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, None, minLineLength, maxLineGap)
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imshow('img', img)
cv2.imshow('edge', edges)
cv2.waitKey(0)
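
# Editor's sketch (not in the original file): a vectorized equivalent of
# gradient() above using numpy slicing; much faster than the per-pixel loops
# and free of uint8 wrap-around thanks to the float cast.
def gradient_fast(path="1.png"):
    img = cv2.imread(path, 0).astype(np.float32)
    gx = np.abs(np.diff(img, axis=0))[:, :-1]  # |I[x+1, y] - I[x, y]|
    gy = np.abs(np.diff(img, axis=1))[:-1, :]  # |I[x, y+1] - I[x, y]|
    return gx + gy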
# find_contour()
gradient() | [
"[email protected]"
] | |
9e2bb78f520aa42abdeaba0e0225c1e8624c96cd | 496d3438c33196bc62a15a009cc892b69bef3fd6 | /hataripy/modflow/mfflwob.py | c2cf7cba57724d4979749c06c1e232e039ed5b2f | [
"MIT"
] | permissive | hatarilabs/hataripy | 9cb7749a93a5f35a71a4ee4538ce4686e13c3b77 | 7db7869f34b875c9f76d42b7a4801b0c23738448 | refs/heads/master | 2021-11-23T08:06:39.912715 | 2019-10-18T20:09:55 | 2019-10-18T20:09:55 | 216,093,522 | 4 | 1 | MIT | 2021-11-16T21:11:54 | 2019-10-18T19:27:15 | Python | UTF-8 | Python | false | false | 12,194 | py | import numpy as np
from ..pakbase import Package
class ModflowFlwob(Package):
"""
Head-dependent flow boundary Observation package class. Minimal working
example that will be refactored in a future version.
Parameters
----------
nqfb : int
Number of cell groups for the head-dependent flow boundary
observations
nqcfb : int
Greater than or equal to the total number of cells in all cell groups
nqtfb : int
Total number of head-dependent flow boundary observations for all cell
groups
iufbobsv : int
unit number where output is saved
tomultfb : float
Time-offset multiplier for head-dependent flow boundary observations.
The product of tomultfb and toffset must produce a time value in units
consistent with other model input. tomultfb can be dimensionless or
can be used to convert the units of toffset to the time unit used in
the simulation.
nqobfb : int list of length nqfb
The number of times at which flows are observed for the group of cells
nqclfb : int list of length nqfb
Is a flag, and the absolute value of nqclfb is the number of cells in
the group. If nqclfb is less than zero, factor = 1.0 for all cells in
the group.
obsnam : string list of length nqtfb
Observation name
irefsp : int of length nqtfb
Stress period to which the observation time is referenced.
The reference point is the beginning of the specified stress period.
toffset : float list of length nqtfb
Is the time from the beginning of the stress period irefsp to the time
of the observation. toffset must be in units such that the product of
toffset and tomultfb are consistent with other model input. For
steady state observations, specify irefsp as the steady state stress
period and toffset less than or equal to perlen of the stress period.
If perlen is zero, set toffset to zero. If the observation falls
within a time step, linearly interpolation is used between values at
the beginning and end of the time step.
flwobs : float list of length nqtfb
Observed flow value from the head-dependent flow boundary into the
aquifer (+) or the flow from the aquifer into the boundary (-)
layer : int list of length(nqfb, nqclfb)
layer index for the cell included in the cell group
row : int list of length(nqfb, nqclfb)
row index for the cell included in the cell group
column : int list of length(nqfb, nqclfb)
column index of the cell included in the cell group
factor : float list of length(nqfb, nqclfb)
Is the portion of the simulated gain or loss in the cell that is
included in the total gain or loss for this cell group (fn of eq. 5).
flowtype : string
String that corresponds to the head-dependent flow boundary condition
type (CHD, GHB, DRN, RIV)
extension : list of string
Filename extension. If extension is None, extension is set to
['chob','obc','gbob','obg','drob','obd', 'rvob','obr']
(default is None).
no_print : boolean
When True or 1, a list of flow observations will not be
written to the Listing File (default is False)
options : list of strings
Package options (default is None).
unitnumber : list of int
File unit number. If unitnumber is None, unitnumber is set to
[40, 140, 41, 141, 42, 142, 43, 143] (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the flwob output name will be created using
the model name and .out extension (for example,
modflowtest.out), if iufbobsv is a number greater than zero.
If a single string is passed the package will be set to the string
and flwob output name will be created using the model name and .out
extension, if iufbobsv is a number greater than zero. To define the
names for all package files (input and output) the length of the list
of strings should be 2. Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
This represents a minimal working example that will be refactored in a
future version.
"""
def __init__(self, model, nqfb=0, nqcfb=0, nqtfb=0, iufbobsv=0,
tomultfb=1.0, nqobfb=None, nqclfb=None, obsnam=None,
irefsp=None, toffset=None, flwobs=None, layer=None,
row=None, column=None, factor=None, flowtype=None,
extension=None, no_print=False, options=None,
filenames=None, unitnumber=None):
"""
Package constructor
"""
if nqobfb is None:
nqobfb = []
if nqclfb is None:
nqclfb = []
if obsnam is None:
obsnam = []
if irefsp is None:
irefsp = []
if toffset is None:
toffset = []
if flwobs is None:
flwobs = []
if layer is None:
layer = []
if row is None:
row = []
if column is None:
column = []
if factor is None:
factor = []
if extension is None:
extension = ['chob', 'obc', 'gbob', 'obg', 'drob', 'obd',
'rvob', 'obr']
if unitnumber is None:
unitnumber = [40, 140, 41, 141, 42, 142, 43, 143]
if flowtype.upper().strip() == 'CHD':
name = ['CHOB', 'DATA']
extension = extension[0:2]
unitnumber = unitnumber[0:2]
iufbobsv = unitnumber[1]
self.url = 'chob.htm'
self.heading = '# CHOB for MODFLOW, generated by hataripy.'
elif flowtype.upper().strip() == 'GHB':
name = ['GBOB', 'DATA']
extension = extension[2:4]
unitnumber = unitnumber[2:4]
iufbobsv = unitnumber[1]
self.url = 'gbob.htm'
self.heading = '# GBOB for MODFLOW, generated by hataripy.'
elif flowtype.upper().strip() == 'DRN':
name = ['DROB', 'DATA']
extension = extension[4:6]
unitnumber = unitnumber[4:6]
iufbobsv = unitnumber[1]
self.url = 'drob.htm'
self.heading = '# DROB for MODFLOW, generated by hataripy.'
elif flowtype.upper().strip() == 'RIV':
name = ['RVOB', 'DATA']
extension = extension[6:8]
unitnumber = unitnumber[6:8]
iufbobsv = unitnumber[1]
self.url = 'rvob.htm'
self.heading = '# RVOB for MODFLOW, generated by hataripy.'
else:
msg = 'ModflowFlwob: flowtype must be CHD, GHB, DRN, or RIV'
raise KeyError(msg)
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# call base package constructor
Package.__init__(self, model, extension=extension, name=name,
unit_number=unitnumber,
allowDuplicates=True, filenames=filenames)
self.nqfb = nqfb
self.nqcfb = nqcfb
self.nqtfb = nqtfb
self.iufbobsv = iufbobsv
self.tomultfb = tomultfb
self.nqobfb = nqobfb
self.nqclfb = nqclfb
self.obsnam = obsnam
self.irefsp = irefsp
self.toffset = toffset
self.flwobs = flwobs
self.layer = layer
self.row = row
self.column = column
self.factor = factor
# -create empty arrays of the correct size
self.layer = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.row = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.column = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.factor = np.zeros((self.nqfb, max(self.nqclfb)), dtype='float32')
self.nqobfb = np.zeros((self.nqfb), dtype='int32')
self.nqclfb = np.zeros((self.nqfb), dtype='int32')
self.irefsp = np.zeros((self.nqtfb), dtype='int32')
self.toffset = np.zeros((self.nqtfb), dtype='float32')
self.flwobs = np.zeros((self.nqtfb), dtype='float32')
# -assign values to arrays
self.nqobfb[:] = nqobfb
self.nqclfb[:] = nqclfb
self.obsnam[:] = obsnam
self.irefsp[:] = irefsp
self.toffset[:] = toffset
self.flwobs[:] = flwobs
for i in range(self.nqfb):
self.layer[i, :len(layer[i])] = layer[i]
self.row[i, :len(row[i])] = row[i]
self.column[i, :len(column[i])] = column[i]
self.factor[i, :len(factor[i])] = factor[i]
# add more checks here
self.no_print = no_print
self.np = 0
if options is None:
options = []
if self.no_print:
options.append('NOPRINT')
self.options = options
# add checks for input compliance (obsnam length, etc.)
self.parent.add_package(self)
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# open file for writing
f_fbob = open(self.fn_path, 'w')
# write header
f_fbob.write('{}\n'.format(self.heading))
# write sections 1 and 2 : NOTE- what about NOPRINT?
line = '{:10d}'.format(self.nqfb)
line += '{:10d}'.format(self.nqcfb)
line += '{:10d}'.format(self.nqtfb)
line += '{:10d}'.format(self.iufbobsv)
if self.no_print or 'NOPRINT' in self.options:
line += '{: >10}'.format('NOPRINT')
line += '\n'
f_fbob.write(line)
f_fbob.write('{:10e}\n'.format(self.tomultfb))
# write sections 3-5 looping through observations groups
c = 0
for i in range(self.nqfb):
# while (i < self.nqfb):
# write section 3
f_fbob.write('{:10d}{:10d}\n'.format(self.nqobfb[i],
self.nqclfb[i]))
# Loop through observation times for the groups
for j in range(self.nqobfb[i]):
# write section 4
line = '{}{:10d}{:10.4g} {:10.4g}\n'.format(self.obsnam[c],
self.irefsp[c],
self.toffset[c],
self.flwobs[c])
f_fbob.write(line)
c += 1 # index variable
# write section 5 - NOTE- need to adjust factor for multiple
# observations in the same cell
for j in range(abs(self.nqclfb[i])):
# set factor to 1.0 for all cells in group
if self.nqclfb[i] < 0:
self.factor[i, :] = 1.0
line = '{:10d}'.format(self.layer[i, j])
line += '{:10d}'.format(self.row[i, j])
line += '{:10d}'.format(self.column[i, j])
                line += ' '  # single-space separator before the factor field
# note is 10f good enough here?
line += '{:10f}\n'.format(self.factor[i, j])
f_fbob.write(line)
f_fbob.close()
#
# swm: BEGIN hack for writing standard file
sfname = self.fn_path
sfname += '_ins'
# write header
f_ins = open(sfname, 'w')
f_ins.write('jif @\n')
f_ins.write('StandardFile 0 1 {}\n'.format(self.nqtfb))
for i in range(0, self.nqtfb):
f_ins.write('{}\n'.format(self.obsnam[i]))
f_ins.close()
# swm: END hack for writing standard file
return
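
# Hedged usage sketch (editor's addition; names and values are illustrative):
# attaching a single river-flow observation to an existing model object `m`.
# Shapes follow the class docstring above.
#
#   flwob = ModflowFlwob(m, nqfb=1, nqcfb=1, nqtfb=1,
#                        nqobfb=[1], nqclfb=[1], obsnam=['rv01'],
#                        irefsp=[1], toffset=[0.0], flwobs=[-10.0],
#                        layer=[[1]], row=[[5]], column=[[5]],
#                        factor=[[1.0]], flowtype='RIV')
#   flwob.write_file()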
| [
"[email protected]"
] | |
b0cc2ba9fac7cba1d33ade5d24feafd1c573bf99 | 1caf4418f3549567637f5e9893a445f52a38c6a0 | /CmsAdmin/media_content/app/dtos/__init__.py | ac09cc699e3fea3eea9f4320bb74d14a43fcd5c7 | [] | no_license | Final-Game/social_network_backend | c601563e08c0fd7de72a614944f354ef8d2d31d8 | 8111787d1d20eb87733ae360d8baa745a65e2743 | refs/heads/master | 2023-03-04T21:12:43.147084 | 2021-02-23T03:45:22 | 2021-02-23T03:45:22 | 290,542,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from .media_dto import MediaDto
from .create_media_list_dto import CreatePostMediaListDto | [
"[email protected]"
] | |
6fd957dec3b88887df1e62ab9b4bc131e1c557b1 | b8461afd9d11457a91ae803987bde74337ad4fd1 | /docs/source/reference-core/channels-shutdown.py | dcd35767ae1921678a10099dd6e99150a70a52b7 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abispo/trio | d9750920091fc4b77e4d8386def45ea727eb2218 | 5bcfb1b9b90cc6bbf517468251597e8b262ca789 | refs/heads/master | 2020-06-20T21:10:38.474717 | 2019-07-15T05:56:33 | 2019-07-15T05:56:33 | 197,250,586 | 1 | 0 | NOASSERTION | 2019-07-16T18:53:03 | 2019-07-16T18:53:03 | null | UTF-8 | Python | false | false | 582 | py | import trio
async def main():
async with trio.open_nursery() as nursery:
send_channel, receive_channel = trio.open_memory_channel(0)
nursery.start_soon(producer, send_channel)
nursery.start_soon(consumer, receive_channel)
async def producer(send_channel):
async with send_channel:
for i in range(3):
await send_channel.send("message {}".format(i))
async def consumer(receive_channel):
async with receive_channel:
async for value in receive_channel:
print("got value {!r}".format(value))
trio.run(main)
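
# Expected output (editor's note): closing each channel half via `async with`
# ends the consumer's `async for` loop cleanly once the producer finishes.
#   got value 'message 0'
#   got value 'message 1'
#   got value 'message 2'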
| [
"[email protected]"
] | |
5d1f056703f1b727bc1edd6a7ae06a89636722a4 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/BlendLuxCore/ui/halt.py | 07a3ec885fcf3cd6db5a21ef53e0815aa858c782 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,668 | py | from bl_ui.properties_render import RenderButtonsPanel
from bl_ui.properties_render_layer import RenderLayerButtonsPanel
from bpy.types import Panel
from ..utils import ui as utils_ui
from . import icons
def draw(layout, context, halt):
layout.active = halt.enable
row = layout.row()
row.prop(halt, "use_time")
split = row.split()
split.active = halt.use_time
split.prop(halt, "time")
if halt.use_time and halt.time > 60:
time_humanized = utils_ui.humanize_time(halt.time)
row = layout.row()
row.alignment = "RIGHT"
row.label(time_humanized, icon="TIME")
row = layout.row()
row.prop(halt, "use_samples")
split = row.split()
split.active = halt.use_samples
split.prop(halt, "samples")
config = context.scene.luxcore.config
if halt.use_samples and config.engine == "PATH" and config.use_tiles:
# some special warnings about tile path usage
aa = config.tile.path_sampling_aa_size
samples_per_pass = aa**2
if config.tile.multipass_enable and halt.samples % samples_per_pass != 0:
layout.label("Should be a multiple of %d" % samples_per_pass, icon=icons.WARNING)
if context.scene.luxcore.denoiser.enabled and context.scene.luxcore.denoiser.type == "BCD":
# BCD Denoiser needs one warmup pass plus at least one sample collecting pass
min_samples = samples_per_pass * 2
else:
min_samples = samples_per_pass
if halt.samples < min_samples:
layout.label("Use at least %d samples!" % min_samples, icon=icons.WARNING)
if not config.tile.multipass_enable and halt.samples > min_samples:
layout.label("Samples halt condition overriden by disabled multipass", icon=icons.INFO)
col = layout.column(align=True)
col.prop(halt, "use_noise_thresh")
if halt.use_noise_thresh:
col.prop(halt, "noise_thresh")
col.prop(halt, "noise_thresh_warmup")
col.prop(halt, "noise_thresh_step")
class LUXCORE_RENDER_PT_halt_conditions(Panel, RenderButtonsPanel):
"""
These are the global halt conditions shown in the render settings
"""
bl_label = "LuxCore Halt Conditions"
COMPAT_ENGINES = {"LUXCORE"}
@classmethod
def poll(cls, context):
return context.scene.render.engine == "LUXCORE"
def draw_header(self, context):
halt = context.scene.luxcore.halt
self.layout.prop(halt, "enable", text="")
def draw(self, context):
layout = self.layout
halt = context.scene.luxcore.halt
draw(layout, context, halt)
layers = context.scene.render.layers
overriding_layers = [layer for layer in layers if layer.use and layer.luxcore.halt.enable]
if overriding_layers:
layout.separator()
col = layout.column(align=True)
row = col.row()
split = row.split(percentage=0.8)
split.label("Render Layers Overriding Halt Conditions:")
op = split.operator("luxcore.switch_space_data_context",
text="Show", icon="RENDERLAYERS")
op.target = "RENDER_LAYER"
for layer in overriding_layers:
halt = layer.luxcore.halt
conditions = []
if halt.use_time:
conditions.append("Time (%ds)" % halt.time)
if halt.use_samples:
conditions.append("Samples (%d)" % halt.samples)
if halt.use_noise_thresh:
conditions.append("Noise (%d)" % halt.noise_thresh)
if conditions:
text = layer.name + ": " + ", ".join(conditions)
col.label(text, icon="RENDERLAYERS")
else:
text = layer.name + ": No Halt Condition!"
col.label(text, icon=icons.ERROR)
class LUXCORE_RENDERLAYER_PT_halt_conditions(Panel, RenderLayerButtonsPanel):
"""
These are the per-renderlayer halt condition settings,
they can override the global settings and are shown in the renderlayer settings
"""
bl_label = "Override Halt Conditions"
COMPAT_ENGINES = {"LUXCORE"}
@classmethod
def poll(cls, context):
return context.scene.render.engine == "LUXCORE"
def draw_header(self, context):
rl = context.scene.render.layers.active
halt = rl.luxcore.halt
self.layout.prop(halt, "enable", text="")
def draw(self, context):
rl = context.scene.render.layers.active
halt = rl.luxcore.halt
draw(self.layout, context, halt)
| [
"[email protected]"
] | |
adc9e3d8973dbb3380952f23d6606d8fea4fa7a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03425/s431844808.py | f6ac9253a929b3316b46eab8732c3022a81c6fb6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | n = int(input())
s = [input() for i in range(n)]
l = [0] * 5
for i in range(n):
if s[i][0] == "M":
l[0] += 1
elif s[i][0] == "A":
l[1] += 1
elif s[i][0] == "R":
l[2] += 1
elif s[i][0] == "C":
l[3] += 1
elif s[i][0] == "H":
l[4] += 1
ans = 0
for j in range(3):
for k in range(j+1, 4):
for i in range(k+1, 5):
ans += l[j] * l[k] * l[i]
print(ans)
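
# Editor's sanity check (not in the original submission): the triple loop is
# the sum over all 3-combinations of the five per-initial counts.
from itertools import combinations
assert ans == sum(a * b * c for a, b, c in combinations(l, 3))
| [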
"[email protected]"
] | |
5ef99950fd6011c61d4088bd8fe2d21e6a1acc15 | f2601a5678fbc04738eff8393a42c07c87ca4d9c | /licenses/management/commands/upload_license_messages.py | e69e87ba92c4d6da391efd55cd87d58d77121df6 | [
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"CC-BY-NC-4.0",
"CC-BY-NC-ND-4.0",
"LicenseRef-scancode-unknown"
] | permissive | sp35/cc-licenses | 1c2e713fbc4ec96a90bdb6f5c8b5c4750fee3632 | 42573273bac4136adf9f482db75314d81efdcdcf | refs/heads/main | 2023-03-08T11:28:06.885986 | 2021-02-25T21:23:14 | 2021-02-25T21:23:14 | 342,029,287 | 0 | 0 | MIT | 2021-02-24T20:38:45 | 2021-02-24T20:38:45 | null | UTF-8 | Python | false | false | 303 | py | from django.core.management import BaseCommand
from licenses.models import License
class Command(BaseCommand):
def handle(self, **options):
for license in License.objects.filter(
version="4.0", license_code__startswith="by"
):
license.tx_upload_messages()
| [
"[email protected]"
] | |
ec737f98eaab6f5935ea821568169b9097114b80 | cfd9fa1af735ac3572954704a47e35543850b244 | /run.py | 5dcf8a98e292f9b7dbf746fb300f36d20052741f | [] | no_license | xingyueGK/hjsg | c1844ea8161d254f6d6cf70f42d1ac849e117438 | be0c4c457bdfaa9178f25f9f722dc78d88f24540 | refs/heads/master | 2022-12-12T08:28:55.823357 | 2020-12-05T12:02:06 | 2020-12-05T12:02:06 | 147,184,573 | 0 | 1 | null | 2022-01-06T22:26:48 | 2018-09-03T09:47:04 | HTML | UTF-8 | Python | false | false | 2,701 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/21 11:15
# @Author : xingyue
# @File : run.py
# how to run
import threading
import os, time
from task.base import SaoDangFb
from task.talent import Talent
from task.hero_soul import DragonBoat,GoBoat
from task.glory_front import glory_front
from task.caomujiebing import Flag
from task.autoCountryBanquet import autoCountryBanquet
class run(SaoDangFb,Talent):
# talent scroll task
pass
class duanwu(SaoDangFb,DragonBoat):
# dragon boat race
pass
class longzhou(SaoDangFb,GoBoat):
pass
class dongxizhanxian(SaoDangFb,glory_front):
# east-west front
pass
class banquet(SaoDangFb,autoCountryBanquet):
pass
class cmjb(SaoDangFb,Flag):
pass
if __name__ == '__main__':
s1 = threading.Semaphore(3)
def act(user, apass, addr):
s1.acquire()
action = dongxizhanxian(user, apass, addr)
if action.level()< 150:
s1.release()
return False
action.zhanxian(s1)
s1.release()
def flag(user, apass, addr):
    s1.acquire()  # blocking acquire; the unconditional release below pairs with it
action = cmjb(user, apass, addr)
schedule = action.get_today_schedule()
if schedule['status'] == -2:
print schedule['msg']
exit(1)
elif schedule['status'] != 1:
print schedule['msg']
exit(1)
try:
self_server = schedule['data']['self_server']
except:
exit(3)
get_enter_list = action.get_enter_list(self_server)
enter_cd = get_enter_list['enter_cd']
print enter_cd
time.sleep(enter_cd)
action.enter(self_server,1)
s1.release()
def lz(user, apass, addr):
s1.acquire()
action = longzhou(user, apass, addr)
action.buytimes(200)
action.longzhou()
# action.meter_reward()
# action.bug_meter_reward()
s1.release()
def guoyan(user, apass, addr):
s1.acquire()
action = banquet(user, apass, addr)
action.jion_team()
s1.release()
filepath = os.path.dirname(os.path.abspath(__file__))
# cont = ['21user.txt', 'autouser.txt','gmnewyear.txt', 'user.txt', 'alluser.txt']
cont = ['user.txt']
for t in cont:
with open('%s/users/%s' % (filepath, t), 'r') as f:
for i in f:
if i.strip() and not i.startswith('#'):
name = i.split()[0]
passwd = i.split()[1]
addr = i.split()[2]
# addr = 147
t1 = threading.Thread(target=lz, args=(name, passwd, addr))
t1.start()
| [
"[email protected]"
] | |
4bf49fded33b7c4a27f087f75b6783e70d7a0f6f | 4ec1eda7669dbe2dd67ac7218421fae62b5ef741 | /userauth/urls.py | d8645c6d93d55fb5f15ec3a2f8a0d8f8793391d7 | [] | no_license | atul8727/medical_helper | 075284335644343d71d4c4d92f1e4e92b67089aa | 2e03f70b82834b95cb4d424d22f2bd5b82f652c8 | refs/heads/master | 2023-07-01T03:15:46.146540 | 2021-08-01T19:06:03 | 2021-08-01T19:06:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | from django.urls import path,include
from .views import dashboard_view,register_user
from app.views import *
urlpatterns = [
path('dashboard/', dashboard_view, name='dashboard'),
path('register/',register_user, name='register'),
path('oauth/',include('social_django.urls')),
] | [
"[email protected]"
] | |
9696a8447135bae26d4e81268979efa949782a04 | cc8416a20b3aa9832dabf29112e52b5dfb367157 | /stable_nalu/layer/regualized_linear_nac.py | 295fd28e4264cbe51423d12d827218e558f7c0b5 | [
"MIT"
] | permissive | AndreasMadsen/stable-nalu | ff877592ec965dca49a48bf94b38e343ba407411 | b3296ace137ffa4854edeef3759f1578b7650210 | refs/heads/master | 2023-05-22T04:53:17.495712 | 2021-08-19T18:15:14 | 2021-08-19T18:23:45 | 177,330,156 | 149 | 19 | MIT | 2020-01-15T08:06:12 | 2019-03-23T19:13:34 | HTML | UTF-8 | Python | false | false | 1,824 | py |
import scipy.optimize
import numpy as np
import torch
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
# `Regualizer` is used below but was never imported in this file; the module
# path here is an assumption based on the package layout.
from ..functional import Regualizer
class RegualizedLinearNACLayer(ExtendedTorchModule):
"""Implements the RegualizedLinearNAC
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, in_features, out_features,
regualizer_shape='squared',
**kwargs):
super().__init__('nac', **kwargs)
self.in_features = in_features
self.out_features = out_features
self._regualizer_bias = Regualizer(
support='nac', type='bias',
shape=regualizer_shape
)
self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
self.register_parameter('bias', None)
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.W)
def regualizer(self):
return super().regualizer({
'W': self._regualizer_bias(self.W)
})
def forward(self, input, reuse=False):
self.writer.add_histogram('W', self.W)
self.writer.add_tensor('W', self.W, verbose_only=False)
return torch.nn.functional.linear(input, self.W, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}'.format(
self.in_features, self.out_features
)
class RegualizedLinearNACCell(AbstractRecurrentCell):
"""Implements the RegualizedLinearNAC as a recurrent cell
Arguments:
input_size: number of ingoing features
hidden_size: number of outgoing features
"""
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(RegualizedLinearNACLayer, input_size, hidden_size, **kwargs)
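
# Hedged usage sketch (editor's addition): extra kwargs accepted by
# ExtendedTorchModule (writer, name, ...) are omitted here.
#
#   layer = RegualizedLinearNACLayer(in_features=4, out_features=2)
#   layer.reset_parameters()          # W starts uninitialized
#   y = layer(torch.randn(8, 4))      # -> shape (8, 2)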
| [
"[email protected]"
] | |
993bb6fb588f69bf3dbc7122e9b8a916a0de47ce | 134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2 | /desktop/core/ext-py/Twisted/twisted/internet/epollreactor.py | 0031e50b6adcc505e65831ddb8be2d3d8d84ef4e | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | civascu/hue | 22637f13a4cfc557716557661523131b6ac16da4 | 82f2de44789ff5a981ed725175bae7944832d1e9 | refs/heads/master | 2020-03-31T01:50:39.449966 | 2010-07-21T01:05:50 | 2010-07-21T01:07:15 | 788,284 | 0 | 0 | Apache-2.0 | 2019-02-04T07:03:12 | 2010-07-21T07:34:27 | Python | UTF-8 | Python | false | false | 8,517 | py | # Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An epoll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import epollreactor
epollreactor.install()
Maintainer: Jp Calderone
"""
import sys, errno
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import _epoll
from twisted.python import log
from twisted.internet import posixbase, error
from twisted.internet.main import CONNECTION_LOST
_POLL_DISCONNECTED = (_epoll.HUP | _epoll.ERR)
class EPollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses epoll(4).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize epoll object, file descriptor tracking dictionaries, and the
base class.
"""
# Create the poller we're going to use. The 1024 here is just a hint
# to the kernel, it is not a hard maximum.
self._poller = _epoll.epoll(1024)
self._reads = {}
self._writes = {}
self._selectables = {}
posixbase.PosixReactorBase.__init__(self)
def _add(self, xer, primary, other, selectables, event, antievent):
"""
Private method for adding a descriptor from the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example).
"""
fd = xer.fileno()
if fd not in primary:
cmd = _epoll.CTL_ADD
flags = event
if fd in other:
flags |= antievent
cmd = _epoll.CTL_MOD
primary[fd] = 1
selectables[fd] = xer
# epoll_ctl can raise all kinds of IOErrors, and every one
# indicates a bug either in the reactor or application-code.
# Let them all through so someone sees a traceback and fixes
# something. We'll do the same thing for every other call to
# this method in this file.
self._poller._control(cmd, fd, flags)
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._add(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def addWriter(self, writer):
"""
Add a FileDescriptor for notification of data available to write.
"""
self._add(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def _remove(self, xer, primary, other, selectables, event, antievent):
"""
Private method for removing a descriptor from the event loop.
It does the inverse job of _add, and also add a check in case of the fd
has gone away.
"""
fd = xer.fileno()
if fd == -1:
for fd, fdes in selectables.items():
if xer is fdes:
break
else:
return
if fd in primary:
cmd = _epoll.CTL_DEL
flags = event
if fd in other:
flags = antievent
cmd = _epoll.CTL_MOD
else:
del selectables[fd]
del primary[fd]
# See comment above _control call in _add.
self._poller._control(cmd, fd, flags)
def removeReader(self, reader):
"""
Remove a Selectable for notification of data available to read.
"""
self._remove(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def removeWriter(self, writer):
"""
Remove a Selectable for notification of data available to write.
"""
self._remove(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
if self.waker is not None:
fd = self.waker.fileno()
if fd in self._reads:
del self._reads[fd]
del self._selectables[fd]
result = self._selectables.values()
fds = self._selectables.keys()
self._reads.clear()
self._writes.clear()
self._selectables.clear()
for fd in fds:
try:
# Actually, we'll ignore all errors from this, since it's
# just last-chance cleanup.
self._poller._control(_epoll.CTL_DEL, fd, 0)
except IOError:
pass
if self.waker is not None:
fd = self.waker.fileno()
self._reads[fd] = 1
self._selectables[fd] = self.waker
return result
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def doPoll(self, timeout):
"""
Poll the poller for new events.
"""
if timeout is None:
timeout = 1
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
# Limit the number of events to the number of io objects we're
# currently tracking (because that's maybe a good heuristic) and
# the amount of time we block to the value specified by our
# caller.
l = self._poller.wait(len(self._selectables), timeout)
except IOError, err:
if err.errno == errno.EINTR:
return
# See epoll_wait(2) for documentation on the other conditions
# under which this can fail. They can only be due to a serious
# programming error on our part, so let's just announce them
# loudly.
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
pass
else:
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
"""
fd is available for read or write, make the work and raise errors
if necessary.
"""
why = None
inRead = False
if event & _POLL_DISCONNECTED and not (event & _epoll.IN):
why = CONNECTION_LOST
else:
try:
if event & _epoll.IN:
why = selectable.doRead()
inRead = True
if not why and event & _epoll.OUT:
why = selectable.doWrite()
inRead = False
if selectable.fileno() != fd:
why = error.ConnectionFdescWentAway(
'Filedescriptor went away')
inRead = False
except:
log.err()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def install():
"""
Install the epoll() reactor.
"""
p = EPollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["EPollReactor", "install"]
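# Hedged usage sketch (module path assumed to be twisted.internet.epollreactor):
# the reactor must be installed before the first `from twisted.internet import
# reactor` anywhere in the program.
#     from twisted.internet import epollreactor
#     epollreactor.install()
#     from twisted.internet import reactor
#     reactor.run()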
| [
"[email protected]"
] | |
a2f9ac6a7a2e1e0f0409cdcdb101ee1a3326ccff | ca1e432c66ca9289cc25039f6c035c292e298d15 | /content_management_portal/migrations/0002_auto_20200629_1454.py | 98fb46bcc42bd507c329ea7f4728238c6de54c3e | [] | no_license | raviteja1766/ib_mini_projects | 9bf091acf34e87d7a44bec51a504bdb81aceae27 | 3fa36b97cfa90b5f5853253480934cf27714aa15 | refs/heads/master | 2022-11-19T07:08:27.061315 | 2020-07-02T16:54:42 | 2020-07-02T16:54:42 | 272,033,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Generated by Django 2.2.1 on 2020-06-29 14:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('content_management_portal', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='user',
new_name='user_id',
),
]
| [
"[email protected]"
] | |
42dc09e71e7b06ee6b54b705f7a5d017856157c3 | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/catapult/third_party/gsutil/gslib/cloud_api.py | b7af6b6eeac56ac0457c213753aa18d7878f5edb | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 24,923 | py | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gsutil API for interacting with cloud storage providers."""
from __future__ import absolute_import
class CloudApi(object):
"""Abstract base class for interacting with cloud storage providers.
Implementations of the gsutil Cloud API are not guaranteed to be thread-safe.
Behavior when calling a gsutil Cloud API instance simultaneously across
threads is undefined and doing so will likely cause errors. Therefore,
a separate instance of the gsutil Cloud API should be instantiated per-thread.
"""
def __init__(self, bucket_storage_uri_class, logger, provider=None, debug=0):
"""Performs necessary setup for interacting with the cloud storage provider.
Args:
bucket_storage_uri_class: boto storage_uri class, used by APIs that
provide boto translation or mocking.
logger: logging.logger for outputting log messages.
provider: Default provider prefix describing cloud storage provider to
connect to.
debug: Debug level for the API implementation (0..3).
"""
self.bucket_storage_uri_class = bucket_storage_uri_class
self.logger = logger
self.provider = provider
self.debug = debug
def GetBucket(self, bucket_name, provider=None, fields=None):
"""Gets Bucket metadata.
Args:
bucket_name: Name of the bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields, for
example, ['logging', 'defaultObjectAcl']
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object.
"""
raise NotImplementedError('GetBucket must be overloaded')
def ListBuckets(self, project_id=None, provider=None, fields=None):
"""Lists bucket metadata for the given project.
Args:
project_id: Project owning the buckets, default from config if None.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these metadata fields for the listing,
for example:
['items/logging', 'items/defaultObjectAcl'].
Note that the WildcardIterator class should be used to list
buckets instead of calling this function directly. It amends
the fields definition from get-like syntax such as
['logging', 'defaultObjectAcl'] so that the caller does not
need to prepend 'items/' or specify fields necessary for listing
(like nextPageToken).
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Iterator over Bucket objects.
"""
raise NotImplementedError('ListBuckets must be overloaded')
def PatchBucket(self, bucket_name, metadata, canned_acl=None,
canned_def_acl=None, preconditions=None, provider=None,
fields=None):
"""Updates bucket metadata for the bucket with patch semantics.
Args:
bucket_name: Name of bucket to update.
metadata: Bucket object defining metadata to be updated.
canned_acl: Canned ACL to apply to the bucket.
canned_def_acl: Canned default object ACL to apply to the bucket.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object describing new bucket metadata.
"""
raise NotImplementedError('PatchBucket must be overloaded')
def CreateBucket(self, bucket_name, project_id=None, metadata=None,
provider=None, fields=None):
"""Creates a new bucket with the specified metadata.
Args:
bucket_name: Name of the new bucket.
project_id: Project owner of the new bucket, default from config if None.
metadata: Bucket object defining new bucket metadata.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Bucket metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Bucket object describing new bucket metadata.
"""
raise NotImplementedError('CreateBucket must be overloaded')
def DeleteBucket(self, bucket_name, preconditions=None, provider=None):
"""Deletes a bucket.
Args:
bucket_name: Name of the bucket to delete.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('DeleteBucket must be overloaded')
class CsObjectOrPrefixType(object):
"""Enum class for describing CsObjectOrPrefix types."""
OBJECT = 'object' # Cloud object
PREFIX = 'prefix' # Cloud bucket subdirectory
class CsObjectOrPrefix(object):
"""Container class for ListObjects results."""
def __init__(self, data, datatype):
"""Stores a ListObjects result.
Args:
data: Root object, either an apitools Object or a string Prefix.
datatype: CsObjectOrPrefixType of data.
"""
self.data = data
self.datatype = datatype
def ListObjects(self, bucket_name, prefix=None, delimiter=None,
all_versions=None, provider=None, fields=None):
"""Lists objects (with metadata) and prefixes in a bucket.
Args:
bucket_name: Bucket containing the objects.
prefix: Prefix for directory-like behavior.
delimiter: Delimiter for directory-like behavior.
all_versions: If true, list all object versions.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these metadata fields for the listing,
for example:
['items/acl', 'items/updated', 'prefixes'].
Note that the WildcardIterator class should be used to list
objects instead of calling this function directly. It amends
the fields definition from get-like syntax such as
['acl', 'updated'] so that the caller does not need to
prepend 'items/' or specify any fields necessary for listing
(such as prefixes or nextPageToken).
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Iterator over CsObjectOrPrefix wrapper class.
"""
raise NotImplementedError('ListObjects must be overloaded')
def GetObjectMetadata(self, bucket_name, object_name, generation=None,
provider=None, fields=None):
"""Gets object metadata.
Args:
bucket_name: Bucket containing the object.
object_name: Object name.
generation: Generation of the object to retrieve.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields, for
example, ['acl', 'updated'].
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object.
"""
raise NotImplementedError('GetObjectMetadata must be overloaded')
def PatchObjectMetadata(self, bucket_name, object_name, metadata,
canned_acl=None, generation=None, preconditions=None,
provider=None, fields=None):
"""Updates object metadata with patch semantics.
Args:
bucket_name: Bucket containing the object.
object_name: Object name for object.
metadata: Object object defining metadata to be updated.
canned_acl: Canned ACL to be set on the object.
generation: Generation (or version) of the object to update.
preconditions: Preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Updated object metadata.
"""
raise NotImplementedError('PatchObjectMetadata must be overloaded')
class DownloadStrategy(object):
"""Enum class for specifying download strategy."""
ONE_SHOT = 'oneshot'
RESUMABLE = 'resumable'
def GetObjectMedia(self, bucket_name, object_name, download_stream,
provider=None, generation=None, object_size=None,
download_strategy=DownloadStrategy.ONE_SHOT, start_byte=0,
end_byte=None, progress_callback=None,
serialization_data=None, digesters=None):
"""Gets object data.
Args:
bucket_name: Bucket containing the object.
object_name: Object name.
download_stream: Stream to send the object data to.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
generation: Generation of the object to retrieve.
object_size: Total size of the object being downloaded.
download_strategy: Cloud API download strategy to use for download.
      start_byte: Starting point for download (for resumable downloads and
                  range requests). Can be set to negative to request the final
                  range of bytes (python equivalent of [-3:])
end_byte: Ending point for download (for range requests).
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
serialization_data: Implementation-specific dict containing serialization
information for the download.
digesters: Dict of {string : digester}, where string is a name of a hash
algorithm, and digester is a validation digester that supports
update(bytes) and digest() using that algorithm.
Implementation can set the digester value to None to indicate
bytes were not successfully digested on-the-fly.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Content-encoding string if it was detected that the server sent an encoded
object during transfer, None otherwise.
"""
raise NotImplementedError('GetObjectMedia must be overloaded')
def UploadObject(self, upload_stream, object_metadata, canned_acl=None,
size=None, preconditions=None, progress_callback=None,
provider=None, fields=None):
"""Uploads object data and metadata.
Args:
upload_stream: Seekable stream of object data.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
size: Optional object size.
preconditions: Preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObject must be overloaded')
def UploadObjectStreaming(self, upload_stream, object_metadata,
canned_acl=None, preconditions=None,
progress_callback=None, provider=None,
fields=None):
"""Uploads object data and metadata.
Args:
upload_stream: Stream of object data. May not be seekable.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
preconditions: Preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size), but fills in only
bytes_transferred.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObjectStreaming must be overloaded')
def UploadObjectResumable(
self, upload_stream, object_metadata, canned_acl=None,
size=None, preconditions=None, serialization_data=None,
tracker_callback=None, progress_callback=None, provider=None,
fields=None):
"""Uploads object data and metadata using a resumable upload strategy.
Args:
upload_stream: Seekable stream of object data.
object_metadata: Object metadata for new object. Must include bucket
and object name.
canned_acl: Optional canned ACL to apply to object. Overrides ACL set
in object_metadata.
size: Total size of the object.
preconditions: Preconditions for the request.
serialization_data: Dict of {'url' : UploadURL} allowing for uploads to
be resumed.
tracker_callback: Callback function taking a upload URL string.
Guaranteed to be called when the implementation gets an
upload URL, allowing the caller to resume the upload
across process breaks by saving the upload URL in
a tracker file.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields when the
upload is complete.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('UploadObjectResumable must be overloaded')
def CopyObject(self, src_obj_metadata, dst_obj_metadata, src_generation=None,
canned_acl=None, preconditions=None, progress_callback=None,
max_bytes_per_call=None, provider=None, fields=None):
"""Copies an object in the cloud.
Args:
src_obj_metadata: Object metadata for source object. Must include
bucket name, object name, and etag.
dst_obj_metadata: Object metadata for new object. Must include bucket
and object name.
src_generation: Generation of the source object to copy.
canned_acl: Optional canned ACL to apply to destination object. Overrides
ACL set in dst_obj_metadata.
preconditions: Destination object preconditions for the request.
progress_callback: Optional callback function for progress notifications.
Receives calls with arguments
(bytes_transferred, total_size).
max_bytes_per_call: Integer describing maximum number of bytes
to rewrite per service call.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Object object for newly created destination object.
"""
raise NotImplementedError('CopyObject must be overloaded')
def ComposeObject(self, src_objs_metadata, dst_obj_metadata,
preconditions=None, provider=None, fields=None):
"""Composes an object in the cloud.
Args:
src_objs_metadata: List of ComposeRequest.SourceObjectsValueListEntries
specifying the objects to compose.
dst_obj_metadata: Metadata for the destination object including bucket
and object name.
preconditions: Destination object preconditions for the request.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Object metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Composed object metadata.
"""
raise NotImplementedError('ComposeObject must be overloaded')
def DeleteObject(self, bucket_name, object_name, preconditions=None,
generation=None, provider=None):
"""Deletes an object.
Args:
bucket_name: Name of the containing bucket.
object_name: Name of the object to delete.
preconditions: Preconditions for the request.
generation: Generation (or version) of the object to delete; if None,
deletes the live object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('DeleteObject must be overloaded')
def WatchBucket(self, bucket_name, address, channel_id, token=None,
provider=None, fields=None):
"""Creates a notification subscription for changes to objects in a bucket.
Args:
bucket_name: Bucket containing the objects.
address: Address to which to send notifications.
channel_id: Unique ID string for the channel.
token: If present, token string is delivered with each notification.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
fields: If present, return only these Channel metadata fields.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Channel object describing the notification subscription.
"""
raise NotImplementedError('WatchBucket must be overloaded')
def StopChannel(self, channel_id, resource_id, provider=None):
"""Stops a notification channel.
Args:
channel_id: Unique ID string for the channel.
resource_id: Version-agnostic ID string for the channel.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
raise NotImplementedError('StopChannel must be overloaded')
class Preconditions(object):
"""Preconditions class for specifying preconditions to cloud API requests."""
def __init__(self, gen_match=None, meta_gen_match=None):
"""Instantiates a Preconditions object.
Args:
gen_match: Perform request only if generation of target object
matches the given integer. Ignored for bucket requests.
meta_gen_match: Perform request only if metageneration of target
object/bucket matches the given integer.
"""
self.gen_match = gen_match
self.meta_gen_match = meta_gen_match
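# Illustrative sketch (hypothetical snippet, not from gsutil itself): guard an
# object deletion so it only succeeds if the live generation is still 12345.
#     preconditions = Preconditions(gen_match=12345)
#     cloud_api.DeleteObject('my-bucket', 'my-object', preconditions=preconditions)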
class ArgumentException(Exception):
"""Exception raised when arguments to a Cloud API method are invalid.
This exception is never raised as a result of a failed call to a cloud
storage provider.
"""
def __init__(self, reason):
Exception.__init__(self)
self.reason = reason
def __repr__(self):
return str(self)
def __str__(self):
return '%s: %s' % (self.__class__.__name__, self.reason)
class ProjectIdException(ArgumentException):
"""Exception raised when a Project ID argument is required but not present."""
class ServiceException(Exception):
"""Exception raised when a cloud storage provider request fails.
This exception is raised only as a result of a failed remote call.
"""
def __init__(self, reason, status=None, body=None):
Exception.__init__(self)
self.reason = reason
self.status = status
self.body = body
def __repr__(self):
return str(self)
def __str__(self):
message = '%s:' % self.__class__.__name__
if self.status:
message += ' %s' % self.status
message += ' %s' % self.reason
if self.body:
message += '\n%s' % self.body
return message
class RetryableServiceException(ServiceException):
"""Exception class for retryable exceptions."""
class ResumableDownloadException(RetryableServiceException):
"""Exception raised for res. downloads that can be retried later."""
class ResumableUploadException(RetryableServiceException):
"""Exception raised for res. uploads that can be retried w/ same upload ID."""
class ResumableUploadStartOverException(RetryableServiceException):
"""Exception raised for res. uploads that can be retried w/ new upload ID."""
class ResumableUploadAbortException(ServiceException):
"""Exception raised for resumable uploads that cannot be retried later."""
class AuthenticationException(ServiceException):
"""Exception raised for errors during the authentication process."""
class PreconditionException(ServiceException):
"""Exception raised for precondition failures."""
class NotFoundException(ServiceException):
"""Exception raised when a resource is not found (404)."""
class NotEmptyException(ServiceException):
"""Exception raised when trying to delete a bucket is not empty."""
class BadRequestException(ServiceException):
"""Exception raised for malformed requests.
Where it is possible to detect invalid arguments prior to sending them
to the server, an ArgumentException should be raised instead.
"""
class AccessDeniedException(ServiceException):
"""Exception raised when authenticated user has insufficient access rights.
This is raised when the authentication process succeeded but the
authenticated user does not have access rights to the requested resource.
"""
| [
"[email protected]"
] | |
14fa26d61098fd50e84ed8cab47b9e770689805e | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /백준/Silver/미로 탐색.py | d234250c529ddb20d32d95135c9f6906a5932cda | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import sys
sys.stdin = open('미로 탐색.txt', 'r')
from collections import deque
def BFS(y,x):
queue = deque()
queue.append((y,x))
visit[y][x] = 1
while queue:
a, b = queue.popleft()
if (a, b) == (N-1, M-1):
print(visit[a][b])
return
for i in range(4):
ny = a + dy[i]
nx = b + dx[i]
if 0 <= ny < N and 0 <= nx < M and matrix[ny][nx] != 0 and visit[ny][nx] == 0:
visit[ny][nx] = visit[a][b] + 1
queue.append((ny, nx))
N, M = map(int, input().split())
matrix = []
for i in range(N):
matrix.append(list(map(int, input())))
dy = [1,-1,0,0]
dx = [0,0,1,-1]
visit = [[0]*M for _ in range(N)]
BFS(0, 0)
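# Hedged example: with the classic sample from the Baekjoon 2178 statement in
# '미로 탐색.txt' (values assumed from memory of the problem page):
#     4 6
#     101111
#     101010
#     101011
#     111011
# the BFS above prints 15 -- the number of cells on the shortest path from
# (1, 1) to (N, M), counting both endpoints.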
| [
"[email protected]"
] | |
8a5a13a59d69a8b4e360050051c66440c9475be8 | 00b1fe62aff1bbad885a1b13354239b07925c5c1 | /catalyst_rl/dl/callbacks/inference.py | 0e865e1f3b6ece6a4f57fa06a27b1a96f3d7c224 | [
"Apache-2.0"
] | permissive | catalyst-team/catalyst-rl | a78675c477bef478d73cd1e7101be6dbb7b586aa | 75ffa808e2bbb9071a169a1a9c813deb6a69a797 | refs/heads/master | 2021-09-22T08:36:12.161991 | 2021-09-13T05:59:12 | 2021-09-13T05:59:12 | 247,928,934 | 50 | 8 | null | null | null | null | UTF-8 | Python | false | false | 4,901 | py | from collections import defaultdict
import os
import imageio
import numpy as np
from skimage.color import label2rgb
import torch
import torch.nn.functional as F
from catalyst_rl.dl import Callback, CallbackOrder, State, utils
# @TODO: refactor
class InferCallback(Callback):
def __init__(self, out_dir=None, out_prefix=None):
super().__init__(CallbackOrder.Internal)
self.out_dir = out_dir
self.out_prefix = out_prefix
self.predictions = defaultdict(lambda: [])
self._keys_from_state = ["out_dir", "out_prefix"]
def on_stage_start(self, state: State):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
# assert self.out_prefix is not None
if self.out_dir is not None:
self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
if self.out_prefix is not None:
os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)
def on_loader_start(self, state: State):
self.predictions = defaultdict(lambda: [])
def on_batch_end(self, state: State):
dct = state.batch_out
dct = {key: value.detach().cpu().numpy() for key, value in dct.items()}
for key, value in dct.items():
self.predictions[key].append(value)
def on_loader_end(self, state: State):
self.predictions = {
key: np.concatenate(value, axis=0)
for key, value in self.predictions.items()
}
if self.out_prefix is not None:
for key, value in self.predictions.items():
suffix = ".".join([state.loader_name, key])
np.save(f"{self.out_prefix}/{suffix}.npy", value)
class InferMaskCallback(Callback):
def __init__(
self,
out_dir=None,
out_prefix=None,
input_key=None,
output_key=None,
name_key=None,
mean=None,
std=None,
threshold: float = 0.5,
mask_strength: float = 0.5,
mask_type: str = "soft"
):
super().__init__(CallbackOrder.Internal)
self.out_dir = out_dir
self.out_prefix = out_prefix
self.mean = mean or np.array([0.485, 0.456, 0.406])
self.std = std or np.array([0.229, 0.224, 0.225])
assert input_key is not None
assert output_key is not None
self.threshold = threshold
self.mask_strength = mask_strength
self.mask_type = mask_type
self.input_key = input_key
self.output_key = output_key
self.name_key = name_key
self.counter = 0
self._keys_from_state = ["out_dir", "out_prefix"]
def on_stage_start(self, state: State):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
# assert self.out_prefix is not None
self.out_prefix = self.out_prefix \
if self.out_prefix is not None \
else ""
if self.out_dir is not None:
self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)
def on_loader_start(self, state: State):
lm = state.loader_name
os.makedirs(f"{self.out_prefix}/{lm}/", exist_ok=True)
def on_batch_end(self, state: State):
lm = state.loader_name
names = state.batch_in.get(self.name_key, [])
features = state.batch_in[self.input_key].detach().cpu()
images = utils.tensor_to_ndimage(features)
logits = state.batch_out[self.output_key]
logits = torch.unsqueeze_(logits, dim=1) \
if len(logits.shape) < 4 \
else logits
if self.mask_type == "soft":
probabilities = torch.sigmoid(logits)
else:
probabilities = F.softmax(logits, dim=1)
probabilities = probabilities.detach().cpu().numpy()
masks = []
for probability in probabilities:
mask = np.zeros_like(probability[0], dtype=np.int32)
for i, ch in enumerate(probability):
mask[ch >= self.threshold] = i + 1
masks.append(mask)
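        # Worked example (hypothetical numbers): with threshold=0.5 and a
        # pixel whose 3 channel scores are (0.2, 0.7, 0.6), channels 2 and 3
        # both cross the threshold and the later one wins, so the pixel is
        # labelled 3 (labels are 1-based; 0 stays background).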
for i, (image, mask) in enumerate(zip(images, masks)):
try:
suffix = names[i]
except IndexError:
suffix = f"{self.counter:06d}"
self.counter += 1
mask = label2rgb(mask, bg_label=0)
image = image * (1 - self.mask_strength) \
+ mask * self.mask_strength
image = (image * 255).clip(0, 255).round().astype(np.uint8)
filename = f"{self.out_prefix}/{lm}/{suffix}.jpg"
imageio.imwrite(filename, image)
__all__ = ["InferCallback", "InferMaskCallback"]
| [
"[email protected]"
] | |
807ce2ef2b7594044dfcedb3be33a7a555fbea60 | f24c35bb0919f9ad75f45e7906691c3189536b33 | /chengbinWorkSpace/droneLanding/python/Tello/path-plan.py | a09ea6ba4b28dcee3cbd474fc8a53b485a698cdf | [] | no_license | mfkiwl/supreme-xcb | 9b941f49bab5a811d23a0cd75790d1e5722aa9f0 | d1287657607bf86d4b1393acf285951760670925 | refs/heads/main | 2023-03-07T12:10:28.288282 | 2021-03-02T11:46:00 | 2021-03-02T11:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | '''
brief:
Version:
Autor: shuike
Date: 2021-01-12 19:23:39
LastEditors: shuike
LastEditTime: 2021-01-12 19:23:39
FilePath: /droneLanding/python/Tello/path-plan.py
'''
#!/usr/bin/python
import pygame
import json
import math
"""
how many pixels correspond to the actual distance in cm:
70 px = 360 cm --> 360 / 70 = MAP_SIZE_COEFF
"""
MAP_SIZE_COEFF = 5.14
pygame.init()
screen = pygame.display.set_mode([720, 720])
screen.fill((255, 255, 255))
running = True
class Background(pygame.sprite.Sprite):
def __init__(self, image, location, scale):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(image)
self.image = pygame.transform.rotozoom(self.image, 0, scale)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = location
def get_dist_btw_pos(pos0, pos1):
"""
    Get the distance between two mouse positions.
"""
x = abs(pos0[0] - pos1[0])
y = abs(pos0[1] - pos1[1])
dist_px = math.hypot(x, y)
dist_cm = dist_px * MAP_SIZE_COEFF
return int(dist_cm), int(dist_px)
def get_angle_btw_line(pos0, pos1, posref):
"""
    Get the angle between two lines relative to 'posref'.
NOTE: using dot product calculation.
"""
ax = posref[0] - pos0[0]
ay = posref[1] - pos0[1]
bx = posref[0] - pos1[0]
by = posref[1] - pos1[1]
# Get dot product of pos0 and pos1.
_dot = (ax * bx) + (ay * by)
# Get magnitude of pos0 and pos1.
_magA = math.sqrt(ax**2 + ay**2)
_magB = math.sqrt(bx**2 + by**2)
_rad = math.acos(_dot / (_magA * _magB))
# Angle in degrees.
angle = (_rad * 180) / math.pi
return int(angle)
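# Quick sanity check (hypothetical points): with posref=(0, 0), pos0=(1, 0)
# and pos1=(0, 1) the two vectors are perpendicular, so get_angle_btw_line
# returns 90.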
"""
Main capturing mouse program.
"""
# Load background image.
bground = Background('image.png', [0, 0], 1.6)
screen.blit(bground.image, bground.rect)
path_wp = []
index = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
path_wp.append(pos)
if index > 0:
pygame.draw.line(screen, (255, 0, 0), path_wp[index-1], pos, 2)
index += 1
pygame.display.update()
"""
Compute the waypoints (distance and angle).
"""
# Append first pos ref. (dummy)
path_wp.insert(0, (path_wp[0][0], path_wp[0][1] - 10))
path_dist_cm = []
path_dist_px = []
path_angle = []
for index in range(len(path_wp)):
# Skip the first and second index.
if index > 1:
dist_cm, dist_px = get_dist_btw_pos(path_wp[index-1], path_wp[index])
path_dist_cm.append(dist_cm)
path_dist_px.append(dist_px)
# Skip the first and last index.
if index > 0 and index < (len(path_wp) - 1):
angle = get_angle_btw_line(path_wp[index-1], path_wp[index+1], path_wp[index])
path_angle.append(angle)
# Print out the information.
print('path_wp: {}'.format(path_wp))
print('dist_cm: {}'.format(path_dist_cm))
print('dist_px: {}'.format(path_dist_px))
print('dist_angle: {}'.format(path_angle))
"""
Save waypoints into JSON file.
"""
waypoints = []
for index in range(len(path_dist_cm)):
waypoints.append({
"dist_cm": path_dist_cm[index],
"dist_px": path_dist_px[index],
"angle_deg": path_angle[index]
})
# Save to JSON file.
f = open('waypoint.json', 'w+')
path_wp.pop(0)
json.dump({
"wp": waypoints,
"pos": path_wp
}, f, indent=4)
f.close() | [
"[email protected]"
] | |
c9316b33117f1c9cc1e359f48d5e48384095555c | a34ec07c3464369a88e68c9006fa1115f5b61e5f | /N_Queue/MonotonicQueue/L3_862_Shortest_Subarray_with_Sum_at_Least_K.py | 75b2d94429dd60dfa0b738814446722e2d7d5e6d | [] | no_license | 824zzy/Leetcode | 9220f2fb13e03d601d2b471b5cfa0c2364dbdf41 | 93b7f4448a366a709214c271a570c3399f5fc4d3 | refs/heads/master | 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 | Python | UTF-8 | Python | false | false | 813 | py | """ https://leetcode.com/problems/shortest-subarray-with-sum-at-least-k/
Transform the problem to find the shortest sliding window with sum >= k,
we can use a monotonic increasing queue to maintain the prefix sum,
and try to make queue head as small(but larger than k) as possible and queue tail as large as possible.
"""
from header import *  # local helper module, assumed to re-export List, deque, inf, accumulate
class Solution:
def shortestSubarray(self, A: List[int], k: int) -> int:
A = list(accumulate(A, initial=0))
dq = deque()
ans = inf
for i in range(len(A)):
# update ans based on head of queue
while dq and A[i]-A[dq[0]]>=k: ans = min(ans, i-dq.popleft())
# ensure monotonic increasing
while dq and A[dq[-1]]>=A[i]: dq.pop()
dq.append(i)
return ans if ans!=inf else -1 | [
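# Minimal usage sketch (hypothetical driver for this repo's harness):
if __name__ == '__main__':
    assert Solution().shortestSubarray([2, -1, 2], 3) == 3   # whole array qualifies
    assert Solution().shortestSubarray([1, 2], 4) == -1      # no subarray reaches 4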
"[email protected]"
] | |
d79b390ee107b353b56586cffd43e1fbeba7d65e | 8998a6cf66578453249544ca10b4239615751c53 | /setup.py | 6b66ea18fd958460305ba03c5534b4cbfac428fa | [
"MIT"
] | permissive | kwaegema/jicirodsmanager | be7c3857989c78a0cde4d8d41da45559dcc15499 | aca97415acb8f1b40bbb72c1c05b25fe20808d84 | refs/heads/master | 2021-04-15T18:39:25.766088 | 2017-11-15T11:19:57 | 2017-11-15T11:19:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from setuptools import setup
url = "https://github.com/JIC-CSB/jicirodsmanager"
version = "1.1.0"
readme = open('README.rst').read()
dsc = "Python tools to manage users/groups/quotas/namespaces in an iRODS zone",
setup(name="jicirodsmanager",
packages=["jicirodsmanager"],
version=version,
description=dsc,
long_description=readme,
include_package_data=True,
author="Tjelvar Olsson",
author_email="[email protected]",
url=url,
install_requires=[],
download_url="{}/tarball/{}".format(url, version),
license="MIT")
| [
"[email protected]"
] | |
20d0d8ba1ae8681a0c74b1b4783294fcc0c6f309 | c058f51b99f91faebf27183b2b579e9f96e0d8f5 | /botorch/utils/multi_objective/box_decompositions/box_decomposition.py | 5b1450dd1a1caf433a06fb4e7627e1cfb00b8c39 | [
"MIT"
] | permissive | pytorch/botorch | 255d62f698cc615c750e9343c278a63c7e96a586 | 4cc5ed59b2e8a9c780f786830c548e05cc74d53c | refs/heads/main | 2023-08-22T15:23:51.071048 | 2023-08-22T05:30:38 | 2023-08-22T05:30:38 | 142,940,093 | 2,891 | 373 | MIT | 2023-09-13T00:16:13 | 2018-07-30T23:59:57 | Jupyter Notebook | UTF-8 | Python | false | false | 13,382 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Box decomposition algorithms.
References
.. [Lacour17]
R. Lacour, K. Klamroth, C. Fonseca. A box decomposition algorithm to
compute the hypervolume indicator. Computers & Operations Research,
Volume 79, 2017.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Optional
import torch
from botorch.exceptions.errors import BotorchError
from botorch.utils.multi_objective.box_decompositions.utils import (
_expand_ref_point,
_pad_batch_pareto_frontier,
update_local_upper_bounds_incremental,
)
from botorch.utils.multi_objective.pareto import is_non_dominated
from torch import Tensor
from torch.nn import Module
class BoxDecomposition(Module, ABC):
r"""An abstract class for box decompositions.
Note: Internally, we store the negative reference point (minimization).
:meta private:
"""
def __init__(
self, ref_point: Tensor, sort: bool, Y: Optional[Tensor] = None
) -> None:
"""Initialize BoxDecomposition.
Args:
ref_point: A `m`-dim tensor containing the reference point.
sort: A boolean indicating whether to sort the Pareto frontier.
Y: A `(batch_shape) x n x m`-dim tensor of outcomes.
"""
super().__init__()
self._neg_ref_point = -ref_point
self.sort = torch.tensor(sort, dtype=torch.bool)
self.num_outcomes = ref_point.shape[-1]
self.register_buffer("hypercell_bounds", None)
if Y is not None:
if Y.isnan().any():
raise ValueError(
"NaN inputs are not supported. Got Y with "
f"{Y.isnan().sum()} NaN values."
)
self._neg_Y = -Y
self._validate_inputs()
self._neg_pareto_Y = self._compute_pareto_Y()
self.partition_space()
else:
self._neg_Y = None
self._neg_pareto_Y = None
@property
def pareto_Y(self) -> Tensor:
r"""This returns the non-dominated set.
Returns:
A `n_pareto x m`-dim tensor of outcomes.
"""
if self._neg_pareto_Y is not None:
return -self._neg_pareto_Y
raise BotorchError("pareto_Y has not been initialized")
@property
def ref_point(self) -> Tensor:
r"""Get the reference point.
Returns:
A `m`-dim tensor of outcomes.
"""
return -self._neg_ref_point
@property
def Y(self) -> Tensor:
r"""Get the raw outcomes.
Returns:
A `n x m`-dim tensor of outcomes.
"""
if self._neg_Y is not None:
return -self._neg_Y
raise BotorchError("Y data has not been initialized")
def _compute_pareto_Y(self) -> Tensor:
if self._neg_Y is None:
raise BotorchError("Y data has not been initialized")
# is_non_dominated assumes maximization
if self._neg_Y.shape[-2] == 0:
return self._neg_Y
# assumes maximization
pareto_Y = -_pad_batch_pareto_frontier(
Y=self.Y,
ref_point=_expand_ref_point(
ref_point=self.ref_point, batch_shape=self.batch_shape
),
)
if not self.sort:
return pareto_Y
# sort by first objective
if len(self.batch_shape) > 0:
pareto_Y = pareto_Y.gather(
index=torch.argsort(pareto_Y[..., :1], dim=-2).expand(pareto_Y.shape),
dim=-2,
)
else:
pareto_Y = pareto_Y[torch.argsort(pareto_Y[:, 0])]
return pareto_Y
def _reset_pareto_Y(self) -> bool:
r"""Update the non-dominated front.
Returns:
A boolean indicating whether the Pareto frontier has changed.
"""
pareto_Y = self._compute_pareto_Y()
if (self._neg_pareto_Y is None) or not torch.equal(
pareto_Y, self._neg_pareto_Y
):
self._neg_pareto_Y = pareto_Y
return True
return False
def partition_space(self) -> None:
r"""Compute box decomposition."""
if self.num_outcomes == 2:
try:
self._partition_space_2d()
except NotImplementedError:
self._partition_space()
else:
self._partition_space()
def _partition_space_2d(self) -> None:
r"""Compute box decomposition for 2 objectives."""
raise NotImplementedError
@abstractmethod
def _partition_space(self) -> None:
r"""Partition the non-dominated space into disjoint hypercells.
This method supports an arbitrary number of outcomes, but is
less efficient than `partition_space_2d` for the 2-outcome case.
"""
@abstractmethod
def get_hypercell_bounds(self) -> Tensor:
r"""Get the bounds of each hypercell in the decomposition.
Returns:
A `2 x num_cells x num_outcomes`-dim tensor containing the
lower and upper vertices bounding each hypercell.
"""
def _update_neg_Y(self, Y: Tensor) -> bool:
r"""Update the set of outcomes.
Returns:
A boolean indicating if _neg_Y was initialized.
"""
if Y.isnan().any():
raise ValueError(
"NaN inputs are not supported. Got Y with "
f"{Y.isnan().sum()} NaN values."
)
# multiply by -1, since internally we minimize.
if self._neg_Y is not None:
self._neg_Y = torch.cat([self._neg_Y, -Y], dim=-2)
return False
self._neg_Y = -Y
return True
def update(self, Y: Tensor) -> None:
r"""Update non-dominated front and decomposition.
By default, the partitioning is recomputed. Subclasses can override
this functionality.
Args:
Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes.
"""
self._update_neg_Y(Y=Y)
self.reset()
def _validate_inputs(self) -> None:
self.batch_shape = self.Y.shape[:-2]
self.num_outcomes = self.Y.shape[-1]
if len(self.batch_shape) > 1:
raise NotImplementedError(
f"{type(self).__name__} only supports a single "
f"batch dimension, but got {len(self.batch_shape)} "
"batch dimensions."
)
elif len(self.batch_shape) > 0 and self.num_outcomes > 2:
raise NotImplementedError(
f"{type(self).__name__} only supports a batched box "
f"decompositions in the 2-objective setting."
)
def reset(self) -> None:
r"""Reset non-dominated front and decomposition."""
self._validate_inputs()
is_new_pareto = self._reset_pareto_Y()
# Update decomposition if the Pareto front changed
if is_new_pareto:
self.partition_space()
@abstractmethod
def _compute_hypervolume_if_y_has_data(self) -> Tensor:
"""Compute hypervolume for the case that there is data in self._neg_pareto_Y."""
def compute_hypervolume(self) -> Tensor:
r"""Compute hypervolume that is dominated by the Pareto Froniter.
Returns:
A `(batch_shape)`-dim tensor containing the hypervolume dominated by
each Pareto frontier.
"""
if self._neg_pareto_Y is None:
return torch.tensor(0.0)
if self._neg_pareto_Y.shape[-2] == 0:
return torch.zeros(
self._neg_pareto_Y.shape[:-2],
dtype=self._neg_pareto_Y.dtype,
device=self._neg_pareto_Y.device,
)
return self._compute_hypervolume_if_y_has_data()
class FastPartitioning(BoxDecomposition, ABC):
r"""A class for partitioning the (non-)dominated space into hyper-cells.
Note: this assumes maximization. Internally, it multiplies outcomes by -1
and performs the decomposition under minimization.
This class is abstract to support to two applications of Alg 1 from
[Lacour17]_: 1) partitioning the space that is dominated by the Pareto
frontier and 2) partitioning the space that is not dominated by the
Pareto frontier.
:meta private:
"""
def __init__(
self,
ref_point: Tensor,
Y: Optional[Tensor] = None,
) -> None:
"""
Args:
ref_point: A `m`-dim tensor containing the reference point.
Y: A `(batch_shape) x n x m`-dim tensor
"""
super().__init__(ref_point=ref_point, Y=Y, sort=ref_point.shape[-1] == 2)
def update(self, Y: Tensor) -> None:
r"""Update non-dominated front and decomposition.
Args:
Y: A `(batch_shape) x n x m`-dim tensor of new, incremental outcomes.
"""
if self._update_neg_Y(Y=Y):
self.reset()
else:
if self.num_outcomes == 2 or self._neg_pareto_Y.shape[-2] == 0:
                # If there are two objectives, recompute the box decomposition
# because the partitions can be computed analytically.
# If the current pareto set has no points, recompute the box
# decomposition.
self.reset()
else:
# only include points that are better than the reference point
better_than_ref = (Y > self.ref_point).all(dim=-1)
Y = Y[better_than_ref]
Y_all = torch.cat([self._neg_pareto_Y, -Y], dim=-2)
pareto_mask = is_non_dominated(-Y_all)
# determine the number of points in Y that are Pareto optimal
num_new_pareto = pareto_mask[-Y.shape[-2] :].sum()
self._neg_pareto_Y = Y_all[pareto_mask]
if num_new_pareto > 0:
# update local upper bounds for the minimization problem
self._U, self._Z = update_local_upper_bounds_incremental(
# this assumes minimization
new_pareto_Y=self._neg_pareto_Y[-num_new_pareto:],
U=self._U,
Z=self._Z,
)
# use the negative local upper bounds as the new pareto
# frontier for the minimization problem and perform
# box decomposition on dominated space.
self._get_partitioning()
@abstractmethod
def _get_single_cell(self) -> None:
r"""Set the partitioning to be a single cell in the case of no Pareto points.
This method should set self.hypercell_bounds
"""
pass # pragma: no cover
def partition_space(self) -> None:
if self._neg_pareto_Y.shape[-2] == 0:
self._get_single_cell()
else:
super().partition_space()
def _partition_space(self):
r"""Partition the non-dominated space into disjoint hypercells.
This method supports an arbitrary number of outcomes, but is
less efficient than `partition_space_2d` for the 2-outcome case.
"""
if len(self.batch_shape) > 0:
# this could be triggered when m=2 outcomes and
# BoxDecomposition._partition_space_2d is not overridden.
raise NotImplementedError(
"_partition_space does not support batch dimensions."
)
# this assumes minimization
# initialize local upper bounds
self.register_buffer("_U", self._neg_ref_point.unsqueeze(-2).clone())
# initialize defining points to be the dummy points \hat{z} that are
# defined in Sec 2.1 in [Lacour17]_. Note that in [Lacour17]_, outcomes
# are assumed to be between [0,1], so they used 0 rather than -inf.
self._Z = torch.zeros(
1,
self.num_outcomes,
self.num_outcomes,
dtype=self.Y.dtype,
device=self.Y.device,
)
for j in range(self.ref_point.shape[-1]):
# use ref point for maximization as the ideal point for minimization.
self._Z[0, j] = float("-inf")
self._Z[0, j, j] = self._U[0, j]
# incrementally update local upper bounds and defining points
# for each new Pareto point
self._U, self._Z = update_local_upper_bounds_incremental(
new_pareto_Y=self._neg_pareto_Y,
U=self._U,
Z=self._Z,
)
self._get_partitioning()
@abstractmethod
def _get_partitioning(self) -> None:
r"""Compute partitioning given local upper bounds for the minimization problem.
This method should set self.hypercell_bounds
"""
pass # pragma: no cover
def get_hypercell_bounds(self) -> Tensor:
r"""Get the bounds of each hypercell in the decomposition.
Returns:
A `2 x (batch_shape) x num_cells x m`-dim tensor containing the
lower and upper vertices bounding each hypercell.
"""
return self.hypercell_bounds
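# Hedged usage sketch: the abstract hooks above are implemented by concrete
# subclasses; `DominatedPartitioning` and its module path are assumed from the
# surrounding botorch package.
#     import torch
#     from botorch.utils.multi_objective.box_decompositions.dominated import (
#         DominatedPartitioning)
#     Y = torch.tensor([[1.0, 2.0], [2.0, 1.0]])
#     bd = DominatedPartitioning(ref_point=torch.zeros(2), Y=Y)
#     bd.compute_hypervolume()  # -> tensor(3.) for this Pareto front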
| [
"[email protected]"
] | |
ed67f09cdd0c79e6d529bf5923da918fd286314f | 5492374aeb1df9fb5a2cbef8eb8a48af47556f18 | /galaga/galaga.py | 794facec78d3a8fd39356a25dad30224681820b0 | [] | no_license | HeeeeeJinJeong/Practice_Python | 136397f43a7ba525ff2561adb85de353c1f1cc21 | 1e8a0c525bf35324d5e9f3f1ff7747b2352de7b3 | refs/heads/master | 2020-08-07T02:59:55.583054 | 2019-10-16T03:40:29 | 2019-10-16T03:40:29 | 213,265,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,075 | py | import pygame # pygame 라이브러리 임포트
import random # random 라이브러리 임포트
from time import sleep
# 게임에 사용되는 전역변수 정의
BLACK = (0, 0, 0) # 게임 바탕화면의 색상
RED = (255, 0, 0)
pad_width = 480 # 게임화면의 가로크기
pad_height = 640 # 게임화면의 세로크기
player_width = 36
player_height = 38
enemy_width = 26
enmey_height = 20
# 적을 맞춘 개수 계산하는 함수
def drawScore(count):
global gamepad
font = pygame.font.SysFont(None, 20)
text = font.render('Enemy Kills:' + str(count), True, (255, 255, 255))
gamepad.blit(text, (0, 0))
# 적이 화면 아래로 통과한 개수
def drawPassed(count):
global gamepad
font = pygame.font.SysFont(None, 20)
text = font.render('Enemy Passed:' + str(count), True, RED)
gamepad.blit(text, (360, 0))
# 화면에 글씨 보이게 하기
def dispMessage(text):
global gamepad
textfont = pygame.font.Font('freesansbold.ttf', 80)
text = textfont.render(text, True, RED)
textpos = text.get_rect()
textpos.center = (pad_width / 2, pad_height / 2)
gamepad.blit(text, textpos)
pygame.display.update()
sleep(2)
runGame()
# 전투기가 적과 충돌했을 때 메시지 출력
def crash():
global gamepad
dispMessage('Crashed!')
# 게임 오버 메시지 출력
def gameover():
global gamepad
dispMessage('Game Over')
# 게임에 등장하는 객체를 드로잉
def drawObject(obj, x, y):
global gamepad
gamepad.blit(obj, (x, y))
# 게임 실행 메인 함수
def runGame():
global gamepad, clock, player, enemy, bullet
# 전투기 무기에 적이 맞았을 경우 True로 설정되는 플래그
isShot = False
shotcount = 0
enemypassed = 0
    # List holding the bullet coordinates
bullet_xy = []
# 전투기 초기 위치 (x,y)
x = pad_width * 0.45
y = pad_height * 0.9
x_change = 0
# 적 초기위치 설정
enemy_x = random.randrange(0, pad_width - enemy_width)
enemy_y = 0
enemy_speed = 3
ongame = False
while not ongame:
for event in pygame.event.get():
            if event.type == pygame.QUIT:  # window-close event
                ongame = True  # end the main loop
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change -= 5
elif event.key == pygame.K_RIGHT:
x_change += 5
                # Fire a bullet when the space bar is pressed; at most three bullets can be in flight at once
elif event.key == pygame.K_SPACE:
if len(bullet_xy) < 3:
bullet_x = x + player_width / 2
bullet_y = y - player_height
bullet_xy.append([bullet_x, bullet_y])
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_change = 0
        gamepad.fill(BLACK)  # clear the game screen to black
        # Re-clamp the fighter position to the window
x += x_change
if x < 0:
x = 0
elif x > pad_width - player_width:
x = pad_width - player_width
        # Check whether the player's fighter collided with the enemy
if y < enemy_y + enmey_height:
if (enemy_x > x and enemy_x < x + player_width) or \
(enemy_x + enemy_width > x and enemy_x + enemy_width < x + player_width):
crash()
        drawObject(player, x, y)  # draw the fighter at (x, y) on the game screen
        # Move and draw the bullets
if len(bullet_xy) != 0:
for i, bxy in enumerate(bullet_xy):
                bxy[1] -= 10  # decrease the bullet's y coordinate by 10 (move it up)
bullet_xy[i][1] = bxy[1]
                # The bullet shot down the enemy
if bxy[1] < enemy_y:
if bxy[0] > enemy_x and bxy[0] < enemy_x + enemy_width:
bullet_xy.remove(bxy)
isShot = True
shotcount += 1
                if bxy[1] <= 0:  # the bullet left the screen
try:
                        bullet_xy.remove(bxy)  # remove the bullet
except:
pass
if len(bullet_xy) != 0:
for bx, by in bullet_xy:
drawObject(bullet, bx, by)
drawScore(shotcount)
        # Move the enemy downward
enemy_y += enemy_speed
if enemy_y > pad_height:
enemy_y = 0
enemy_x = random.randrange(0, pad_width - enemy_width)
enemypassed += 1
if enemypassed == 3:
gameover()
drawPassed(enemypassed)
        # If the enemy was hit, speed it up and respawn it at the top
if isShot:
enemy_speed += 1
if enemy_speed >= 10:
enemy_speed = 10
enemy_x = random.randrange(0, pad_width - enemy_width)
enemy_y = 0
isShot = False
drawObject(enemy, enemy_x, enemy_y)
        pygame.display.update()  # redraw the game screen
        clock.tick(60)  # cap the frame rate at 60 FPS
pygame.quit()
# 게임 초기화 함수
def initGame():
global gamepad, clock, player, enemy, bullet # 게임이 진행될 게임 화면, 게임의 초당 프레임(FPS), 비행기 변수 선언, 적 선언
pygame.init()
gamepad = pygame.display.set_mode((pad_width, pad_height)) # 게임화면의 가로세로크기를 설정
pygame.display.set_caption('Shooting Game') # 게임화면의 제목 지정
player = pygame.image.load('player.png')
enemy = pygame.image.load('enemy.png')
bullet = pygame.image.load('bullet.png')
clock = pygame.time.Clock() # 초당 프레임수를 설정할 수 있는 Clock객체 생성
initGame()
runGame() | [
"[email protected]"
] | |
8dcb9d84c112c2909facdc43f23f166eb593f67d | 9a9e739dcc559476ba796510182374ad460f2f8b | /PA2/PA2 2013/PA2-12/Asitha/pa2-12-2013.py | 417b2661135cadcac82806c583d3fe2709b266e0 | [] | no_license | Divisekara/Python-Codes-First-sem | 542e8c0d4a62b0f66c598ff68a5c1c37c20e484d | e4ca28f07ecf96181af3c528d74377ab02d83353 | refs/heads/master | 2022-11-28T01:12:51.283260 | 2020-08-01T08:55:53 | 2020-08-01T08:55:53 | 284,220,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | def getText():
try:
FileOpen=open("FileIn.txt","r")
L=[]
while True:
L.append(FileOpen.readline().split())
if L[-1]==[]:
break
FileOpen.close()
except IOError:
print "File Not Found"
else:
L.pop(-1)
return L
def calculations(L):
L1=[]
for i in L:
Temp=[]
Temp.append(i.pop(0))
Temp.append(sum(map(int,i)))
Temp.append(round(float(sum(map(int,i)))/len(i),2))
L1.append(Temp)
sums=[]
for j in L1:
sums.append(j[1])
ranks=sorted(sums)[::-1]
for k in L1:
k.append(ranks.index(k[1])+1)
return L1
def show(L):
L1=[]
for i in L:
L1.append(" ".join(map(str,i)))
lines="\n".join(L1)
print lines
return lines
def saveFile(s):
try:
FileCreate=open("result.txt","w")
FileCreate.write(s)
FileCreate.close()
    except IOError:
        print "File Error"
saveFile(show(calculations(getText())))
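# Hedged example (hypothetical FileIn.txt contents):
#     Alice 40 50
#     Bob 30 20
# getText() -> [['Alice','40','50'], ['Bob','30','20']]; calculations() adds
# the total, the average (rounded to 2 places) and the rank by total, so
# result.txt holds "Alice 90 45.0 1" and "Bob 50 25.0 2".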
| [
"[email protected]"
] | |
cd0d6330ebfe7555e50056381334270665abfb7c | 30a2a924eb32e7297b5a99785950467f25ea785d | /ppgmle.py | 65d92e6491d2764ae34a67f18c76334f0b21e094 | [] | no_license | zshwuhan/Reinforcement-Learning-of-Spatio-Temporal-Point-Processes | 1a794e83491b52dea5db3926de91779a9e661a17 | a3f98e77b56c03839dcdb545b17b3675e7c43878 | refs/heads/master | 2020-07-22T16:18:10.020860 | 2019-07-02T18:49:02 | 2019-07-02T18:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,065 | py | import sys
import arrow
import utils
import numpy as np
import tensorflow as tf
from tfgen import SpatialTemporalHawkes
from ppgrl import RL_Hawkes_Generator
from stppg import GaussianMixtureDiffusionKernel, HawkesLam, SpatialTemporalPointProcess, StdDiffusionKernel
class MLE_Hawkes_Generator(object):
"""
Reinforcement Learning Based Point Process Generator
"""
def __init__(self, T, S, layers, n_comp, batch_size, C=1., data_dim=3, keep_latest_k=None, lr=1e-3, reg_scale=0.):
"""
Params:
- T: the maximum time of the sequences
- S: the space of location
- C: the constant in diffusion kernel
- batch_size: batch size of the training data
- maximum: upper bound of the conditional intensity
- data_dim: data dimension (=3 by default)
- keep_latest_k: only compute latest k points in log-likelihood calculation
        - lr: learning rate for the Adam optimizer
"""
self.batch_size = batch_size
# Hawkes process
self.hawkes = SpatialTemporalHawkes(T, S, layers=layers, n_comp=n_comp, C=C, maximum=1e+3, verbose=True)
# regularization
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=reg_scale, scope=None)
penalty_term = tf.contrib.layers.apply_regularization(l1_regularizer, self.hawkes.Wss)
# input tensors: expert sequences (time, location, marks)
self.input_seqs = tf.placeholder(tf.float32, [batch_size, None, data_dim]) # [batch_size, seq_len, data_dim]
self.cost = -1 * self.log_likelihood(S, keep_latest_k=keep_latest_k) / batch_size # + penalty_term
# Adam optimizer
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(lr, global_step, decay_steps=100, decay_rate=0.99, staircase=True)
self.optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.6, beta2=0.9).minimize(self.cost, global_step=global_step)
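        # Hedged numeric check of the staircase schedule above: with lr=1e-1
        # the rate stays at 0.1 for the first 100 optimizer steps, drops to
        # 0.1 * 0.99 = 0.099 after step 100, and to 0.1 * 0.99 ** 2 = 0.09801
        # after step 200.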
def log_likelihood(self, S, keep_latest_k):
"""
compute the log-likelihood of the input data given the hawkes point process.
"""
# log-likelihood
loglikli = 0.
for b in range(batch_size):
seq = self.input_seqs[b, :, :]
# mask_t = tf.cast(seq[:, 0] > 0, tf.float32)
# trunc_seq = tf.boolean_mask(seq, mask_t)
# seq_len = tf.shape(trunc_seq)[0]
# # calculate the log conditional pdf for each of data points in the sequence.
# loglikli += tf.reduce_sum(tf.scan(
# lambda a, i: self.hawkes.log_conditional_pdf(trunc_seq[:i, :], keep_latest_k=keep_latest_k),
# tf.range(1, seq_len+1), # from the first point to the last point
# initializer=np.array(0., dtype=np.float32)))
loglikli += self.hawkes.log_likelihood(seq)
return loglikli
def train(self, sess,
epoches, # number of epoches (how many times is the entire dataset going to be trained)
expert_seqs, # [n, seq_len, data_dim=3]
pretrained=False):
"""train the point process generator given expert sequences."""
# initialization
if not pretrained:
# initialize network parameters
init_op = tf.global_variables_initializer()
sess.run(init_op)
print("[%s] parameters are initialized." % arrow.now(), file=sys.stderr)
# data configurations
# - number of expert sequences
n_data = expert_seqs.shape[0]
# - number of batches
n_batches = int(n_data / batch_size)
# training over epoches
all_train_cost = []
for epoch in range(epoches):
# shuffle indices of the training samples
shuffled_ids = np.arange(n_data)
np.random.shuffle(shuffled_ids)
# training over batches
avg_train_cost = []
for b in range(n_batches):
idx = np.arange(batch_size * b, batch_size * (b + 1))
# training and testing indices selected in current batch
batch_train_ids = shuffled_ids[idx]
# training and testing batch data
batch_train_seqs = expert_seqs[batch_train_ids, :, :]
# optimization procedure
sess.run(self.optimizer, feed_dict={self.input_seqs: batch_train_seqs})
# cost for train batch and test batch
train_cost = sess.run(self.cost, feed_dict={self.input_seqs: batch_train_seqs})
print("[%s] batch training cost: %.2f." % (arrow.now(), train_cost), file=sys.stderr)
# record cost for each batch
avg_train_cost.append(train_cost)
all_train_cost.append(train_cost)
# training log output
avg_train_cost = np.mean(avg_train_cost)
print('[%s] Epoch %d (n_train_batches=%d, batch_size=%d)' % (arrow.now(), epoch, n_batches, batch_size), file=sys.stderr)
print('[%s] Training cost:\t%f' % (arrow.now(), avg_train_cost), file=sys.stderr)
# save all training cost into numpy file.
np.savetxt("results/robbery_mle_train_cost.txt", all_train_cost, delimiter=",")
if __name__ == "__main__":
# Unittest example
S = [[-1., 1.], [-1., 1.]]
T = [0., 10.]
data = np.load('../Spatio-Temporal-Point-Process-Simulator/data/rescale.ambulance.perday.npy')
data = data[:320, 1:51, :] # remove the first element in each seqs, since t = 0
da = utils.DataAdapter(init_data=data, S=S, T=T)
# data = np.load('../Spatio-Temporal-Point-Process-Simulator/data/northcal.earthquake.perseason.npy')
# da = utils.DataAdapter(init_data=data)
seqs = da.normalize(data)
print(da)
print(seqs.shape)
# training model
with tf.Session() as sess:
batch_size = 32
epoches = 10
layers = [5]
n_comp = 5
ppg = MLE_Hawkes_Generator(
T=T, S=S, layers=layers, n_comp=n_comp,
batch_size=batch_size, data_dim=3,
keep_latest_k=None, lr=1e-1, reg_scale=0.)
ppg.train(sess, epoches, seqs)
ppg.hawkes.save_params_npy(sess,
path="../Spatio-Temporal-Point-Process-Simulator/data/rescale_ambulance_mle_gaussian_mixture_params.npz")
# generate samples and test mmd metric
# test_size = 20
# params = np.load('../Spatio-Temporal-Point-Process-Simulator/data/earthquake_mle_gaussian_mixture_params.npz')
# mu = .1 # params['mu']
# beta = 1. # params['beta']
# # print(mu)
# # print(beta)
# kernel = GaussianMixtureDiffusionKernel(
# n_comp=n_comp, layers=layers, C=1., beta=beta,
# SIGMA_SHIFT=.05, SIGMA_SCALE=.2, MU_SCALE=.01,
# Wss=params['Wss'], bss=params['bss'], Wphis=params['Wphis'])
# # kernel = StdDiffusionKernel(C=1., beta=1., sigma_x=.08, sigma_y=.08)
# lam = HawkesLam(mu, kernel, maximum=1e+3)
# pp = SpatialTemporalPointProcess(lam)
# learner_seqs = pp.generate(T, S, batch_size=test_size, min_n_points=5, verbose=True)[0]
# # uniform samples
# learner_seqs = []
# for i in range(test_size):
# N = 30
# _S = [T] + S
# points = [ np.random.uniform(_S[i][0], _S[i][1], N) for i in range(len(_S)) ]
# points = np.array(points).transpose()
# points = points[points[:, 0].argsort()].tolist()
# learner_seqs.append(points)
# learner_seqs = np.array(learner_seqs)
# expert_seqs = seqs[:test_size, :, :]
# print(learner_seqs.shape)
# # calculate mmd
# rlgen = RL_Hawkes_Generator(T, S, layers, n_comp, test_size)
# mmd = rlgen.mmd(sess, expert_seqs, learner_seqs)
# print(mmd)
| [
"[email protected]"
] | |
776052160c7f3ab8cc4d0d503dc7aeded2f1e521 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /headless/lib/browser/devtools_api/client_api_generator.py | 59a8510ca628b7ac406e3f1ae63c04013130d0c3 | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 20,376 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import functools
import os.path
import re
import sys
try:
import json
except ImportError:
import simplejson as json
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
# to be concurrency-safe. Use absolute path because __file__ is absolute if
# module is imported, and relative if executed directly.
# If paths differ between pre-caching and individual file compilation, the cache
# is regenerated, which causes a race condition and breaks concurrent build,
# since some compile processes will try to read the partially written cache.
module_path, module_filename = os.path.split(os.path.realpath(__file__))
third_party_dir = os.path.normpath(
os.path.join(module_path, os.pardir, os.pardir, os.pardir, os.pardir,
'third_party'))
templates_dir = module_path
# jinja2 is in chromium's third_party directory.
# Insert at 1 so at front to override system libraries, and
# after path[0] == invoking script dir
sys.path.insert(1, third_party_dir)
import jinja2
def ParseArguments(args):
"""Parses command line arguments and returns a (json_api, output_dir) tuple.
"""
cmdline_parser = argparse.ArgumentParser()
cmdline_parser.add_argument('--protocol', required=True)
cmdline_parser.add_argument('--output_dir', required=True)
args = cmdline_parser.parse_args(args)
with open(args.protocol, 'r') as f:
return json.load(f), args.output_dir
def ToTitleCase(name):
return name[:1].upper() + name[1:]
def DashToCamelCase(word):
return ''.join(ToTitleCase(x) for x in word.split('-'))
def CamelCaseToHackerStyle(name):
# Do two passes to insert '_' chars to deal with overlapping matches (e.g.,
# 'LoLoLoL').
name = re.sub(r'([^_])([A-Z][a-z]+?)', r'\1_\2', name)
name = re.sub(r'([^_])([A-Z][a-z]+?)', r'\1_\2', name)
return name.lower()
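# e.g. CamelCaseToHackerStyle('DOMDebugger') -> 'dom_debugger';
# CamelCaseToHackerStyle('LoLoLoL') -> 'lo_lo_lol'.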
def Shorten(js_name, domain_name):
short_name = domain_name + '.'
long_name = 'chromium.DevTools.' + short_name
return js_name.replace(long_name, short_name)
def ShortForm(domain, js_name):
if not 'js_dependencies' in domain:
return js_name
for dependency in domain['js_dependencies']:
js_name = Shorten(js_name, dependency)
js_name = Shorten(js_name, domain['domain'])
return js_name
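# e.g. for the Page domain, 'chromium.DevTools.Page.Frame' shortens to
# 'Page.Frame', and the same shortening is applied for each js_dependency.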
def SanitizeLiteral(literal):
return {
# Rename null enumeration values to avoid a clash with the NULL macro.
'null': 'none',
# Rename literals that clash with Win32 defined macros.
'error': 'err',
'mouseMoved': 'mouse_ptr_moved',
'Strict': 'exact',
'getCurrentTime': 'getCurrentAnimationTime',
# Rename mathematical constants to avoid colliding with C macros.
'Infinity': 'InfinityValue',
'-Infinity': 'NegativeInfinityValue',
'NaN': 'NaNValue',
# Turn negative zero into a safe identifier.
'-0': 'NegativeZeroValue',
}.get(literal, literal)
def InitializeJinjaEnv(cache_dir):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_dir),
# Bytecode cache is not concurrency-safe unless pre-cached:
# if pre-cached this is read-only, but writing creates a race condition.
bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
keep_trailing_newline=True, # Newline-terminate generated files.
lstrip_blocks=True, # So we can indent control flow tags.
trim_blocks=True)
jinja_env.filters.update({
'to_title_case': ToTitleCase,
'dash_to_camelcase': DashToCamelCase,
'camelcase_to_hacker_style': CamelCaseToHackerStyle,
'sanitize_literal': SanitizeLiteral,
})
jinja_env.add_extension('jinja2.ext.loopcontrols')
return jinja_env
def PatchFullQualifiedRefs(json_api):
def PatchFullQualifiedRefsInDomain(json, domain_name):
if isinstance(json, list):
for item in json:
PatchFullQualifiedRefsInDomain(item, domain_name)
if not isinstance(json, dict):
return
for key in json:
if key != '$ref':
PatchFullQualifiedRefsInDomain(json[key], domain_name)
continue
if not '.' in json['$ref']:
json['$ref'] = domain_name + '.' + json['$ref']
for domain in json_api['domains']:
PatchFullQualifiedRefsInDomain(domain, domain['domain'])
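# e.g. {'$ref': 'Frame'} inside the Page domain becomes {'$ref': 'Page.Frame'},
# so every reference is unambiguous across domains.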
def CreateUserTypeDefinition(domain, type):
namespace = CamelCaseToHackerStyle(domain['domain'])
return {
'js_type': '!chromium.DevTools.%s.%s' % (domain['domain'], type['id']),
'return_type': 'std::unique_ptr<::headless::%s::%s>' % (
namespace, type['id']),
'pass_type': 'std::unique_ptr<::headless::%s::%s>' % (
namespace, type['id']),
'to_raw_type': '*%s',
'to_raw_return_type': '%s.get()',
'to_pass_type': 'std::move(%s)',
'type': 'std::unique_ptr<::headless::%s::%s>' % (namespace, type['id']),
'raw_type': '::headless::%s::%s' % (namespace, type['id']),
'raw_pass_type': '::headless::%s::%s*' % (namespace, type['id']),
'raw_return_type': 'const ::headless::%s::%s*' % (namespace, type['id']),
}
def CreateEnumTypeDefinition(domain_name, type):
namespace = CamelCaseToHackerStyle(domain_name)
return {
'js_type': '!chromium.DevTools.%s.%s' % (domain_name, type['id']),
'return_type': '::headless::%s::%s' % (namespace, type['id']),
'pass_type': '::headless::%s::%s' % (namespace, type['id']),
'to_raw_type': '%s',
'to_raw_return_type': '%s',
'to_pass_type': '%s',
'type': '::headless::%s::%s' % (namespace, type['id']),
'raw_type': '::headless::%s::%s' % (namespace, type['id']),
'raw_pass_type': '::headless::%s::%s' % (namespace, type['id']),
'raw_return_type': '::headless::%s::%s' % (namespace, type['id']),
}
def CreateObjectTypeDefinition():
return {
'js_type': 'Object',
'return_type': 'std::unique_ptr<base::DictionaryValue>',
'pass_type': 'std::unique_ptr<base::DictionaryValue>',
'to_raw_type': '*%s',
'to_raw_return_type': '%s.get()',
'to_pass_type': 'std::move(%s)',
'type': 'std::unique_ptr<base::DictionaryValue>',
'raw_type': 'base::DictionaryValue',
'raw_pass_type': 'base::DictionaryValue*',
'raw_return_type': 'const base::DictionaryValue*',
}
def WrapObjectTypeDefinition(type):
id = type.get('id', 'base::Value')
return {
'js_type': '!Object',
'return_type': 'std::unique_ptr<%s>' % id,
'pass_type': 'std::unique_ptr<%s>' % id,
'to_raw_type': '*%s',
'to_raw_return_type': '%s.get()',
'to_pass_type': 'std::move(%s)',
'type': 'std::unique_ptr<%s>' % id,
'raw_type': id,
'raw_pass_type': '%s*' % id,
'raw_return_type': 'const %s*' % id,
}
def CreateAnyTypeDefinition():
return {
'js_type': '*',
'return_type': 'std::unique_ptr<base::Value>',
'pass_type': 'std::unique_ptr<base::Value>',
'to_raw_type': '*%s',
'to_raw_return_type': '%s.get()',
'to_pass_type': 'std::move(%s)',
'type': 'std::unique_ptr<base::Value>',
'raw_type': 'base::Value',
'raw_pass_type': 'base::Value*',
'raw_return_type': 'const base::Value*',
}
def CreateStringTypeDefinition():
return {
'js_type': 'string',
'return_type': 'std::string',
'pass_type': 'const std::string&',
'to_pass_type': '%s',
'to_raw_type': '%s',
'to_raw_return_type': '%s',
'type': 'std::string',
'raw_type': 'std::string',
'raw_pass_type': 'const std::string&',
'raw_return_type': 'std::string',
}
def CreateBinaryTypeDefinition():
return {
'js_type': 'string',
'return_type': 'protocol::Binary',
'pass_type': 'const protocol::Binary&',
'to_pass_type': '%s',
'to_raw_type': '%s',
'to_raw_return_type': '%s',
'type': 'protocol::Binary',
'raw_type': 'protocol::Binary',
'raw_pass_type': 'const protocol::Binary&',
'raw_return_type': 'protocol::Binary',
}
def CreatePrimitiveTypeDefinition(type):
typedefs = {
'number': 'double',
'integer': 'int',
'boolean': 'bool',
}
js_typedefs = {
'number': 'number',
'integer': 'number',
'boolean': 'boolean',
}
return {
'js_type': js_typedefs[type],
'return_type': typedefs[type],
'pass_type': typedefs[type],
'to_pass_type': '%s',
'to_raw_type': '%s',
'to_raw_return_type': '%s',
'type': typedefs[type],
'raw_type': typedefs[type],
'raw_pass_type': typedefs[type],
'raw_return_type': typedefs[type],
}
type_definitions = {}
type_definitions['number'] = CreatePrimitiveTypeDefinition('number')
type_definitions['integer'] = CreatePrimitiveTypeDefinition('integer')
type_definitions['boolean'] = CreatePrimitiveTypeDefinition('boolean')
type_definitions['string'] = CreateStringTypeDefinition()
type_definitions['binary'] = CreateBinaryTypeDefinition()
type_definitions['object'] = CreateObjectTypeDefinition()
type_definitions['any'] = CreateAnyTypeDefinition()
def WrapArrayDefinition(type):
return {
'js_type': '!Array.<%s>' % type['js_type'],
'return_type': 'std::vector<%s>' % type['type'],
'pass_type': 'std::vector<%s>' % type['type'],
'to_raw_type': '%s',
'to_raw_return_type': '&%s',
'to_pass_type': 'std::move(%s)',
'type': 'std::vector<%s>' % type['type'],
'raw_type': 'std::vector<%s>' % type['type'],
'raw_pass_type': 'std::vector<%s>*' % type['type'],
'raw_return_type': 'const std::vector<%s>*' % type['type'],
}
def CreateTypeDefinitions(json_api):
for domain in json_api['domains']:
if not ('types' in domain):
continue
for type in domain['types']:
if type['type'] == 'object':
if 'properties' in type:
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateUserTypeDefinition(domain, type))
else:
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateObjectTypeDefinition())
elif type['type'] == 'array':
type_definitions[domain['domain'] + '.' + type['id']] = (
ResolveType(type))
elif 'enum' in type:
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateEnumTypeDefinition(domain['domain'], type))
type['$ref'] = domain['domain'] + '.' + type['id']
elif type['type'] == 'any':
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateAnyTypeDefinition())
elif type['type'] == 'string':
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateStringTypeDefinition())
elif type['type'] == 'binary':
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateBinaryTypeDefinition())
else:
type_definitions[domain['domain'] + '.' + type['id']] = (
CreatePrimitiveTypeDefinition(type['type']))
def TypeDefinition(name):
return type_definitions[name]
def ResolveType(property):
if '$ref' in property:
return type_definitions[property['$ref']]
elif property['type'] == 'object':
return WrapObjectTypeDefinition(property)
elif property['type'] == 'array':
return WrapArrayDefinition(ResolveType(property['items']))
return type_definitions[property['type']]
def JoinArrays(dict, keys):
result = []
for key in keys:
if key in dict:
result += dict[key]
return result
def SynthesizeEnumType(domain, owner, type):
type['id'] = ToTitleCase(owner) + ToTitleCase(type['name'])
type_definitions[domain['domain'] + '.' + type['id']] = (
CreateEnumTypeDefinition(domain['domain'], type))
type['$ref'] = domain['domain'] + '.' + type['id']
domain['types'].append(type)
def SynthesizeCommandTypes(json_api):
"""Generate types for command parameters, return values and enum
properties.
"""
for domain in json_api['domains']:
if not 'types' in domain:
domain['types'] = []
for type in domain['types']:
if type['type'] == 'object':
for property in type.get('properties', []):
if 'enum' in property and not '$ref' in property:
SynthesizeEnumType(domain, type['id'], property)
for command in domain.get('commands', []):
parameters_required = False
if 'parameters' in command:
for parameter in command['parameters']:
if not 'optional' in parameter:
parameters_required = True
if 'enum' in parameter and not '$ref' in parameter:
SynthesizeEnumType(domain, command['name'], parameter)
parameters_type = {
'id': ToTitleCase(SanitizeLiteral(command['name'])) + 'Params',
'type': 'object',
'description': 'Parameters for the %s command.' % ToTitleCase(
SanitizeLiteral(command['name'])),
'properties': command['parameters']
}
domain['types'].append(parameters_type)
if 'returns' in command:
for parameter in command['returns']:
if 'enum' in parameter and not '$ref' in parameter:
SynthesizeEnumType(domain, command['name'], parameter)
result_type = {
'id': ToTitleCase(SanitizeLiteral(command['name'])) + 'Result',
'type': 'object',
'description': 'Result for the %s command.' % ToTitleCase(
SanitizeLiteral(command['name'])),
'properties': command['returns']
}
domain['types'].append(result_type)
command['parameters_required'] = parameters_required
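# For example, a 'navigate' command in the Page domain gains synthesized
# 'NavigateParams' and 'NavigateResult' object types in the domain's type
# list, which CreateTypeDefinitions later registers as 'Page.NavigateParams'
# and 'Page.NavigateResult'.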
def SynthesizeEventTypes(json_api):
"""Generate types for events and their properties.
Note that parameter objects are also created for events without parameters to
make it easier to introduce parameters later.
"""
for domain in json_api['domains']:
if not 'types' in domain:
domain['types'] = []
for event in domain.get('events', []):
for parameter in event.get('parameters', []):
if 'enum' in parameter and not '$ref' in parameter:
SynthesizeEnumType(domain, event['name'], parameter)
event_type = {
'id': ToTitleCase(event['name']) + 'Params',
'type': 'object',
'description': 'Parameters for the %s event.' % ToTitleCase(
event['name']),
'properties': event.get('parameters', [])
}
domain['types'].append(event_type)
def InitializeDomainDependencies(json_api):
"""For each domain create list of domains given domain depends on,
including itself."""
direct_deps = collections.defaultdict(set)
types_required = collections.defaultdict(set)
def GetDomainDepsFromRefs(domain_name, json):
if isinstance(json, list):
for value in json:
GetDomainDepsFromRefs(domain_name, value)
return
if not isinstance(json, dict):
return
    for value in json.values():  # works under both Python 2 and 3
GetDomainDepsFromRefs(domain_name, value)
if '$ref' in json:
if '.' in json['$ref']:
dep = json['$ref'].split('.')[0]
direct_deps[domain_name].add(dep)
types_required[domain_name].add(json['$ref'])
for domain in json_api['domains']:
direct_deps[domain['domain']] = set(domain.get('dependencies', []))
types_required[domain['domain']] = set(domain.get('types_required', []))
GetDomainDepsFromRefs(domain['domain'], domain)
def TraverseDependencies(domain, deps):
if domain in deps:
return
deps.add(domain)
for dep in direct_deps[domain]:
TraverseDependencies(dep, deps)
for domain in json_api['domains']:
domain_deps = set()
TraverseDependencies(domain['domain'], domain_deps)
if 'dependencies' in domain:
domain['js_dependencies'] = domain['dependencies']
else:
domain['js_dependencies'] = []
domain['js_forward_declarations'] = []
for type in types_required[domain['domain']]:
if not type.split('.')[0] in domain['js_dependencies']:
domain['js_forward_declarations'].append(type)
domain['dependencies'] = sorted(domain_deps)
def PatchExperimentalCommandsAndEvents(json_api):
"""Mark all commands and events in experimental domains as experimental
and make sure experimental commands have at least empty parameters
and return values.
"""
for domain in json_api['domains']:
if domain.get('experimental', False):
for command in domain.get('commands', []):
command['experimental'] = True
for event in domain.get('events', []):
event['experimental'] = True
def EnsureDirectoryExists(path):
if not os.path.exists(path):
os.makedirs(path)
def EnsureCommandsHaveParametersAndReturnTypes(json_api):
"""Make sure all commands have at least empty parameters and return values.
This guarantees API compatibility if a previously experimental command is made
stable.
"""
for domain in json_api['domains']:
for command in domain.get('commands', []):
if not 'parameters' in command:
command['parameters'] = []
if not 'returns' in command:
command['returns'] = []
for event in domain.get('events', []):
if not 'parameters' in event:
event['parameters'] = []
def GeneratePerDomain(jinja_env, output_dirname, json_api, class_name,
file_types, domain_name_to_file_name_func):
EnsureDirectoryExists(output_dirname)
for file_type in file_types:
template = jinja_env.get_template('/%s_%s.template' % (
class_name, file_type))
for domain in json_api['domains']:
template_context = {
'domain': domain,
'resolve_type': ResolveType,
'short_form': functools.partial(ShortForm, domain),
}
domain_name = CamelCaseToHackerStyle(domain['domain'])
output_file = '%s/%s.%s' % (output_dirname,
domain_name_to_file_name_func(domain_name),
file_type)
with open(output_file, 'w') as f:
f.write(template.render(template_context))
def GenerateDomains(jinja_env, output_dirname, json_api):
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools', 'domains'), json_api,
'domain', ['cc', 'h'],
lambda domain_name: domain_name)
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools_js'), json_api,
'domain', ['js'],
lambda domain_name: domain_name)
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools_js', 'externs'),
json_api, 'domain_externs', ['js'],
lambda domain_name: 'externs_%s' % (domain_name, ))
def GenerateTypes(jinja_env, output_dirname, json_api):
# Generate forward declarations for types.
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools', 'internal'),
json_api, 'domain_types_forward_declarations', ['h'],
lambda domain_name: 'types_forward_declarations_%s' % (domain_name, ))
# Generate types on per-domain basis.
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools', 'domains'),
json_api, 'domain_types', ['h', 'cc'],
lambda domain_name: 'types_%s' % (domain_name, ))
def GenerateTypeConversions(jinja_env, output_dirname, json_api):
# Generate type conversions on per-domain basis.
GeneratePerDomain(
jinja_env, os.path.join(output_dirname, 'devtools', 'internal'),
json_api, 'domain_type_conversions', ['h'],
lambda domain_name: 'type_conversions_%s' % (domain_name, ))
if __name__ == '__main__':
json_api, output_dirname = ParseArguments(sys.argv[1:])
jinja_env = InitializeJinjaEnv(output_dirname)
InitializeDomainDependencies(json_api)
PatchExperimentalCommandsAndEvents(json_api)
EnsureCommandsHaveParametersAndReturnTypes(json_api)
SynthesizeCommandTypes(json_api)
SynthesizeEventTypes(json_api)
PatchFullQualifiedRefs(json_api)
CreateTypeDefinitions(json_api)
GenerateDomains(jinja_env, output_dirname, json_api)
GenerateTypes(jinja_env, output_dirname, json_api)
GenerateTypeConversions(jinja_env, output_dirname, json_api)
| [
"[email protected]"
] | |
28acd089a5318eca2c288aeb6b39ea6b02b19415 | c036befbd9a4b81c0f082273dd0eb007e7f9582d | /dort-core/protocols/full_node_protocol.py | 79a277910c3f61ce22c0a990b22b93f0b6596264 | [
"Apache-2.0"
] | permissive | Dortchain/dort-blockchian | 889f52f36dcdeffe0f852b413cdd32879741462f | 14f16e321a60f9d70f849f58e4e9964fa337a084 | refs/heads/main | 2023-06-16T01:31:30.718415 | 2021-07-11T03:03:12 | 2021-07-11T03:03:12 | 384,694,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,240 | py | from dataclasses import dataclass
from typing import List, Optional
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.types.blockchain_format.vdf import VDFInfo, VDFProof
from Dort.types.end_of_slot_bundle import EndOfSubSlotBundle
from Dort.types.full_block import FullBlock
from Dort.types.peer_info import TimestampedPeerInfo
from Dort.types.spend_bundle import SpendBundle
from Dort.types.unfinished_block import UnfinishedBlock
from Dort.types.weight_proof import WeightProof
from Dort.util.ints import uint8, uint32, uint64, uint128
from Dort.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeak(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
unfinished_reward_block_hash: bytes32
@dataclass(frozen=True)
@streamable
class NewTransaction(Streamable):
transaction_id: bytes32
cost: uint64
fees: uint64
@dataclass(frozen=True)
@streamable
class RequestTransaction(Streamable):
transaction_id: bytes32
@dataclass(frozen=True)
@streamable
class RespondTransaction(Streamable):
transaction: SpendBundle
@dataclass(frozen=True)
@streamable
class RequestProofOfWeight(Streamable):
total_number_of_blocks: uint32
tip: bytes32
@dataclass(frozen=True)
@streamable
class RespondProofOfWeight(Streamable):
wp: WeightProof
tip: bytes32
@dataclass(frozen=True)
@streamable
class RequestBlock(Streamable):
height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RejectBlock(Streamable):
height: uint32
@dataclass(frozen=True)
@streamable
class RequestBlocks(Streamable):
start_height: uint32
end_height: uint32
include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RespondBlocks(Streamable):
start_height: uint32
end_height: uint32
blocks: List[FullBlock]
@dataclass(frozen=True)
@streamable
class RejectBlocks(Streamable):
start_height: uint32
end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondBlock(Streamable):
block: FullBlock
@dataclass(frozen=True)
@streamable
class NewUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RespondUnfinishedBlock(Streamable):
unfinished_block: UnfinishedBlock
@dataclass(frozen=True)
@streamable
class NewSignagePointOrEndOfSubSlot(Streamable):
prev_challenge_hash: Optional[bytes32]
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RequestSignagePointOrEndOfSubSlot(Streamable):
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RespondSignagePoint(Streamable):
index_from_challenge: uint8
challenge_chain_vdf: VDFInfo
challenge_chain_proof: VDFProof
reward_chain_vdf: VDFInfo
reward_chain_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RespondEndOfSubSlot(Streamable):
end_of_slot_bundle: EndOfSubSlotBundle
@dataclass(frozen=True)
@streamable
class RequestMempoolTransactions(Streamable):
filter: bytes
@dataclass(frozen=True)
@streamable
class NewCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RequestCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RespondCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
vdf_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RequestPeers(Streamable):
"""
Return full list of peers
"""
@dataclass(frozen=True)
@streamable
class RespondPeers(Streamable):
peer_list: List[TimestampedPeerInfo]
| [
"[email protected]"
] | |
64c8fbf46fb22fc14a9bf06a229215fef2968c1d | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /fluent/17_concurrency_with_futures/flags2.py | 8f780b17b279566b83594808af0ab947b0b18362 | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,351 | py | '''Download 20 flag images sequentially (synchronous) for baseline comparison'''
import os
import sys
import time
import requests
POP20_CC = ('CN IN US ID BR PK NG BD RU JP MX PH VN ET EG DE IR TR CD FR')\
.split()
BASE_URL = 'http://flupy.org/data/flags'
DEST_DIR = 'images'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as f:
f.write(img)
def get_flag(base_url, cc):
    url = '{}/{cc}/{cc}.gif'.format(base_url, cc=cc.lower())
    resp = requests.get(url)
    if resp.status_code != 200:
        resp.raise_for_status()
    return resp.content
def download_one(cc, base_url, verbose=False):
try:
image = get_flag(base_url, cc)
except requests.exceptions.HTTPError as e:
res = e.response
if res.status_code == 404:
status = HTTPStatus.not_found
msg = 'not found'
else:
raise
else:
save_flag(image, cc.lower() + '.gif')
status = HTTPStatus.ok
msg = 'OK'
if verbose:
print(cc, msg)
return Result(status, cc)
def show(text):
print(text, end=' ')
sys.stdout.flush()
def download_many(cc_list, base_url, verbose, max_req):
    # max_req is unused in this sequential version; it is kept so the
    # sequential and concurrent implementations share one signature.
    counter = collections.Counter()
    cc_iter = sorted(cc_list)
    if not verbose:
        cc_iter = tqdm.tqdm(cc_iter)
for cc in cc_iter:
try:
res = download_one(cc, base_url, verbose)
except requests.exceptions.HTTPError as e:
error_msg = 'HTTP error {res.status_code} - {res.reason}'
error_msg = error_msg.format(res=e.response)
        except requests.exceptions.ConnectionError:
            error_msg = 'Connection error'
        else:
            error_msg = ''
            status = res.status
if error_msg:
status = HTTPStatus.error
counter[status] += 1
if verbose and error_msg:
print('*** Error for {}: {}'.format(cc, error_msg))
return counter
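# A minimal sketch (not part of the original example) of a concurrent
# variant with the same signature, so it can also be passed to main()
# below; for brevity it lets any download exception propagate instead of
# counting it as HTTPStatus.error.
def download_many_threaded(cc_list, base_url, verbose, max_req):
    from concurrent import futures
    counter = collections.Counter()
    with futures.ThreadPoolExecutor(max_workers=max_req) as executor:
        to_do = [executor.submit(download_one, cc, base_url, verbose)
                 for cc in sorted(cc_list)]
        for future in futures.as_completed(to_do):
            res = future.result()  # re-raises any exception from the worker
            counter[res.status] += 1
    return counter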
# pass download_all to main so main can be used as lib func with other
# implementations for downloading
def main(download_many):
t0 = time.time()
count = download_all(POP20_CC)
elapsed = time.time() - t0
msg = '\n{} flags downloaded in {:.2f}s'
print(msg.format(count, elapsed))
if __name__ == '__main__':
main(download_all)
| [
"[email protected]"
] | |
27dcf8647fd0983da8e44125393eb3cc04a7340d | 9dfb3372a1e4516d970a6e9d0a9fd8360580eae7 | /python patten find/patten_find_of_know_type.py | 5c17066891d4009fc74c65a7643b516f6664d7aa | [] | no_license | clambering-goat/cameron_pyton | d1cd0e7b04da14e7ba4f89dcb4d973f297a4626c | df0b0365b86e75cfcfc2c1fc21608f1536a3b79f | refs/heads/master | 2021-07-14T20:37:37.021401 | 2019-02-28T07:52:11 | 2019-02-28T07:52:11 | 137,251,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,049 | py |
# Works, but if the tail sequence also appears inside the payload, the code
# will mistake it for the end of the data packet.
import random
data=""
head="123"
tail="789"
parrton_data="456646849861"
parrton=head+parrton_data+tail
lenth_data=len(parrton)
print("looking for",parrton)
#data maker
#v1
'''
random_noise=5
parrton_reption=2
for number_of_parrton in range(parrton_reption):
data=data+parrton
for q in range(random_noise):
data=data+str(random.randint(1,10))
'''
#v2
random_noise=5
parrton_reption=2
amount_of_corruption=2
for number_of_parrton in range(parrton_reption):
data=data+parrton
for q in range(random_noise):
data=data+str(random.randint(1,10))
#corruption
for _ in range(amount_of_corruption):
    place=random.randint(0,len(data))
    print("random corruption at ",place)
    data=data[0:place]+str(random.randint(1,10))+data[place:]
print(data)
#data find
#print("data in ",parrton in data)
head_match=[False,False,False]
looking_for_head=True
looking_for_tail=False
tail_match=[False,False,False]
retrved_data=""
for q in data:
#print("Q",q)
if looking_for_head==True:
if q==head[0] and head_match[0]==False and head_match[1]==False and head_match[2]==False:
head_match[0]=True
#print("1",q)
#print(head_match)
elif q==head[1] and head_match[0]==True and head_match[1]==False and head_match[2]==False:
head_match[1]=True
#print("2",q)
#print(head_match)
elif q==head[2] and head_match[0]==True and head_match[1]==True and head_match[2]==False:
head_match[2]=True
#print("3",q)
print("posible start found")
looking_for_head=False
looking_for_tail=True
retrved_data=""
#print(head_match)
else:
#print("reset")
head_match=[False,False,False]
#print(head_match)
if looking_for_tail ==True:
retrved_data=retrved_data+q
if q==tail[0] and tail_match[0]==False and tail_match[1]==False and tail_match[2]==False:
tail_match[0]=True
#print("1",q)
#print(head_match)
elif q==tail[1] and tail_match[0]==True and tail_match[1]==False and tail_match[2]==False:
tail_match[1]=True
#print("2",q)
#print(head_match)
elif q==tail[2] and tail_match[0]==True and tail_match[1]==True and tail_match[2]==False:
tail_match[2]=True
#print("3",q)
print("end found")
print("the data is :")
print(retrved_data[1:-len(tail)])
print("did the code work")
print(parrton_data==(retrved_data[1:-len(tail)]))
exit()
looking_for_head=True
looking_for_tail=False
#print(head_match)
else:
#print("reset")
tail_match=[False,False,False]
#print(head_match)
print("code not found")
| [
"[email protected]"
] | |
fb5f3c5055468c0cafdccaa22346bea873c241e8 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/comic.py | 20bdd3de8ea7b48341d0023677451135cdd02ef7 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 794 | py | ii = [('EmerRN.py', 1), ('CookGHP3.py', 1), ('MarrFDI.py', 1), ('WilbRLW.py', 1), ('KembFJ1.py', 5), ('WilbRLW5.py', 2), ('LeakWTI3.py', 1), ('PettTHE.py', 2), ('GellWPT.py', 3), ('FitzRNS3.py', 4), ('WilbRLW2.py', 1), ('ClarGE2.py', 2), ('GellWPT2.py', 4), ('CarlTFR.py', 1), ('LyttELD.py', 2), ('CrokTPS.py', 3), ('ClarGE.py', 5), ('LandWPA.py', 1), ('GilmCRS.py', 5), ('AinsWRR.py', 1), ('MedwTAI.py', 2), ('GodwWLN.py', 2), ('CoopJBT.py', 1), ('SoutRD2.py', 1), ('BackGNE.py', 1), ('MedwTAI2.py', 1), ('SoutRD.py', 3), ('DickCSG.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 2), ('WilkJMC.py', 1), ('HogaGMM.py', 20), ('FitzRNS4.py', 2), ('KembFJ2.py', 5), ('LewiMJW.py', 2), ('ClarGE3.py', 9), ('FitzRNS2.py', 1), ('HogaGMM2.py', 42), ('EvarJSP.py', 1), ('NortSTC.py', 1), ('TaylIF.py', 1)] | [
"[email protected]"
] | |
72d95d9d79cef5cbeca6ee583d2ebc7314606d6e | 54d2887e3c910f68366bd0aab3c692d54245e22a | /arc/arc025/a.py | bbcdf5f5008d4ea5e2a27ebd6595406ba81f4c0e | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | d = list(map(int, input().split()))
j = list(map(int, input().split()))
ans = 0
for i in range(7):
if d[i] > j[i]:
ans += d[i]
else:
ans += j[i]
print(ans)
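# equivalent one-liner: print(sum(max(x, y) for x, y in zip(d, j)))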
| [
"[email protected]"
] | |
eb5616cfb71f50d5ece42954779cc6d30ab2fd09 | 19ce218477449ece043c0e40fb47ba55c4bfd38e | /network/tagged_delegate.py | 2d321c1ae60e772d8f2caa140fde192c45bd4de7 | [] | no_license | vickylunna/PyAuthServer | 7aef8b86f3a748bdd1b50a8c10872cfa39d6cdef | 9b6be1a805938f48292a231bad3f9006c667e06b | refs/heads/master | 2021-01-18T04:50:15.790653 | 2014-12-01T18:27:02 | 2014-12-01T18:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from .decorators import get_tag, has_tag
from .metaclasses.register import TypeRegister
from .world_info import WorldInfo
__all__ = ['DelegateByNetmode', 'DelegateByTag', 'FindByTag']
class FindByTag(metaclass=TypeRegister):
"""Provides an interface to select a subclass by a tag value"""
@classmethod
def register_type(cls):
cls._cache = {}
@classmethod
def update_cache(cls, from_cls=None):
try:
subclasses = cls.subclasses
except AttributeError:
if from_cls is None:
raise TypeError("Subclass dictionary was not implemented by {}".format(cls.type_name))
else:
return
cls._cache.update({get_tag(c): c for c in subclasses.values() if has_tag(c)})
try:
parent = next(c for c in cls.__mro__[1:] if getattr(c, "subclasses", subclasses) is not subclasses)
except StopIteration:
pass
else:
parent.update_cache(from_cls=cls)
@classmethod
def find_subclass_for(cls, tag_value):
"""Find subclass with a tag value
:param tag_value: value of tag to isolate
"""
try:
cache = cls._cache
except AttributeError:
raise TypeError("Subclass dictionary was not implemented by {}".format(cls.type_name))
try:
return cache[tag_value]
except KeyError:
raise TypeError("Tag: {} is not supported by {}".format(tag_value, cls.type_name))
class DelegateByTag(FindByTag):
def __new__(cls, *args, **kwargs):
tag = cls.get_current_tag()
delegated_class = cls.find_subclass_for(tag)
if delegated_class.is_delegate:
return delegated_class.__new__(delegated_class, *args, **kwargs)
return super().__new__(delegated_class)
@classmethod
def register_type(cls):
super().register_type()
cls.is_delegate = True
@classmethod
def register_subtype(cls):
super().register_subtype()
cls.is_delegate = False
@staticmethod
def get_current_tag():
raise NotImplementedError()
class DelegateByNetmode(DelegateByTag):
@staticmethod
def get_current_tag():
return WorldInfo.netmode
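# Usage sketch: a concrete delegate subclasses DelegateByNetmode (or another
# DelegateByTag with get_current_tag() overridden); instantiating the base
# class then returns an instance of whichever registered subclass carries
# the tag matching the current value, as resolved by find_subclass_for()
# inside __new__ above.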
| [
"[email protected]"
] | |
05769004316d5852da9887d9c1d227590ba3f3ea | 45a0356aaaf338d929b6e620fcc2b9386beb6b50 | /rally/openstack/common/strutils.py | f512fbf8465b3d0fd0d56853c3a379d0ab338d1f | [
"Apache-2.0"
] | permissive | pombredanne/rally | 6e182dd0dfa9e94e899f1b156967b6778805c6f2 | 3921b4faa535ef141259a7076b414178836528f2 | refs/heads/master | 2020-12-11T05:54:59.049744 | 2013-11-27T14:47:27 | 2013-11-27T14:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,352 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
import six
from rally.openstack.common.gettextutils import _ # noqa
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return False
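# Examples:
#   bool_from_string('yes')   -> True
#   bool_from_string('OFF')   -> False
#   bool_from_string('maybe') -> False, but raises ValueError when strict=True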
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
    :raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an isntance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
"""
match = BYTE_REGEX.search(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
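# Examples:
#   to_bytes('1024') -> 1024
#   to_bytes('2KB')  -> 2048
#   to_bytes('10m')  -> 10485760
#   to_bytes('')     -> 0 (the `default` argument)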
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
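# Example: to_slug('Héllo, Wörld!') -> 'hello-world'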
| [
"[email protected]"
] | |
1775764213fa055a85117b3e4513753453f10093 | 45fc3ca374b15e63fee49a78c54d1a7d5949ebd6 | /vega/core/evaluator/gpu_evaluator.py | 00caec4c26234cd3c8f389fc5f8fbe1b036e6a3b | [
"MIT"
] | permissive | 1ziyanW1/vega | d0849d09ba9dea85f1e135f515202e90533972aa | 9640a32d3cbb22e39593e2786b825b946aa6b281 | refs/heads/master | 2022-11-25T20:30:50.517879 | 2020-08-04T08:17:28 | 2020-08-04T08:17:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""GpuEvaluator used to do evaluate process on gpu."""
import os
import time
import logging
import errno
import pickle
import torch
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.trainer.pytorch.trainer import Trainer
from vega.core.trainer.utils import WorkerTypes
from vega.core.common import FileOps, init_log
from vega.datasets.pytorch import Dataset
from vega.core.metrics.pytorch import Metrics
from vega.core.common.utils import update_dict
@ClassFactory.register(ClassType.GPU_EVALUATOR)
class GpuEvaluator(Trainer):
"""Evaluator is a gpu evaluator.
:param args: arguments from user and default config file
:type args: dict or Config, default to None
:param train_data: training dataset
:type train_data: torch dataset, default to None
:param valid_data: validate dataset
:type valid_data: torch dataset, default to None
:param worker_info: the dict worker info of workers that finished train.
:type worker_info: dict or None.
"""
def __init__(self, worker_info=None, model=None, hps=None, load_checkpoint=False, **kwargs):
"""Init GpuEvaluator."""
self._reference_trainer_settings()
super(GpuEvaluator, self).__init__(self.cfg)
self.worker_type = WorkerTypes.GPU_EVALUATOR
self.worker_info = worker_info
if worker_info is not None and "step_name" in worker_info and "worker_id" in worker_info:
self.step_name = self.worker_info["step_name"]
self.worker_id = self.worker_info["worker_id"]
self._flag_load_checkpoint = load_checkpoint
self.hps = hps
self.model = model
self.evaluate_result = None
def _reference_trainer_settings(self):
"""Set reference Trainer."""
ref = self.cfg.get('ref')
if ref:
ref_dict = ClassFactory.__configs__
for key in ref.split('.'):
ref_dict = ref_dict.get(key)
update_dict(ref_dict, self.cfg)
def _init_all_settings(self):
"""Init all settings from config."""
self._reference_trainer_settings()
if self.cfg.cuda:
self._init_cuda_setting()
self._init_hps(self.hps)
if self.model is None:
self.model = self._init_model()
if self.model is not None and self.cfg.cuda:
self.model = self.model.cuda()
# TODO
if self._flag_load_checkpoint:
self.load_checkpoint()
else:
self._load_pretrained_model()
self._init_dataloader()
def _init_dataloader(self):
"""Init dataloader."""
valid_dataset = Dataset(mode='test')
self.valid_loader = valid_dataset.dataloader
def valid(self, valid_loader):
"""Validate one step of mode.
:param loader: valid data loader
"""
self.model.eval()
metrics = Metrics(self.cfg.metric)
data_num = 0
latency_sum = 0.0
with torch.no_grad():
for step, (data, target) in enumerate(valid_loader):
if self.cfg.cuda:
data, target = data.cuda(), target.cuda()
self.model = self.model.cuda()
time_start = time.time()
logits = self.model(data)
latency_sum += time.time() - time_start
metrics(logits, target)
n = data.size(0)
data_num += n
if self._first_rank and step % self.cfg.report_freq == 0:
logging.info("step [{}/{}], valid metric [{}]".format(
step + 1, len(valid_loader), str(metrics.results_dict)))
latency = latency_sum / data_num
pfms = metrics.results_dict
performance = [pfms[list(pfms.keys())[0]]]
if self.cfg.evaluate_latency:
performance.append(latency)
logging.info("valid performance: {}".format(performance))
return performance
def train_process(self):
"""Validate process for the model validate worker."""
init_log(log_file="gpu_eva_{}.txt".format(self.worker_id))
logging.info("start evaluate process")
self._init_all_settings()
performance = self.valid(self.valid_loader)
self._save_performance(performance)
logging.info("finished evaluate for id {}".format(self.worker_id))
self.evaluate_result = performance
return
| [
"[email protected]"
] | |
0731c42c5febe59fc7aa48a0d10201bb653fca1c | 42dce5a3a1ace43022968ec059df1b17f94776ba | /tat_aws_creator_auto_tag/tests/__init__.py | a989bb3d56e264743ade2655c449c9e32a1eec94 | [
"MIT"
] | permissive | techantllc/aws-creator-auto-tagger | 647db1c5cbed275608c0cb1f28622ef3d11c92bb | 413c9f6c91cfaa088bbc45bed0f6c9f09e02f48a | refs/heads/master | 2020-08-22T13:40:05.495905 | 2019-10-30T18:27:52 | 2019-10-30T18:27:52 | 216,407,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | # -*- coding: utf-8 -*-
import boto3
aws_profile = "tat_sanhe"
boto_ses = boto3.Session(profile_name=aws_profile)
iam_client = boto_ses.client("iam")
s3_client = boto_ses.client("s3")
| [
"[email protected]"
] | |
a0a756527c4bea878ec4ec9a51f27f2aa7b0e709 | 0c9ec5d4bafca45505f77cbd3961f4aff5c10238 | /openapi-python-client/openapi_client/models/multi_form_variable_binary_dto.py | 5b444e02f29acbb121a3784c4daa78f23cced18b | [
"Apache-2.0"
] | permissive | yanavasileva/camunda-bpm-examples | 98cd2930f5c8df11a56bf04845a8ada5b3bb542d | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | refs/heads/master | 2022-10-19T20:07:21.278160 | 2020-05-27T15:28:27 | 2020-05-27T15:28:27 | 267,320,400 | 0 | 0 | Apache-2.0 | 2020-05-27T14:35:22 | 2020-05-27T13:00:01 | null | UTF-8 | Python | false | false | 5,035 | py | # coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class MultiFormVariableBinaryDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'file',
'value_type': 'str'
}
attribute_map = {
'data': 'data',
'value_type': 'valueType'
}
def __init__(self, data=None, value_type=None, local_vars_configuration=None): # noqa: E501
"""MultiFormVariableBinaryDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._data = None
self._value_type = None
self.discriminator = None
self.data = data
if value_type is not None:
self.value_type = value_type
@property
def data(self):
"""Gets the data of this MultiFormVariableBinaryDto. # noqa: E501
The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory. # noqa: E501
:return: The data of this MultiFormVariableBinaryDto. # noqa: E501
:rtype: file
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this MultiFormVariableBinaryDto.
The binary data to be set. For File variables, this multipart can contain the filename, binary value and MIME type of the file variable to be set Only the filename is mandatory. # noqa: E501
:param data: The data of this MultiFormVariableBinaryDto. # noqa: E501
:type: file
"""
self._data = data
@property
def value_type(self):
"""Gets the value_type of this MultiFormVariableBinaryDto. # noqa: E501
The name of the variable type. Either Bytes for a byte array variable or File for a file variable. # noqa: E501
:return: The value_type of this MultiFormVariableBinaryDto. # noqa: E501
:rtype: str
"""
return self._value_type
@value_type.setter
def value_type(self, value_type):
"""Sets the value_type of this MultiFormVariableBinaryDto.
The name of the variable type. Either Bytes for a byte array variable or File for a file variable. # noqa: E501
:param value_type: The value_type of this MultiFormVariableBinaryDto. # noqa: E501
:type: str
"""
allowed_values = ["Bytes", "File"] # noqa: E501
if self.local_vars_configuration.client_side_validation and value_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `value_type` ({0}), must be one of {1}" # noqa: E501
.format(value_type, allowed_values)
)
self._value_type = value_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MultiFormVariableBinaryDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MultiFormVariableBinaryDto):
return True
return self.to_dict() != other.to_dict()
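# A usage sketch (hypothetical filename): build the multipart DTO and
# inspect its JSON-ready form.
#   dto = MultiFormVariableBinaryDto(data=open('doc.pdf', 'rb'),
#                                    value_type='File')
#   print(dto.to_dict())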
| [
"[email protected]"
] | |
66ec8c7f71b6759f16016e52cff49cb461b2a215 | 78518b65b6823ac42b20515ae7716ada4b59db3d | /手写代码/第2章 数据处理与可视化/Pex2_12.py | b0c9c2b22908f384e5fe922680d7a77078d6e2a7 | [] | no_license | YunHao-Von/Mathematical-Modeling | 70d6ad8f2f543751883afdc85aa19b1c80a106a0 | 4fe153453cccb4b474166c104e08d13ed72bc5ac | refs/heads/master | 2023-03-02T23:16:40.839865 | 2021-02-15T09:52:24 | 2021-02-15T09:52:24 | 339,029,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import numpy as np
a=np.arange(4).reshape(2,2)
b=np.arange(4,8).reshape(2,2)
c1=np.vstack([a,b])
c2=np.r_[a,b]
d1=np.hstack([a,b])
d2=np.c_[a,b]
print(c1)
print(c2)
print(d1) | [
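print(d2)
# expected output: c1 and c2 are both [[0 1] [2 3] [4 5] [6 7]] (vertical
# stack); d1 and d2 are both [[0 1 4 5] [2 3 6 7]] (horizontal stack)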
"[email protected]"
] | |
d6ae188b2c356f7e8c7b1e9f6b73323adeed445a | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-google-analytics-data-api/setup.py | d93d394a43545a29e58da42374364ee3ea361f13 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 679 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = ["airbyte-cdk", "PyJWT==2.4.0", "cryptography==37.0.4", "requests"]
TEST_REQUIREMENTS = [
"freezegun",
"pytest~=6.1",
"pytest-mock~=3.6.1",
"requests-mock",
]
setup(
name="source_google_analytics_data_api",
description="Source implementation for Google Analytics Data Api.",
author="Airbyte",
author_email="[email protected]",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
| [
"[email protected]"
] | |
000792c8fe2398359388109a569bf6af56d0fdf1 | a543274065774256020081702b9a18b2be098bc8 | /examples/adwords/v201809/shopping/add_product_partition_tree.py | d66f135f4d0fec6729e23c89eb50dc2b0cbb3fd1 | [
"Apache-2.0"
] | permissive | Bhanditz/googleads-python-lib | 0b6326c7496adfee08fe98d3aa647c85cd4f7d85 | 188785c7003c03d8b408e48ef891e3a15ec1e1b1 | refs/heads/master | 2023-04-15T23:40:44.699676 | 2019-03-28T14:33:37 | 2019-03-28T14:33:37 | 178,733,156 | 0 | 0 | Apache-2.0 | 2023-03-24T18:56:03 | 2019-03-31T19:32:48 | Python | UTF-8 | Python | false | false | 8,380 | py | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a ProductPartition tree.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import adwords
ADGROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
class ProductPartitionHelper(object):
"""A helper for creating ProductPartition trees."""
def __init__(self, adgroup_id):
"""Initializer.
Args:
adgroup_id: The ID of the AdGroup that we wish to attach the partition
tree to.
"""
# The next temporary criterion ID to be used.
# When creating our tree we need to specify the parent-child relationships
# between nodes. However, until a criterion has been created on the server
# we do not have a criterion ID with which to refer to it.
# Instead we can specify temporary IDs that are specific to a single mutate
# request. Once the criteria have been created they are assigned an ID as
# normal and the temporary ID will no longer refer to it.
# A valid temporary ID is any negative integer.
self.next_id = -1
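    # Example: the root subdivision below is created with temporary id -1,
    # the next subdivision with -2, and so on; children reference their
    # parent via parentCriterionId until the server assigns real IDs.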
# The set of mutate operations needed to create the current tree.
self.operations = []
self.adgroup_id = adgroup_id
def CreateSubdivision(self, parent=None, value=None):
"""Creates a subdivision node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
Returns:
A new subdivision node.
"""
division = {
'xsi_type': 'ProductPartition',
'partitionType': 'SUBDIVISION',
'id': str(self.next_id)
}
# The root has neither a parent nor a value.
if parent is not None:
division['parentCriterionId'] = parent['id']
division['caseValue'] = value
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.adgroup_id,
'criterion': division
}
self.CreateAddOperation(adgroup_criterion)
self.next_id -= 1
return division
def CreateUnit(self, parent=None, value=None, bid_amount=None):
"""Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
"""
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
# The root node has neither a parent nor a value.
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit
def GetOperations(self):
"""Returns the set of mutate operations needed to create the current tree.
Returns:
The set of operations
"""
return self.operations
def CreateAddOperation(self, criterion):
"""Creates an AdGroupCriterionOperation for the given criterion.
Args:
criterion: The criterion we want to add.
"""
operation = {
'operator': 'ADD',
'operand': criterion
}
self.operations.append(operation)
def main(client, adgroup_id):
"""Runs the example."""
adgroup_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201809')
helper = ProductPartitionHelper(adgroup_id)
# The most trivial partition tree has only a unit node as the root, e.g.:
# helper.CreateUnit(bid_amount=100000)
root = helper.CreateSubdivision()
new_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'NEW'
}
used_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'USED'
}
other_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
}
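  # Leaving out the 'condition' value makes this the catch-all "Other" case
  # for the condition dimension, covering products not matched above.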
helper.CreateUnit(root, new_product_canonical_condition, 200000)
helper.CreateUnit(root, used_product_canonical_condition, 100000)
other_condition = helper.CreateSubdivision(
root, other_product_canonical_condition)
cool_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CoolBrand'
}
cheap_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CheapBrand'
}
other_product_brand = {
'xsi_type': 'ProductBrand',
}
helper.CreateUnit(other_condition, cool_product_brand, 900000)
helper.CreateUnit(other_condition, cheap_product_brand, 10000)
other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
# The value for the bidding category is a fixed ID for the 'Luggage & Bags'
# category. You can retrieve IDs for categories from the ConstantDataService.
# See the 'GetProductTaxonomy' example for more details.
luggage_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
'value': '-5914235892932915235'
}
generic_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
}
helper.CreateUnit(other_brand, luggage_category, 750000)
helper.CreateUnit(other_brand, generic_category, 110000)
# Make the mutate request
result = adgroup_criterion_service.mutate(helper.GetOperations())
children = {}
root_node = None
# For each criterion, make an array containing each of its children.
# We always create the parent before the child, so we can rely on that here.
for adgroup_criterion in result['value']:
children[adgroup_criterion['criterion']['id']] = []
if 'parentCriterionId' in adgroup_criterion['criterion']:
children[adgroup_criterion['criterion']['parentCriterionId']].append(
adgroup_criterion['criterion'])
else:
root_node = adgroup_criterion['criterion']
# Show the tree
DisplayTree(root_node, children)
def DisplayTree(node, children, level=0):
"""Recursively display a node and each of its children.
Args:
node: The node we're displaying the children of.
children: Children of the parent node.
level: How deep in the tree we are.
"""
value = ''
node_type = ''
if 'caseValue' in node:
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if node_type == 'ProductCanonicalCondition':
value = (case_value['condition'] if 'condition' in case_value
else 'OTHER')
elif node_type == 'ProductBiddingCategory':
value = '%s(%s)' % (case_value['type'], case_value['value']
if 'value' in case_value else 'OTHER')
else:
value = (case_value['value'] if 'value' in case_value else 'OTHER')
print ('%sid: %s, node_type: %s, value: %s\n'
% (' ' * level, node['id'], node_type, value))
for child_node in children[node['id']]:
DisplayTree(child_node, children, level + 1)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUP_ID)
| [
"[email protected]"
] | |
f5d44c1fcfada39187884766cab499aa2b45c3d8 | 1b36425f798f484eda964b10a5ad72b37b4da916 | /ee/clickhouse/materialized_columns/test/test_query.py | c78623a831c3ed5c5ef9a4c39315d40ea239b639 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | dorucioclea/posthog | 0408baa2a7ae98e5bea352c516f741ddc17c0a3e | 8848981baf237117fb22d28af0770a0165881423 | refs/heads/master | 2023-01-23T11:01:57.942146 | 2023-01-13T09:03:00 | 2023-01-13T09:03:00 | 241,222,000 | 0 | 0 | MIT | 2020-02-17T22:34:37 | 2020-02-17T22:34:36 | null | UTF-8 | Python | false | false | 945 | py | from posthog.test.base import APIBaseTest, ClickhouseTestMixin
class TestQuery(ClickhouseTestMixin, APIBaseTest):
def test_get_queries_detects(self):
        # run a representative insight query so its ClickHouse SELECTs are captured
with self.capture_select_queries() as queries:
self.client.post(
f"/api/projects/{self.team.id}/insights/funnel/",
{
"events": [{"id": "step one", "type": "events", "order": 0}],
"funnel_window_days": 14,
"funnel_order_type": "unordered",
"insight": "funnels",
},
).json()
self.assertTrue(len(queries))
# make sure that the queries start with a discoverable prefix.
# If this changes, also update ee/clickhouse/materialized_columns/analyze.py::_get_queries to
# filter on the right queries
for q in queries:
self.assertTrue(q.startswith("/* user_id"))
| [
"[email protected]"
] | |
a1bffc2fe20ca4257e795c2a592f63257efca427 | 76d27bfccbd24c86c8a528d634e8c53a884bd331 | /blinking_image.py | bb94efc930b23102ec429ef8a70f687c4f2d0028 | [] | no_license | nickdelgrosso/SSVEP-CardGui | 07bb2d539ffe0bd46e4a80959466422c7e79e63b | edbccaccbe93f088019a545111b2fbf0f2af54bd | refs/heads/master | 2023-06-24T22:47:31.656748 | 2021-07-22T21:15:21 | 2021-07-22T21:15:21 | 388,243,533 | 0 | 1 | null | 2021-07-21T22:38:39 | 2021-07-21T20:55:53 | Jupyter Notebook | UTF-8 | Python | false | false | 764 | py | from psychopy import visual, core
class BlinkingImage:
def __init__(self, win, blink_frequency = 10., **kwargs):
self.win = win
self.image = visual.ImageStim(win=win, **kwargs)
        # The image toggles every half period; the blink_frequency setter
        # below resets the countdown to .5 / frequency accordingly.
        self.clock = core.CountdownTimer(.5 / blink_frequency)
        self.blink_frequency = blink_frequency
@property
def blink_frequency(self):
return self._blink_frequency
@blink_frequency.setter
def blink_frequency(self, value: float):
self._blink_frequency = value
self.clock.reset(.5 / value)
def draw(self):
        # getFutureFlipTime(clock=...) reports the countdown's value at the
        # next screen flip; negated, this is the time left until the toggle.
        time_to_flip = -self.win.getFutureFlipTime(clock=self.clock)
        if time_to_flip <= 0:
            self.clock.reset()  # start the next half-period
            self.image.setAutoDraw(not self.image.autoDraw)  # toggle visibility
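# Minimal usage sketch (not part of the original module): the window size,
# the image path 'stim.png', and the five-second duration are illustrative
# assumptions, not values taken from this repository.
if __name__ == '__main__':
    win = visual.Window(size=(800, 600))
    blinker = BlinkingImage(win, blink_frequency=10., image='stim.png')
    countdown = core.CountdownTimer(5.)  # blink for five seconds
    while countdown.getTime() > 0:
        blinker.draw()  # toggles autoDraw at the blink frequency
        win.flip()
    win.close()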
| [
"[email protected]"
] | |
038c9de59e5023434470e6b497ccc1ec1fa1b609 | d4b454d67a02af66c7622f77e538a1f1f95f5bdf | /rhalphalib/sample.py | 0f84abc88812a2513b00073445377bbcdd2de57d | [] | no_license | lgray/rhalphalib | 89c98708789222b69466f018901bd0e1a0cd092d | 6082c4f045b0de679c429ca06673a655f13ff3d9 | refs/heads/master | 2020-05-07T19:09:58.177414 | 2019-07-22T14:50:27 | 2019-07-22T14:50:27 | 180,800,992 | 0 | 0 | null | 2019-04-11T13:43:53 | 2019-04-11T13:43:53 | null | UTF-8 | Python | false | false | 16,132 | py | import numpy as np
import numbers
from .parameter import NuisanceParameter, DependentParameter, Observable
from .util import _to_numpy, _to_TH1
class Sample(object):
"""
Sample base class
"""
SIGNAL, BACKGROUND = range(2)
def __init__(self, name, sampletype):
self._name = name
self._sampletype = sampletype
self._observable = None
def __repr__(self):
return "<%s (%s) instance at 0x%x>" % (
self.__class__.__name__,
self._name,
id(self),
)
@property
def name(self):
return self._name
@property
def sampletype(self):
return self._sampletype
@property
def observable(self):
if self._observable is None:
raise RuntimeError("A Sample was not constructed correctly")
return self._observable
@observable.setter
def observable(self, obs):
# TODO check compatible?
self._observable = obs
@property
def parameters(self):
raise NotImplementedError
def normalization(self):
raise NotImplementedError
def setParamEffect(self, param, effect_up, effect_down=None):
raise NotImplementedError
def getParamEffect(self, param, up=True):
raise NotImplementedError
def getExpectation(self, nominal=False):
raise NotImplementedError
def renderRoofit(self, workspace):
raise NotImplementedError
def combineParamEffect(self, param):
raise NotImplementedError
class TemplateSample(Sample):
def __init__(self, name, sampletype, template):
'''
name: self-explanatory
        sampletype: Sample.SIGNAL or Sample.BACKGROUND
template: Either a ROOT TH1, a 1D Coffea Hist object, or a numpy histogram
in the latter case, please extend the numpy histogram tuple to define an observable name
i.e. (sumw, binning, name)
(for the others, the observable name is taken from the x axis name)
'''
super(TemplateSample, self).__init__(name, sampletype)
sumw, binning, obs_name = _to_numpy(template)
observable = Observable(obs_name, binning)
self._observable = observable
self._nominal = sumw
self._paramEffectsUp = {}
self._paramEffectsDown = {}
@property
def parameters(self):
'''
Set of independent parameters that affect this sample
'''
return set(self._paramEffectsUp.keys())
def normalization(self):
return self._nominal.sum()
def setParamEffect(self, param, effect_up, effect_down=None):
'''
Set the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
param: a Parameter object
effect_up: a numpy array representing the relative (multiplicative) effect of the parameter on the bin yields,
or a single number representing the relative effect on the sample normalization,
or a histogram representing the *bin yield* under the effect of the parameter (i.e. not relative)
effect_down: if asymmetric effects, fill this in, otherwise the effect_up value will be symmetrized
N.B. the parameter must have a compatible combinePrior, i.e. if param.combinePrior is 'shape', then one must pass a numpy array
'''
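        # Example (illustrative): a flat 2.7% normalization uncertainty can be
        # passed as effect_up=1.027; a shape systematic takes a per-bin numpy
        # array or an alternative histogram instead of a scalar.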
if not isinstance(param, NuisanceParameter):
raise ValueError("Template morphing can only be done via independent parameters with priors (i.e. a NuisanceParameter)")
if isinstance(effect_up, np.ndarray):
if len(effect_up) != self.observable.nbins:
raise ValueError("effect_up has the wrong number of bins (%d, expected %d)" % (len(effect_up), self.observable.nbins))
elif isinstance(effect_up, numbers.Number):
if 'shape' in param.combinePrior:
effect_up = np.full(self.observable.nbins, effect_up)
else:
effect_up, binning, _ = _to_numpy(effect_up)
if not np.array_equal(binning, self.observable.binning):
raise ValueError("effect_up has incompatible binning with sample %r" % self)
zerobins = self._nominal <= 0.
effect_up[zerobins] = 0.
effect_up[~zerobins] /= self._nominal[~zerobins]
self._paramEffectsUp[param] = effect_up
if effect_down is not None:
if isinstance(effect_down, np.ndarray):
if len(effect_down) != self.observable.nbins:
raise ValueError("effect_down has the wrong number of bins (%d, expected %d)" % (len(effect_down), self.observable.nbins))
elif isinstance(effect_down, numbers.Number):
if 'shape' in param.combinePrior:
effect_down = np.full(self.observable.nbins, effect_down)
else:
effect_down, binning, _ = _to_numpy(effect_down)
if not np.array_equal(binning, self.observable.binning):
raise ValueError("effect_down has incompatible binning with sample %r" % self)
zerobins = self._nominal <= 0.
effect_down[zerobins] = 0.
effect_down[~zerobins] /= self._nominal[~zerobins]
self._paramEffectsDown[param] = effect_down
else:
self._paramEffectsDown[param] = None
def getParamEffect(self, param, up=True):
'''
Get the parameter effect
'''
if up:
return self._paramEffectsUp[param]
else:
if self._paramEffectsDown[param] is None:
# TODO the symmeterized value depends on if param prior is 'shapeN' or 'shape'
return 1. / self._paramEffectsUp[param]
return self._paramEffectsDown[param]
def getExpectation(self, nominal=False):
'''
Create an array of per-bin expectations, accounting for all nuisance parameter effects
nominal: if True, calculate the nominal expectation (i.e. just plain numbers)
'''
if nominal:
return self._nominal
else:
# TODO: construct a DependentParameter per bin, as a function of the nuisance params
raise NotImplementedError
def renderRoofit(self, workspace):
'''
Import the necessary Roofit objects into the workspace for this sample
        and return an extended pdf representing this sample's prediction for pdf and norm.
'''
import ROOT
rooObservable = self.observable.renderRoofit(workspace)
rooTemplate = ROOT.RooDataHist(self.name, self.name, ROOT.RooArgList(rooObservable), _to_TH1(self._nominal, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
for param in self.parameters:
effect_up = self.getParamEffect(param, up=True)
if not isinstance(effect_up, np.ndarray):
# Normalization systematics can just go into combine datacards
continue
name = self.name + '_' + param.name + 'Up'
shape = self._nominal * effect_up
rooTemplate = ROOT.RooDataHist(name, name, ROOT.RooArgList(rooObservable), _to_TH1(shape, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
name = self.name + '_' + param.name + 'Down'
shape = self._nominal * self.getParamEffect(param, up=False)
rooTemplate = ROOT.RooDataHist(name, name, ROOT.RooArgList(rooObservable), _to_TH1(shape, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
# TODO build the pdf from the data hist, maybe or maybe not with systematics, return pdf and normalization
return None, None
def combineParamEffect(self, param):
'''
A formatted string for placement into the combine datacard that represents
the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
'''
if param not in self._paramEffectsUp:
return '-'
elif 'shape' in param.combinePrior:
return '1'
else:
up = self._paramEffectsUp[param]
down = self._paramEffectsDown[param]
if down is None:
return '%.3f' % up
else:
return '%.3f/%.3f' % (up, down)
class ParametericSample(Sample):
UseRooParametricHist = False
def __init__(self, name, sampletype, observable, params):
'''
Create a sample that is a binned function, where each bin yield
is given by the param in params. The list params should have the
same number of bins as observable.
'''
super(ParametericSample, self).__init__(name, sampletype)
if not isinstance(observable, Observable):
raise ValueError
if len(params) != observable.nbins:
raise ValueError
self._observable = observable
self._params = np.array(params)
self._paramEffectsUp = {}
self._paramEffectsDown = {}
@property
def parameters(self):
'''
Set of independent parameters that affect this sample
'''
pset = set()
for p in self._params:
pset.update(p.getDependents(deep=True))
pset.update(self._paramEffectsUp.keys())
return pset
def normalization(self):
'''
        For combine, the normalization in the card is used to scale the parametric process PDF.
Since we provide an explicit normalization function, this should always stay at 1.
'''
return 1.
def setParamEffect(self, param, effect_up, effect_down=None):
'''
Set the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
param: a Parameter object
effect_up: a numpy array representing the multiplicative effect of the parameter on the yield, or a single number
effect_down: if asymmetric effects, fill this in, otherwise the effect_up value will be symmetrized
For ParametericSample, only relative effects are supported. Not sure if they are useful though.
'''
raise NotImplementedError
def getParamEffect(self, param, up=True):
'''
Get the parameter effect
'''
raise NotImplementedError
def getExpectation(self, nominal=False):
'''
Create an array of per-bin expectations, accounting for all nuisance parameter effects
nominal: if True, calculate the nominal expectation (i.e. just plain numbers)
'''
params = self._params
if nominal:
return np.array([p.value for p in params])
else:
# TODO: create morph/modifier of self._params with any additional effects in _paramEffectsUp/Down
for i, p in enumerate(params):
p.name = self.name + '_bin%d' % i
if isinstance(p, DependentParameter):
# Let's make sure to render these
p.intermediate = False
return params
def renderRoofit(self, workspace):
'''
Produce a RooParametricHist and add to workspace
'''
import ROOT
rooObservable = self.observable.renderRoofit(workspace)
params = self.getExpectation()
if self.UseRooParametricHist:
rooParams = [p.renderRoofit(workspace) for p in params]
# need a dummy hist to generate proper binning
dummyHist = _to_TH1(np.zeros(len(self._params)), self.observable.binning, self.observable.name)
rooTemplate = ROOT.RooParametricHist(self.name, self.name, rooObservable, ROOT.RooArgList.fromiter(rooParams), dummyHist)
rooNorm = ROOT.RooAddition(self.name + '_norm', self.name + '_norm', ROOT.RooArgList.fromiter(rooParams))
workspace.add(rooTemplate)
workspace.add(rooNorm)
else:
# RooParametricStepFunction expects parameters to represent PDF density (i.e. bin width normalized, and integrates to 1)
norm = params.sum()
norm.name = self.name + '_norm'
norm.intermediate = False
binw = np.diff(self.observable.binning)
dparams = params / binw / norm
for p, oldp in zip(dparams, params):
p.name = oldp.name + "_density"
p.intermediate = False
# The last bin value is defined by 1 - sum(others), so no need to render it
rooParams = [p.renderRoofit(workspace) for p in dparams[:-1]]
rooTemplate = ROOT.RooParametricStepFunction(self.name, self.name,
rooObservable,
ROOT.RooArgList.fromiter(rooParams),
self.observable.binningTArrayD(),
self.observable.nbins
)
workspace.add(rooTemplate)
rooNorm = norm.renderRoofit(workspace) # already rendered but we want to return it
return rooTemplate, rooNorm
def combineParamEffect(self, param):
'''
Combine cannot build shape param effects for parameterized templates, so we have to do it in the model
        For normalization effects, I am not sure what happens: if combine adds the nuisance properly then we just
        need the effect size line as below, and we correspondingly should ignore it when calculating effects ourselves.
        This would be annoying though, because then getExpectation() would need to behave differently between combine rendering and otherwise.
'''
if param not in self._paramEffectsUp:
return '-'
elif 'shape' in param.combinePrior:
return '1'
else:
up = self._paramEffectsUp[param]
down = self._paramEffectsDown[param]
return '%.3f/%.3f' % (up, down)
class TransferFactorSample(ParametericSample):
def __init__(self, name, sampletype, transferfactor, dependentsample, observable=None):
'''
Create a sample that depends on another Sample by some transfer factor.
        The transfer factor can be a constant, an array of parameters of same length
as the dependent sample binning, or a matrix of parameters where the second
dimension matches the sample binning, i.e. expectation = tf @ dependent_expectation.
The latter requires an additional observable argument to specify the definition of the first dimension.
In all cases, please use numpy object arrays of Parameter types.
'''
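        # e.g. with a 2D transfer factor: expectation[i] = sum_j tf[i, j] *
        # dependent_expectation[j], so the second dimension of tf must match
        # the dependent sample's binning.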
if not isinstance(transferfactor, np.ndarray):
raise ValueError("Transfer factor is not a numpy array")
if not isinstance(dependentsample, Sample):
raise ValueError("Dependent sample does not inherit from Sample")
if len(transferfactor.shape) == 2:
if observable is None:
raise ValueError("Transfer factor is 2D array, please provide an observable")
params = np.dot(transferfactor, dependentsample.getExpectation())
elif len(transferfactor.shape) <= 1:
observable = dependentsample.observable
params = transferfactor * dependentsample.getExpectation()
else:
raise ValueError("Transfer factor has invalid dimension")
super(TransferFactorSample, self).__init__(name, sampletype, observable, params)
self._transferfactor = transferfactor
self._dependentsample = dependentsample
@property
def parameters(self):
'''
Set of independent parameters that affect this sample
'''
pset = set()
for p in self._transferfactor:
pset.update(p.getDependents(deep=True))
pset.update(self._dependentsample.parameters)
return pset
| [
"[email protected]"
] | |
6a47ec21f205e0e2961b22fcd6f376cc90061d1b | 8c730ccb9ec23fd9cbcb5903abecda86bf50e6ab | /config/memory_network_adeb.py | a9d6fef7a06170bbbae130786c8052f74d71095d | [] | no_license | JimStearns206/taxi | 93278b49e485bbe2feaa08a3e11f0f79e884bd0d | b6566c010be7c871a5b6c199feaf1dfda0910ade | refs/heads/master | 2021-01-21T07:07:59.274279 | 2015-07-16T22:59:50 | 2015-07-16T22:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from blocks.initialization import IsotropicGaussian, Constant
from blocks.algorithms import AdaDelta, CompositeRule, GradientDescent, RemoveNotFinite, StepRule, Momentum
import data
from model.memory_network import Model, Stream
n_begin_end_pts = 5 # how many points we consider at the beginning and end of the known trajectory
dim_embeddings = [
('origin_call', data.origin_call_train_size, 10),
('origin_stand', data.stands_size, 10),
('week_of_year', 52, 10),
('day_of_week', 7, 10),
('qhour_of_day', 24 * 4, 10),
('day_type', 3, 10),
]
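# Each entry is (categorical feature name, vocabulary size, embedding dim).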
class MLPConfig(object):
__slots__ = ('dim_input', 'dim_hidden', 'dim_output', 'weights_init', 'biases_init')
prefix_encoder = MLPConfig()
prefix_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
prefix_encoder.dim_hidden = [100, 100]
prefix_encoder.weights_init = IsotropicGaussian(0.001)
prefix_encoder.biases_init = Constant(0.0001)
candidate_encoder = MLPConfig()
candidate_encoder.dim_input = n_begin_end_pts * 2 * 2 + sum(x for (_, _, x) in dim_embeddings)
candidate_encoder.dim_hidden = [100, 100]
candidate_encoder.weights_init = IsotropicGaussian(0.001)
candidate_encoder.biases_init = Constant(0.0001)
embed_weights_init = IsotropicGaussian(0.001)
step_rule = Momentum(learning_rate=0.001, momentum=0.9)
batch_size = 32
valid_set = 'cuts/test_times_0'
max_splits = 1
num_cuts = 1000
train_candidate_size = 1000
valid_candidate_size = 10000
load_model = False
| [
"[email protected]"
] | |
053626d020f792d2a77cd4ec679ef747ea630e2d | f7139e3979fc32d96de0082d6402d94be64f0f2e | /Exercise-3/sensor_stick/scripts/segmentation.py | 6c7fc70194ee0bd5fa128ec4b7da45cd26ff2b2e | [
"MIT"
] | permissive | jaycode/RoboND-Perception-Exercises | 9eb652b15ad5eccd171b4894035fbcbaa185b4a1 | 8918730423fa4e788dc01352872644c065d58b4c | refs/heads/master | 2021-01-16T20:04:15.198564 | 2018-02-14T06:30:44 | 2018-02-14T06:30:44 | 100,196,660 | 0 | 1 | null | 2017-08-13T18:30:43 | 2017-08-13T18:30:43 | null | UTF-8 | Python | false | false | 3,791 | py | #!/usr/bin/env python
# Import modules
from pcl_helper import *
# TODO: Define functions as required
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# TODO: Convert ROS msg to PCL data
pcl_data = ros_to_pcl(pcl_msg)
# TODO: Voxel Grid Downsampling
vox = pcl_data.make_voxel_grid_filter()
LEAF_SIZE = 0.01
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
# TODO: PassThrough Filter
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
# Extract outliers
outlier_filter = cloud_filtered.make_statistical_outlier_filter()
outlier_filter.set_mean_k(50)
x = 1.0
outlier_filter.set_std_dev_mul_thresh(x)
cloud_filtered = outlier_filter.filter()
# TODO: RANSAC Plane Segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.01
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
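    # inliers: indices of points on the fitted plane (the table surface);
    # coefficients: plane model [a, b, c, d] for ax + by + cz + d = 0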
# TODO: Extract inliers and outliers
cloud_objects = cloud_filtered.extract(inliers, negative=True)
cloud_table = cloud_filtered.extract(inliers, negative=False)
# TODO: Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(cloud_objects)
tree = white_cloud.make_kdtree()
ec = white_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(0.05)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(1500)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
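    # cluster_indices is a list of lists: one list of point indices per
    # detected object cluster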
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# TODO: Convert PCL data to ROS messages
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
if __name__ == '__main__':
# TODO: ROS node initialization
rospy.init_node('clustering', anonymous=True)
# TODO: Create Subscribers
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud",
pc2.PointCloud2, pcl_callback,
queue_size=1)
# TODO: Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2,
queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2,
queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2,
queue_size=1)
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| [
"[email protected]"
] | |
9d38d3cb8de2e003c85e45d93eff2ac7cf72a2b1 | 9e780f17eb49171d1f234944563225ca22b3c286 | /postgresqleu/membership/migrations/0008_membermail.py | 1d9ae5505932c90b5884e9723a0702bec6d8cd49 | [
"MIT"
] | permissive | pgeu/pgeu-system | e5216d5e90eec6c72770b88a5af4b3fd565cda59 | 885cfdcdadd4a721f72b699a39f26c94d1f636e0 | refs/heads/master | 2023-08-06T13:03:55.606562 | 2023-08-03T12:47:37 | 2023-08-03T12:47:37 | 161,434,221 | 15 | 27 | MIT | 2023-05-30T11:21:24 | 2018-12-12T04:48:14 | Python | UTF-8 | Python | false | false | 812 | py | # Generated by Django 3.2.11 on 2022-02-07 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('membership', '0007_meeting_reminders'),
]
operations = [
migrations.CreateModel(
name='MemberMail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sentat', models.DateTimeField(auto_now_add=True, db_index=True)),
('sentfrom', models.CharField(max_length=100)),
('subject', models.CharField(max_length=100)),
('message', models.TextField(max_length=8000)),
('sentto', models.ManyToManyField(to='membership.Member')),
],
),
]
| [
"[email protected]"
] | |
53ea2685d2b68240d10424f11046e839be8682ef | b72d0900bec98fcee6c725cef035c02ca29bbf1b | /Python/VirtualEnvironment/portfolio/.history/portfolio/views_20201119102645.py | b701bf886f4d03f578254db395f0d10035583ba1 | [
"MIT"
] | permissive | sugamkarki/NAMI-Year-II-TERM-I-Group_Project | 68b8808c8607858a313e8b4d601d8d12c6edda2b | f0a9a5f219ccbec024eb5316361db3fca46e171c | refs/heads/master | 2023-06-28T19:07:19.330236 | 2021-07-24T03:05:42 | 2021-07-24T03:05:42 | 312,819,148 | 0 | 0 | MIT | 2021-07-24T12:45:06 | 2020-11-14T13:08:08 | Python | UTF-8 | Python | false | false | 183 | py | from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
def cat():
    # Body missing in this .history snapshot; stubbed so the module parses.
    pass
| [
"[email protected]"
] | |
49defc368cb5599babac9b0973cbb3f3da2485e8 | acad69f0abe162eea0cb13cbe15bfd88f6da08b4 | /down-stream-tasks/mmdetection/tests/test_models/test_dense_heads/test_tood_head.py | 9d8c79c47fd1d441b368e1d7246d27775dd50d7e | [
"Apache-2.0"
] | permissive | zhangzjn/EMO | 69afcac53800d8b9a390f1214e178e2ca4da3b24 | 141afbdbce04683790f0699f256327ec420be442 | refs/heads/main | 2023-08-27T19:04:23.313676 | 2023-08-15T04:09:55 | 2023-08-15T04:09:55 | 584,987,542 | 139 | 9 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# since Focal Loss is not supported on CPU
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
| [
"[email protected]"
] | |
5b115168345a1652c56f759de440862388473ee0 | 5da988c176252fca1b558190eff74ef3b89afc9f | /instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py | fe33a2f2dd1cca06c37f6d6179aa05655d9378c6 | [
"Apache-2.0"
] | permissive | kinvolk/opentelemetry-python | 3801376ee6bdb46d85d8876a97713e698e1241ce | 47483865854c7adae7455f8441dab7f814f4ce2a | refs/heads/master | 2023-05-25T19:36:05.130267 | 2020-11-02T17:29:59 | 2020-11-02T17:29:59 | 201,488,070 | 1 | 2 | Apache-2.0 | 2023-05-16T18:48:46 | 2019-08-09T14:56:28 | Python | UTF-8 | Python | false | false | 7,123 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
from falcon import testing
from opentelemetry.instrumentation.falcon import FalconInstrumentor
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCode
from opentelemetry.util import ExcludeList
from .app import make_app
class TestFalconInstrumentation(TestBase):
def setUp(self):
super().setUp()
FalconInstrumentor().instrument()
self.app = make_app()
def client(self):
return testing.TestClient(self.app)
def tearDown(self):
super().tearDown()
with self.disable_logging():
FalconInstrumentor().uninstrument()
def test_get(self):
self._test_method("GET")
def test_post(self):
self._test_method("POST")
def test_patch(self):
self._test_method("PATCH")
def test_put(self):
self._test_method("PUT")
def test_delete(self):
self._test_method("DELETE")
def test_head(self):
self._test_method("HEAD")
def _test_method(self, method):
self.client().simulate_request(method=method, path="/hello")
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(
span.name, "HelloWorldResource.on_{0}".format(method.lower())
)
self.assertEqual(span.status.status_code, StatusCode.UNSET)
self.assert_span_has_attributes(
span,
{
"component": "http",
"http.method": method,
"http.server_name": "falconframework.org",
"http.scheme": "http",
"host.port": 80,
"http.host": "falconframework.org",
"http.target": "/",
"net.peer.ip": "127.0.0.1",
"net.peer.port": "65133",
"http.flavor": "1.1",
"falcon.resource": "HelloWorldResource",
"http.status_text": "Created",
"http.status_code": 201,
},
)
self.memory_exporter.clear()
def test_404(self):
self.client().simulate_get("/does-not-exist")
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "HTTP GET")
self.assertEqual(span.status.status_code, StatusCode.ERROR)
self.assert_span_has_attributes(
span,
{
"component": "http",
"http.method": "GET",
"http.server_name": "falconframework.org",
"http.scheme": "http",
"host.port": 80,
"http.host": "falconframework.org",
"http.target": "/",
"net.peer.ip": "127.0.0.1",
"net.peer.port": "65133",
"http.flavor": "1.1",
"http.status_text": "Not Found",
"http.status_code": 404,
},
)
def test_500(self):
try:
self.client().simulate_get("/error")
except NameError:
pass
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.name, "ErrorResource.on_get")
self.assertFalse(span.status.is_ok)
self.assertEqual(span.status.status_code, StatusCode.ERROR)
self.assertEqual(
span.status.description,
"NameError: name 'non_existent_var' is not defined",
)
self.assert_span_has_attributes(
span,
{
"component": "http",
"http.method": "GET",
"http.server_name": "falconframework.org",
"http.scheme": "http",
"host.port": 80,
"http.host": "falconframework.org",
"http.target": "/",
"net.peer.ip": "127.0.0.1",
"net.peer.port": "65133",
"http.flavor": "1.1",
"http.status_code": 500,
},
)
def test_uninstrument(self):
self.client().simulate_get(path="/hello")
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 1)
self.memory_exporter.clear()
FalconInstrumentor().uninstrument()
self.app = make_app()
self.client().simulate_get(path="/hello")
spans = self.memory_exporter.get_finished_spans()
self.assertEqual(len(spans), 0)
@patch(
"opentelemetry.instrumentation.falcon._excluded_urls",
ExcludeList(["ping"]),
)
def test_exclude_lists(self):
self.client().simulate_get(path="/ping")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
self.client().simulate_get(path="/hello")
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 1)
def test_traced_request_attributes(self):
self.client().simulate_get(path="/hello?q=abc")
span = self.memory_exporter.get_finished_spans()[0]
self.assertNotIn("query_string", span.attributes)
self.memory_exporter.clear()
middleware = self.app._middleware[0][ # pylint:disable=W0212
0
].__self__
with patch.object(
middleware, "_traced_request_attrs", ["query_string"]
):
self.client().simulate_get(path="/hello?q=abc")
span = self.memory_exporter.get_finished_spans()[0]
self.assertIn("query_string", span.attributes)
self.assertEqual(span.attributes["query_string"], "q=abc")
def test_traced_not_recording(self):
mock_tracer = Mock()
mock_span = Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
mock_tracer.use_span.return_value.__enter__ = mock_span
mock_tracer.use_span.return_value.__exit__ = mock_span
with patch("opentelemetry.trace.get_tracer") as tracer:
tracer.return_value = mock_tracer
self.client().simulate_get(path="/hello?q=abc")
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
| [
"[email protected]"
] | |
7b3ef0762b6b97c6ae1824d7ea35d51ffab78fdd | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/client/gui/prb_control/restrictions/limits.py | 24bec330b4cc11c9dfd3ffdd2f4533d7eab4872a | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,198 | py | from collections import defaultdict
import weakref
from CurrentVehicle import g_currentVehicle
from constants import PREBATTLE_ACCOUNT_STATE, PREBATTLE_TYPE
from gui.prb_control import getClassLevelLimits, getTotalLevelLimits
from gui.prb_control import getPrebattleRosters, getMaxSizeLimits
from gui.prb_control.restrictions.interfaces import IVehicleLimit, ITeamLimit
from gui.prb_control.settings import PREBATTLE_ROSTER, PREBATTLE_RESTRICTION
from items.vehicles import VehicleDescr, VEHICLE_CLASS_TAGS
from prebattle_shared import isTeamValid, isVehicleValid
class VehicleIsValid(IVehicleLimit):
def check(self, teamLimits):
if not g_currentVehicle.isReadyToFight():
return (False, PREBATTLE_RESTRICTION.VEHICLE_NOT_READY)
vehicle = g_currentVehicle.item
shellsList = []
for shell in vehicle.shells:
shellsList.extend([shell.intCD, shell.count])
return isVehicleValid(vehicle.descriptor, shellsList, teamLimits)
class TeamIsValid(ITeamLimit):
def check(self, rosters, team, teamLimits):
rosterKey = None
        if team == 1:
            rosterKey = PREBATTLE_ROSTER.ASSIGNED_IN_TEAM1
        elif team == 2:
rosterKey = PREBATTLE_ROSTER.ASSIGNED_IN_TEAM2
if rosterKey in rosters:
accountsInfo = rosters[rosterKey]
else:
accountsInfo = {}
return isTeamValid(accountsInfo, teamLimits)
class TeamNoPlayersInBattle(ITeamLimit):
def __init__(self, prbType):
super(TeamNoPlayersInBattle, self).__init__()
self.__range = PREBATTLE_ROSTER.getRange(prbType)
def __isPlayerInBattle(self, player):
return player['state'] & PREBATTLE_ACCOUNT_STATE.IN_BATTLE != 0
def check(self, rosters, team, teamLimits):
for rosterKey in self.__range:
if rosterKey & team and rosterKey in rosters:
filtered = filter(self.__isPlayerInBattle, rosters[rosterKey].itervalues())
if len(filtered):
return (False, PREBATTLE_RESTRICTION.HAS_PLAYER_IN_BATTLE)
return (True, '')
class MaxCount(ITeamLimit):
def __init__(self, assigned = True):
super(MaxCount, self).__init__()
self.__assigned = assigned
def check(self, rosters, team, teamLimits):
if self.__assigned:
index = 0
            if team == 1:
key = PREBATTLE_ROSTER.ASSIGNED_IN_TEAM1
else:
key = PREBATTLE_ROSTER.ASSIGNED_IN_TEAM2
else:
index = 1
            if team == 1:
key = PREBATTLE_ROSTER.UNASSIGNED_IN_TEAM1
else:
key = PREBATTLE_ROSTER.UNASSIGNED_IN_TEAM2
maxCount = getMaxSizeLimits(teamLimits)[index]
if key in rosters and len(rosters[key]) >= maxCount:
return (False, PREBATTLE_RESTRICTION.LIMIT_MAX_COUNT)
return (True, '')
class TotalMaxCount(ITeamLimit):
def check(self, rosters, team, teamLimits):
maxCount = sum(getMaxSizeLimits(teamLimits))
result, restriction = True, ''
        if team == 1:
keys = [PREBATTLE_ROSTER.ASSIGNED_IN_TEAM1, PREBATTLE_ROSTER.UNASSIGNED_IN_TEAM1]
else:
keys = [PREBATTLE_ROSTER.ASSIGNED_IN_TEAM2, PREBATTLE_ROSTER.UNASSIGNED_IN_TEAM2]
playersCount = 0
for key in keys:
if key in rosters:
playersCount += len(rosters[key])
if playersCount >= maxCount:
result, restriction = False, PREBATTLE_RESTRICTION.LIMIT_MAX_COUNT
return (result, restriction)
class VehiclesLevelLimit(ITeamLimit):
def check(self, rosters, team, teamLimits):
isValid, notValidReason = True, ''
assignedRosters = rosters.get(team, {})
totalLevel, classLevels = self.__calculate(assignedRosters)
for classTag in VEHICLE_CLASS_TAGS:
minLevel, maxLevel = getClassLevelLimits(teamLimits, classTag)
currentLevel = classLevels[classTag]
vClassTags = PREBATTLE_RESTRICTION.getVehClassTags()
if not minLevel <= currentLevel <= maxLevel:
if not currentLevel == 0:
isValid = False
notValidReason = classTag in vClassTags and vClassTags[classTag]
else:
notValidReason = PREBATTLE_RESTRICTION.LIMIT_CLASSES
if isValid:
minLevel, maxLevel = getTotalLevelLimits(teamLimits)
if not minLevel <= totalLevel <= maxLevel:
isValid = False
notValidReason = PREBATTLE_RESTRICTION.LIMIT_TOTAL_LEVEL
return (isValid, notValidReason)
def __calculate(self, rosters):
classLevels = defaultdict(lambda : 0)
totalLevel = 0
vehClassTags = set(VEHICLE_CLASS_TAGS)
for roster in rosters.itervalues():
if not roster['state'] & PREBATTLE_ACCOUNT_STATE.READY:
continue
vehCompDescr = roster.get('vehCompDescr', '')
if vehCompDescr is not None and len(vehCompDescr):
vehType = VehicleDescr(compactDescr=vehCompDescr).type
level = vehType.level
union = vehClassTags & vehType.tags
if len(union):
vehClass = union.pop()
classLevels[vehClass] = max(classLevels[vehClass], level)
totalLevel += level
return (totalLevel, classLevels)
class LimitsCollection(object):
def __init__(self, functional, vehicleLimits, teamLimits):
self.__functional = weakref.proxy(functional)
self.__vehicleLimits = vehicleLimits
self.__teamLimits = teamLimits
def clear(self):
self.__functional = None
self.__vehicleLimits = ()
self.__teamLimits = ()
return
def isVehicleValid(self):
result, errorCode = True, ''
settings = self.__functional.getSettings()
teamLimits = settings.getTeamLimits(self.__functional.getPlayerTeam())
for limit in self.__vehicleLimits:
result, errorCode = limit.check(teamLimits)
if not result:
break
return (result, errorCode)
def isTeamValid(self, team = None):
result, errorCode = True, ''
if team is None:
team = self.__functional.getPlayerTeam()
settings = self.__functional.getSettings()
teamLimits = settings.getTeamLimits(team)
rosters = getPrebattleRosters()
for limit in self.__teamLimits:
result, errorCode = limit.check(rosters, team, teamLimits)
if not result:
break
return (result, errorCode)
def isTeamsValid(self):
settings = self.__functional.getSettings()
rosters = getPrebattleRosters()
for team in [1, 2]:
teamLimits = settings.getTeamLimits(team)
for limit in self.__teamLimits:
result, errorCode = limit.check(rosters, team, teamLimits)
if not result:
return (result, errorCode)
return (True, '')
def isMaxCountValid(self, team, assigned):
settings = self.__functional.getSettings()
rosters = getPrebattleRosters()
return MaxCount(assigned=assigned).check(rosters, team, settings.getTeamLimits(team))
class DefaultLimits(LimitsCollection):
def __init__(self, functional):
super(DefaultLimits, self).__init__(functional, (VehicleIsValid(),), (TeamIsValid(),))
class TrainingLimits(LimitsCollection):
def __init__(self, functional):
super(TrainingLimits, self).__init__(functional, (VehicleIsValid(),), (TeamNoPlayersInBattle(PREBATTLE_TYPE.TRAINING), TeamIsValid()))
class CompanyLimits(LimitsCollection):
def __init__(self, functional):
super(CompanyLimits, self).__init__(functional, (VehicleIsValid(),), (VehiclesLevelLimit(), TeamIsValid()))
class BattleSessionLimits(LimitsCollection):
def __init__(self, functional):
super(BattleSessionLimits, self).__init__(functional, (VehicleIsValid(),), (VehiclesLevelLimit(), TeamIsValid()))
| [
"[email protected]"
] | |
b8c18f0363ba38923dd3d1d02986cb5781e661f1 | fc5fa8501e8a62291a48c82611e1b74b961ca561 | /nopad_inception_v3_fcn/model.py | 393c286132aa65d20952d7d7f3f4fab539db0815 | [
"Apache-2.0"
] | permissive | hitesh-hk/google-research | fa3d3e31cce995fa6da89322dab4993bf1c1ead8 | ddc22300c4cb3223654c9a981f892dc0f6286e35 | refs/heads/master | 2021-02-17T18:57:31.267570 | 2020-01-17T14:49:25 | 2020-01-17T14:54:27 | 245,119,290 | 1 | 1 | Apache-2.0 | 2020-03-05T09:24:01 | 2020-03-05T09:24:00 | null | UTF-8 | Python | false | false | 19,851 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Contains the definition for no padding inception FCN.
This is a variant of inception v3 by removing all the paddings. This change
allows the network to be trained and inference run with different patch size
(Fully Convolutional Network, FCN mode) while having the same inference results.
"""
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
def _trim_border_px(inputs, n):
"""Crop n pixels around the border of inputs.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
n: an integer for number of pixels to crop.
Returns:
cropped tensor.
Raises:
ValueError: if cropping leads to empty output tensor.
"""
  if 2 * n >= min(inputs.shape[1], inputs.shape[2]):
    raise ValueError(
        'n (%d) would crop the tensor to an empty output: 2 * n must be '
        'smaller than the input height and width.' % n)
return inputs[:, n:-n, n:-n, :]
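# For example (illustrative shapes): trimming n=2 from a [1, 9, 9, 3] tensor
# yields [1, 5, 5, 3]; the Inception blocks below use this to align branches
# whose valid (unpadded) convolutions shrink by different amounts.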
def nopad_inception_v3_base(inputs,
min_depth=16,
depth_multiplier=1.0,
num_final_1x1_conv=0,
scope=None):
"""Constructs a no padding Inception v3 network from inputs.
Args:
inputs: a tensor of size [batch_size, height, width, channels]. Must be
floating point. If a pretrained checkpoint is used, pixel values should be
the same as during training.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels) for
all convolution ops. The value must be greater than zero. Typical usage
will be to set this value in (0, 1) to reduce the number of parameters or
computation cost of the model.
num_final_1x1_conv: Int, number of final 1x1 conv layers.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
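  # e.g. depth_multiplier=0.5 reduces a nominal depth(64) layer to 32
  # channels, never dropping below min_depth.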
with tf.variable_scope(scope, 'NopadInceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='VALID'):
# 911 x 911 x 3
end_point = 'Conv2d_1a_3x3'
net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
# 455 x 455 x 32
end_point = 'Conv2d_2a_3x3'
net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
# 453 x 453 x 32
end_point = 'Conv2d_2b_3x3'
net = slim.conv2d(net, depth(64), [3, 3], scope=end_point)
end_points[end_point] = net
# 451 x 451 x 64
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
# 225 x 225 x 64
end_point = 'Conv2d_3b_1x1'
net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
# 225 x 225 x 80.
end_point = 'Conv2d_4a_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
# 223 x 223 x 192.
end_point = 'MaxPool_5a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
# 111 x 111 x 192.
# Inception blocks
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='VALID'):
# Mixed_5b: 107 x 107 x 256.
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 2), # branch_0: 111 x 111 x 64
branch_1, # branch_1: 107 x 107 x 64
branch_2, # branch_2: 107 x 107 x 96
_trim_border_px(branch_3, 1) # branch_3: 109 x 109 x 32
],
3)
end_points[end_point] = net
# Mixed_5c: 103 x 103 x 288.
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(
branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 2), # branch_0: 107 x 107 x 64
branch_1, # branch_1: 103 x 103 x 64
branch_2, # branch_2: 103 x 103 x 96
_trim_border_px(branch_3, 1) # branch_3: 105 x 105 x 64
],
3)
end_points[end_point] = net
# Mixed_5d: 99 x 99 x 288.
end_point = 'Mixed_5d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 2), # branch_0: 103 x 103 x 64
branch_1, # branch_1: 99 x 99 x 64
branch_2, # branch_2: 99 x 99 x 96
_trim_border_px(branch_3, 1) # branch_2: 101 x 101 x 64
],
3)
end_points[end_point] = net
# Mixed_6a: 49 x 49 x 768.
end_point = 'Mixed_6a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net,
depth(384), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1,
depth(96), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = tf.concat(
[
branch_0, # branch_0: 49 x 49 x 384
branch_1, # branch_1: 49 x 49 x 96
branch_2, # branch_2: 49 x 49 x 288
],
3)
end_points[end_point] = net
# Mixed_6b: 37 x 37 x 768.
end_point = 'Mixed_6b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(
branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 6), # branch_0: 49 x 49 x 192
_trim_border_px(branch_1, 3), # branch_1: 43 x 43 x 192
branch_2, # branch_2: 37 x 37 x 192
_trim_border_px(branch_3, 5) # branch_3: 47 x 47 x 192
],
3)
end_points[end_point] = net
# Mixed_6c: 25 x 25 x 768.
end_point = 'Mixed_6c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 6), # branch_0: 37 x 37 x 192
_trim_border_px(branch_1, 3), # branch_1: 31 x 31 x 192
branch_2, # branch_2: 25 x 25 x 192
_trim_border_px(branch_3, 5) # branch_3: 35 x 35 x 192
],
3)
end_points[end_point] = net
# mixed_6: 13 x 13 x 768.
end_point = 'Mixed_6d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 6), # branch_0: 25 x 25 x 192
_trim_border_px(branch_1, 3), # branch_1: 19 x 19 x 192
branch_2, # branch_2: 13 x 13 x 192
_trim_border_px(branch_3, 5) # branch_3: 23 x 23 x 192
],
3)
end_points[end_point] = net
# Mixed_6e: 1 x 1 x 768.
end_point = 'Mixed_6e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
[
_trim_border_px(branch_0, 6), # branch_0: 13 x 13 x 192
_trim_border_px(branch_1, 3), # branch_1: 7 x 7 x 192
branch_2, # branch_2: 1 x 1 x 192
_trim_border_px(branch_3, 5) # branch_3: 11 x 11 x 192
],
3)
end_points[end_point] = net
      for i in range(num_final_1x1_conv):
        net = slim.conv2d(
            net, depth(256), [1, 1], scope='Final_Conv2d_{}_1x1'.format(i))
        end_points['Final_Conv2d_{}_1x1'.format(i)] = net
return net, end_points
def nopad_inception_v3_fcn(inputs,
num_classes=1000,
dropout_keep_prob=0.8,
is_training=True,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
reuse=None,
inception_fcn_stride=1,
scope='NoPadInceptionV3'):
"""No pad inception FCN model.
Args:
inputs: A tensor of size [batch_size, height, width, channels]. Must be
floating point. If a pretrained checkpoint is used, pixel values should be
the same as during training.
num_classes: number of predicted classes. If 0 or None, the logits layer is
omitted and the input features to the logits layer (before dropout) are
returned instead.
dropout_keep_prob: the percentage of activation values that are retained.
is_training: whether is training or not.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels) for
all convolution ops. The value must be greater than zero. Typical usage
will be to set this value in (0, 1) to reduce the number of parameters or
computation cost of the model.
prediction_fn: a function to get predictions out of logits.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
inception_fcn_stride: The stride that's used to match the input stride with
output label resolution.
scope: Optional variable_scope.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
with tf.variable_scope(
      scope, 'NoPadInceptionV3', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = nopad_inception_v3_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Final pooling and prediction
with tf.variable_scope('Logits'):
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
stride=inception_fcn_stride,
scope='Conv2d_1c_1x1')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
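# --- Editor's usage sketch (not part of the original file): a minimal way to
# build the FCN in TF1 graph mode. The 911x911 input size is an illustrative
# assumption sized for the all-VALID convolution stack.
def _example_build_fcn():
  images = tf.placeholder(tf.float32, [1, 911, 911, 3])
  logits, end_points = nopad_inception_v3_fcn(
      images, num_classes=2, is_training=False)
  return logits, end_points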
| [
"[email protected]"
] | |
c41b811032ac7e36dd9d4ab116a69cd029889b32 | ec726dac5bcd01ea807a3b24c9e25f9951f6e910 | /scripts/Lock.py | 42e6bcceb8660339fc43fc88988d5086f2b9182d | [] | no_license | cms-sw/int-build | 06d7fd680b0fcc06cce58869b928f2547b4731b1 | c0f2ca0ef96cdd46d9588ef908d11e9a9cab6618 | refs/heads/master | 2021-01-01T18:02:04.757560 | 2015-10-26T08:28:17 | 2015-10-26T08:28:17 | 11,394,116 | 0 | 3 | null | 2015-10-16T15:02:19 | 2013-07-13T21:00:45 | Python | UTF-8 | Python | false | false | 2,432 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from os import getpid, makedirs, kill
from os.path import join, getmtime
from commands import getstatusoutput
from time import sleep, time
def isProcessRunning(pid):
    # Sending signal 0 performs error checking only: it succeeds when a
    # process with this pid exists and raises OSError otherwise.
    running = False
    try:
        kill(pid, 0)
        running = True
    except OSError:
        pass
    return running
class Lock(object):
def __init__(
self,
dirname,
checkStale=False,
stableGap=600,
):
self.piddir = dirname
self.pidfile = join(self.piddir, 'pid')
self.pid = str(getpid())
self.locktime = 0
self._hasLock = self._get()
if not self._hasLock and self.locktime and checkStale \
and time() - self.locktime >= stableGap:
self._release(True)
self._hasLock = self._get()
def getLock(self, waitStep=2, maxTries=0):
if waitStep <= 0:
waitStep = 2
while not self._hasLock:
sleep(waitStep)
self._hasLock = self._get()
if maxTries > 0:
maxTries -= 1
if maxTries <= 0:
break
return
def __del__(self):
self._release()
def __nonzero__(self):
return self._hasLock
def _release(self, force=False):
if not force and self._hasLock and self._get():
force = True
if force:
getstatusoutput('rm -rf %s' % self.piddir)
self.locktime = 0
self._hasLock = False
    def _get(self, tries=3, success=3):
        # Try to take ownership: read the pid file, back off if a live owner
        # exists, otherwise (re)create it and re-check that our pid sticks a
        # few times before declaring success.
        if tries <= 0:
            return False
        pid = self._readPid()
        if pid:
            if pid == self.pid:
                if success <= 0:
                    return True
                sleep(0.001)
                return self._get(tries, success - 1)
            if isProcessRunning(int(pid)):
                return False
        self._create()
        sleep(1)
        return self._get(tries - 1, success)
def _readPid(self):
pid = None
try:
pid = open(self.pidfile).readlines()[0]
self.locktime = getmtime(self.pidfile)
except:
pid = None
return pid
def _create(self):
self._release(True)
try:
makedirs(self.piddir)
lock = open(self.pidfile, 'w')
lock.write(self.pid)
lock.close()
except:
pass
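# --- Editor's usage sketch (illustrative only; the lock directory path is an
# assumption). Acquire, do work, and release by dropping the reference:
if __name__ == '__main__':
    lock = Lock('/tmp/example.lock', checkStale=True)
    lock.getLock(waitStep=2, maxTries=30)  # poll until acquired or give up
    if lock:  # __nonzero__ reports ownership
        try:
            pass  # ... critical section ...
        finally:
            del lock  # __del__ removes the pid directory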
| [
"[email protected]"
] | |
6912fbc6262eef219acef4bb0f46fcdca0cde550 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1544+107/sdB_PG_1544+107_coadd.py | d34ff0b3147d3f6d2c4257e6a18c9c3868d100a4 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[236.659375,10.503742], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_PG_1544+107/sdB_PG_1544+107_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_PG_1544+107/sdB_PG_1544+107_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
345cefe5bc1a3363f162b744cdfbff6621c1c3c1 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/compute/v20180930/_inputs.py | 6b8bf7813d4f67dd8aa5cd7584ba4d0bd3b9ba67 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 14,859 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'CreationDataArgs',
'DiskSkuArgs',
'EncryptionSettingsCollectionArgs',
'EncryptionSettingsElementArgs',
'ImageDiskReferenceArgs',
'KeyVaultAndKeyReferenceArgs',
'KeyVaultAndSecretReferenceArgs',
'SnapshotSkuArgs',
'SourceVaultArgs',
]
@pulumi.input_type
class CreationDataArgs:
def __init__(__self__, *,
create_option: pulumi.Input[str],
image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
source_uri: Optional[pulumi.Input[str]] = None,
storage_account_id: Optional[pulumi.Input[str]] = None):
"""
Data used when creating a disk.
:param pulumi.Input[str] create_option: This enumerates the possible sources of a disk's creation.
:param pulumi.Input['ImageDiskReferenceArgs'] image_reference: Disk source information.
:param pulumi.Input[str] source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot or disk.
:param pulumi.Input[str] source_uri: If createOption is Import, this is the URI of a blob to be imported into a managed disk.
:param pulumi.Input[str] storage_account_id: If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription
"""
pulumi.set(__self__, "create_option", create_option)
if image_reference is not None:
pulumi.set(__self__, "image_reference", image_reference)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if source_uri is not None:
pulumi.set(__self__, "source_uri", source_uri)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="createOption")
def create_option(self) -> pulumi.Input[str]:
"""
This enumerates the possible sources of a disk's creation.
"""
return pulumi.get(self, "create_option")
@create_option.setter
def create_option(self, value: pulumi.Input[str]):
pulumi.set(self, "create_option", value)
@property
@pulumi.getter(name="imageReference")
def image_reference(self) -> Optional[pulumi.Input['ImageDiskReferenceArgs']]:
"""
Disk source information.
"""
return pulumi.get(self, "image_reference")
@image_reference.setter
def image_reference(self, value: Optional[pulumi.Input['ImageDiskReferenceArgs']]):
pulumi.set(self, "image_reference", value)
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Copy, this is the ARM id of the source snapshot or disk.
"""
return pulumi.get(self, "source_resource_id")
@source_resource_id.setter
def source_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_resource_id", value)
@property
@pulumi.getter(name="sourceUri")
def source_uri(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Import, this is the URI of a blob to be imported into a managed disk.
"""
return pulumi.get(self, "source_uri")
@source_uri.setter
def source_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_uri", value)
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[pulumi.Input[str]]:
"""
If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription
"""
return pulumi.get(self, "storage_account_id")
@storage_account_id.setter
def storage_account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_account_id", value)
@pulumi.input_type
class DiskSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
:param pulumi.Input[str] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class EncryptionSettingsCollectionArgs:
def __init__(__self__, *,
enabled: pulumi.Input[bool],
encryption_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]] = None):
"""
Encryption settings for disk or snapshot
:param pulumi.Input[bool] enabled: Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
:param pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]] encryption_settings: A collection of encryption settings, one for each disk volume.
"""
pulumi.set(__self__, "enabled", enabled)
if encryption_settings is not None:
pulumi.set(__self__, "encryption_settings", encryption_settings)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]:
"""
A collection of encryption settings, one for each disk volume.
"""
return pulumi.get(self, "encryption_settings")
@encryption_settings.setter
def encryption_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]]):
pulumi.set(self, "encryption_settings", value)
@pulumi.input_type
class EncryptionSettingsElementArgs:
def __init__(__self__, *,
disk_encryption_key: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']] = None,
key_encryption_key: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']] = None):
"""
Encryption settings for one disk volume.
:param pulumi.Input['KeyVaultAndSecretReferenceArgs'] disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key
:param pulumi.Input['KeyVaultAndKeyReferenceArgs'] key_encryption_key: Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
if disk_encryption_key is not None:
pulumi.set(__self__, "disk_encryption_key", disk_encryption_key)
if key_encryption_key is not None:
pulumi.set(__self__, "key_encryption_key", key_encryption_key)
@property
@pulumi.getter(name="diskEncryptionKey")
def disk_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]:
"""
Key Vault Secret Url and vault id of the disk encryption key
"""
return pulumi.get(self, "disk_encryption_key")
@disk_encryption_key.setter
def disk_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']]):
pulumi.set(self, "disk_encryption_key", value)
@property
@pulumi.getter(name="keyEncryptionKey")
def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:
"""
Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
"""
return pulumi.get(self, "key_encryption_key")
@key_encryption_key.setter
def key_encryption_key(self, value: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]):
pulumi.set(self, "key_encryption_key", value)
@pulumi.input_type
class ImageDiskReferenceArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
lun: Optional[pulumi.Input[int]] = None):
"""
The source image used for creating the disk.
:param pulumi.Input[str] id: A relative uri containing either a Platform Image Repository or user image reference.
:param pulumi.Input[int] lun: If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
pulumi.set(__self__, "id", id)
if lun is not None:
pulumi.set(__self__, "lun", lun)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
A relative uri containing either a Platform Image Repository or user image reference.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def lun(self) -> Optional[pulumi.Input[int]]:
"""
If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
"""
return pulumi.get(self, "lun")
@lun.setter
def lun(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "lun", value)
@pulumi.input_type
class KeyVaultAndKeyReferenceArgs:
def __init__(__self__, *,
key_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey
:param pulumi.Input[str] key_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "key_url", key_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="keyUrl")
def key_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "key_url")
@key_url.setter
def key_url(self, value: pulumi.Input[str]):
pulumi.set(self, "key_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class KeyVaultAndSecretReferenceArgs:
def __init__(__self__, *,
secret_url: pulumi.Input[str],
source_vault: pulumi.Input['SourceVaultArgs']):
"""
Key Vault Secret Url and vault id of the encryption key
:param pulumi.Input[str] secret_url: Url pointing to a key or secret in KeyVault
:param pulumi.Input['SourceVaultArgs'] source_vault: Resource id of the KeyVault containing the key or secret
"""
pulumi.set(__self__, "secret_url", secret_url)
pulumi.set(__self__, "source_vault", source_vault)
@property
@pulumi.getter(name="secretUrl")
def secret_url(self) -> pulumi.Input[str]:
"""
Url pointing to a key or secret in KeyVault
"""
return pulumi.get(self, "secret_url")
@secret_url.setter
def secret_url(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_url", value)
@property
@pulumi.getter(name="sourceVault")
def source_vault(self) -> pulumi.Input['SourceVaultArgs']:
"""
Resource id of the KeyVault containing the key or secret
"""
return pulumi.get(self, "source_vault")
@source_vault.setter
def source_vault(self, value: pulumi.Input['SourceVaultArgs']):
pulumi.set(self, "source_vault", value)
@pulumi.input_type
class SnapshotSkuArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:param pulumi.Input[str] name: The sku name.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The sku name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SourceVaultArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None):
"""
The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
:param pulumi.Input[str] id: Resource Id
"""
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
| [
"[email protected]"
] | |
55e9e54940ecf7d642f3da6d4a45d111c723a725 | 0a091d537701f93cbab5c7f76c7bf8d117c93887 | /alembic/versions/138007156428_fix_komm_fylke_mapping.py | d97348a0a5fe9f5cdfe87180d6ed2538ad1ffdc7 | [
"MIT"
] | permissive | atlefren/beerdatabase | 61ecfc8b9de6e6b202c50b501ccded87387de289 | d3acf2b02966e058d3840cd167c1c787c0cb88ce | refs/heads/master | 2021-05-04T11:01:47.117527 | 2016-09-14T22:00:34 | 2016-09-14T22:00:34 | 45,804,354 | 10 | 3 | null | 2016-01-25T20:45:08 | 2015-11-08T23:38:31 | JavaScript | UTF-8 | Python | false | false | 1,164 | py | """fix komm fylke mapping
Revision ID: 138007156428
Revises: 13603cc8e9a7
Create Date: 2015-12-06 23:12:25.241712
"""
# revision identifiers, used by Alembic.
revision = '138007156428'
down_revision = '13603cc8e9a7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.execute('DROP MATERIALIZED VIEW komm_fylke CASCADE;')
op.execute('''
CREATE MATERIALIZED VIEW komm_fylke AS
SELECT
k.name as name,
k.kommnr as kommnr,
f.name as fylke_name,
f.fylkesnr as fylkesnr,
k.geom as geom
FROM
kommune k, fylke f
WHERE
k.geom && f.geom
AND
st_contains(f.geom, k.geom)
''')
op.execute('''
CREATE VIEW pol_shop_komm_fylke as
SELECT
s.*,
k.name as komm_name,
k.kommnr as kommnr,
k.fylke_name as fylke_name,
k.fylkesnr as fylkesnr
FROM
pol_shop s,
komm_fylke k
WHERE
k.geom::geography && s.geog
AND
st_contains(k.geom, s.geog::geometry);
''')
def downgrade():
pass
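# Editor's note: downgrade() above is left as a no-op; a symmetric revert
# (sketch, untested) would drop what upgrade() created:
#     op.execute('DROP VIEW pol_shop_komm_fylke;')
#     op.execute('DROP MATERIALIZED VIEW komm_fylke;')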
| [
"[email protected]"
] | |
e9f551acfb12c47aa1548d24eee4b522ceb8073b | 8cd15fba24b6dfa431f3764932101969f5fb524f | /JAMediaImagenes/gtk2/Interfaz/ToolbarPrincipal.py | 01fb926646a3946f16ca0652c42039c7313e706a | [] | no_license | srevinsaju/JAMediaSuite | c872b4781657bf1bcf63908f71abeca799b8c666 | 1813d1205cf31f89be3c4512eb495baed427494f | refs/heads/master | 2020-12-04T12:14:53.794749 | 2019-01-05T12:52:13 | 2019-01-05T12:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import gtk
import gobject
import commands
class ToolbarPrincipal(gtk.Toolbar):
__gsignals__ = {
"accion": (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (gobject.TYPE_STRING,))}
def __init__(self):
gtk.Toolbar.__init__(self)
abrir = gtk.ToolButton()
abrir.set_stock_id(gtk.STOCK_OPEN)
abrir.set_tooltip_text("Abrir")
abrir.connect("clicked", self.__emit_senial, "open")
self.insert(abrir, -1)
self.__guardar = gtk.ToolButton()
self.__guardar.set_stock_id(gtk.STOCK_SAVE)
self.__guardar.set_tooltip_text("Guardar")
self.__guardar.connect("clicked", self.__emit_senial, "save")
self.insert(self.__guardar, -1)
self.__guardar_como = gtk.ToolButton()
self.__guardar_como.set_stock_id(gtk.STOCK_SAVE_AS)
self.__guardar_como.set_tooltip_text("Guardar Como")
self.__guardar_como.connect("clicked", self.__emit_senial, "save_as")
self.insert(self.__guardar_como, -1)
self.insert(gtk.SeparatorToolItem(), -1)
self.__zoom_in = gtk.ToolButton()
self.__zoom_in.set_stock_id(gtk.STOCK_ZOOM_IN)
self.__zoom_in.set_tooltip_text("Acercar")
self.__zoom_in.connect("clicked", self.__emit_senial, "zoom_in")
self.insert(self.__zoom_in, -1)
self.__zoom_out = gtk.ToolButton()
self.__zoom_out.set_stock_id(gtk.STOCK_ZOOM_OUT)
self.__zoom_out.set_tooltip_text("Alejar")
self.__zoom_out.connect("clicked", self.__emit_senial, "zoom_out")
self.insert(self.__zoom_out, -1)
self.__zoom_100 = gtk.ToolButton()
self.__zoom_100.set_stock_id(gtk.STOCK_ZOOM_100)
self.__zoom_100.set_tooltip_text("Ver a tamaño original")
self.__zoom_100.connect("clicked", self.__emit_senial, "zoom_100")
self.insert(self.__zoom_100, -1)
self.__zoom_fit = gtk.ToolButton()
self.__zoom_fit.set_stock_id(gtk.STOCK_ZOOM_FIT)
self.__zoom_fit.set_tooltip_text("Ocupar todo el espacio disponible")
self.__zoom_fit.connect("clicked", self.__emit_senial, "zoom_fit")
self.insert(self.__zoom_fit, -1)
self.insert(gtk.SeparatorToolItem(), -1)
self.__izquierda = gtk.ToolButton()
self.__izquierda.set_stock_id(gtk.STOCK_UNDO)
self.__izquierda.set_tooltip_text("Rotar a la izquierda")
self.__izquierda.connect("clicked", self.__emit_senial, "izquierda")
self.insert(self.__izquierda, -1)
self.__derecha = gtk.ToolButton()
self.__derecha.set_stock_id(gtk.STOCK_REDO)
self.__derecha.set_tooltip_text("Rotar a la derecha")
self.__derecha.connect("clicked", self.__emit_senial, "derecha")
self.insert(self.__derecha, -1)
self.insert(gtk.SeparatorToolItem(), -1)
self.__anterior = gtk.ToolButton()
self.__anterior.set_stock_id(gtk.STOCK_GO_BACK)
self.__anterior.set_tooltip_text("Ver imagen anterior")
self.__anterior.connect("clicked", self.__emit_senial, "anterior")
self.insert(self.__anterior, -1)
self.__siguiente = gtk.ToolButton()
self.__siguiente.set_stock_id(gtk.STOCK_GO_FORWARD)
self.__siguiente.set_tooltip_text("Ver imagen siguiente")
self.__siguiente.connect("clicked", self.__emit_senial, "siguiente")
self.insert(self.__siguiente, -1)
self.show_all()
def __emit_senial(self, widget, senial):
self.emit("accion", senial)
def has_file(self, hasfile, acceso, dirpath=False):
buttons = [self.__guardar, self.__guardar_como, self.__zoom_in,
self.__zoom_out, self.__zoom_100, self.__zoom_fit,
self.__izquierda, self.__derecha, self.__anterior,
self.__siguiente]
for button in buttons:
button.set_sensitive(hasfile)
self.__guardar.set_sensitive(acceso)
paths = 0
if dirpath:
files = os.listdir(dirpath)
for f in files:
path = os.path.join(dirpath, f)
datos = commands.getoutput(
'file -ik %s%s%s' % ("\"", path, "\""))
if "image" in datos:
paths += 1
if paths > 1:
break
for button in [self.__anterior, self.__siguiente]:
button.set_sensitive(bool(paths > 1))
| [
"[email protected]"
] | |
5cf5686aab6d2c0ad688cea8069de3ca8a68b512 | 2df47589ca457d16fbffd4e1bccf5133174a0b97 | /highcharts/core/tests/test_product_json_view.py | b51a7fddda42ab847b32c19939890f918833a368 | [] | no_license | bguerbas/highcharts | a805419cb8d5a00bc3f82b5c4df285598f7685d8 | 571fba58465136c5040266b3d4ba2d65a5cc740c | refs/heads/master | 2022-02-12T19:33:12.244474 | 2016-06-04T05:00:24 | 2016-06-04T05:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | from django.test import TestCase
from highcharts.core.models import Category, Product
class TestGet(TestCase):
def setUp(self):
category = Category.objects.create(category='Papelaria')
Product.objects.create(
product='A4',
price=4.2,
category=category
)
self.resp = self.client.get('/product_json/')
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_mimetype(self):
self.assertEqual('application/json', self.resp['Content-Type'])
def test_contents(self):
data = self.resp.json()
self.assertIn('products', data.keys())
self.assertEqual(1, len(data['products']))
self.assertEqual('Papelaria', data['products'][0]['categoria'])
self.assertEqual(100.0, data['products'][0]['porcentagem'])
| [
"[email protected]"
] | |
92bca2bb4a0d14e571d6c8140331872c810d61c0 | de066f2aaf7810a9377020a2d25d674a52547a15 | /Cap05_GUI_Tkinter/extras/03_Barron_Stone/Chap05_Advanced_Widgets/06_configuring_widgets_styles.py | 08de715b89e479ed091ff2a6fa3d1a6052b49cc0 | [] | no_license | frclasso/2nd_Step_Python_Fabio_Classo | 91092b24c442cced4f7c5c145e1e9e8e5f7483d9 | ad6eefaeb4e6283c461562e7fddcb0aa81f2d90e | refs/heads/master | 2022-12-10T09:42:52.724283 | 2019-07-27T20:28:17 | 2019-07-27T20:28:17 | 146,759,779 | 1 | 0 | null | 2022-12-08T05:55:53 | 2018-08-30T14:13:49 | Python | UTF-8 | Python | false | false | 2,311 | py | #!/usr/bin/env python3
# Configuring widget styles
# A style describes how the widget will be displayed according to the state
# it is in:
# active, disabled, focus, pressed, selected, background, readonly, alternate, invalid, hover
# We will work with themes, which are collections of styles for widgets
from tkinter import *
from tkinter import ttk
root = Tk()
button1 = ttk.Button(root, text='Button 1')
button2 = ttk.Button(root, text='Button 2')
button1.pack()
button2.pack()
# instantiating a Style object
style = ttk.Style()
# checking the themes available on the system
print(style.theme_names()) # ('aqua', 'clam', 'alt', 'default', 'classic')
# to check which theme is in use, call style.theme_use() with no arguments
print(style.theme_use())
# to change the theme, call style.theme_use() passing the theme name as a parameter
style.theme_use('classic')
# returning to the previous theme
style.theme_use('aqua')
# By convention widget style names prefix the widget name with the letter "T"
# example: TButton is the default style name for Button
# exception: Treeview, not TTreeview
# to discover the style name a widget is using, call winfo_class()
print(button1.winfo_class()) # TButton
# configuring the TButton style changes the appearance of every button in the program
style.configure('TButton', foreground='blue')
# We can also create custom styles derived from existing ones
style.configure('Alarm.TButton', foreground='orange', font=('Arial', 24, 'bold'))
# applying the custom style to button2
button2.config(style='Alarm.TButton')
# We can configure the style based on the widget state using style.map()
style.map('Alarm.TButton', foreground=[('pressed', 'purple'), ('disabled', 'grey')])
button2.state(['disabled'])
# To inspect all the internal elements of a style, use the layout() method
print(style.layout('TButton')) # passing the style name as a parameter
# To check the options available for each element, use
# style.element_options('element name')
print(style.element_options('Button.label'))
# to check which value is in effect for a specific style option
print(style.lookup('TButton', 'foreground')) # style, property
root.mainloop() | [
"[email protected]"
] | |
88f772fda60dad89eacffbe396367d9c5fd3de8f | fd67592b2338105e0cd0b3503552d188b814ad95 | /egoi_api/apis/paths/__init__.py | d0fcf2e17c920e939c549a52c875cecfa03fb1db | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # do not import all endpoints into this module because that uses a lot of memory and stack frames
# if you need the ability to import all endpoints from this module, import them with
# from egoi_api.apis.path_to_api import path_to_api
| [
"[email protected]"
] | |
1efc75fd3a62326bea8b5dfc1ee2e3d82520b1cc | 18887a0808c0a06a69be3e66c6337295bfc7d99e | /menus/models.py | b9443c0e078dc25a3ecb36e23f2231136700b1d8 | [] | no_license | teedee22/tobyd | 78adf69d7a02cce42dc5a94e0e58007b8e6be196 | 5c54a817608a3911dd44840be82d2bbea44f35c3 | refs/heads/master | 2022-12-14T08:29:12.952292 | 2019-09-25T20:13:31 | 2019-09-25T20:13:31 | 205,281,622 | 0 | 0 | null | 2022-12-08T06:35:44 | 2019-08-30T01:33:39 | Python | UTF-8 | Python | false | false | 1,776 | py | from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.edit_handlers import (
MultiFieldPanel,
InlinePanel,
FieldPanel,
PageChooserPanel,
)
from wagtail.core.models import Orderable
from wagtail.snippets.models import register_snippet
class MenuItem(Orderable):
"""For each menu item in the menu"""
link_title = models.CharField(max_length=100, blank=True, null=True)
link_url = models.URLField(max_length=500, blank=True, null=True)
link_page = models.ForeignKey(
"wagtailcore.page",
null=True,
blank=True,
related_name="+",
on_delete=models.CASCADE,
)
open_in_new_tab = models.BooleanField(default=False, blank=True)
page = ParentalKey("Menu", related_name="menu_items")
highlighted = models.BooleanField(default=False, blank=True)
panels = [
FieldPanel("link_title"),
FieldPanel("link_url"),
PageChooserPanel("link_page"),
FieldPanel("open_in_new_tab"),
FieldPanel("highlighted"),
]
@property
def link(self):
if self.link_url:
return self.link_url
elif self.link_page:
return self.link_page.url
return "missing page url"
@register_snippet
class Menu(ClusterableModel):
"""The main menu"""
title = models.CharField(max_length=100)
slug = AutoSlugField(populate_from="title", editable=True)
panels = [
MultiFieldPanel(
[FieldPanel("title"), FieldPanel("slug")], heading="Menu"
),
InlinePanel("menu_items", label="Menu Items")
]
def __str__(self):
return self.title
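# --- Editor's usage sketch (illustrative; not part of the original module).
# ClusterableModel lets child MenuItem rows be staged in memory and committed
# together on save():
def build_example_menu():
    menu = Menu(title="Main menu")
    menu.menu_items.add(MenuItem(link_title="Home", link_url="/"))
    menu.save()
    return menu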
| [
"[email protected]"
] | |
d21cd8904bb3eea88d7fd3e5c4a93fff99003a16 | 26dd0732426322eb7c411b7f53d72ec3dddd63fe | /ABC_169/B.py | 42659b9159cc54e9ceb89999a6cf9a30bc723e12 | [] | no_license | Jinmin-Goh/AtCoder | 417290531bf92f79e1285052eb82d9a8a3c7b138 | 68d3f7840748814123beebabf478c9316268f166 | refs/heads/master | 2022-12-10T21:31:41.411567 | 2020-08-29T15:02:18 | 2020-08-29T15:02:18 | 262,826,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | # Contest No.: ABC169
# Problem No.: B
# Solver: JEMINI
# Date: 20200531
import sys
def main():
n = int(input())
nums = list(map(int, sys.stdin.readline().split()))
ans = 1
for i in nums:
if i == 0:
ans = 0
break
if ans > 10 ** 18:
continue
ans *= i
if ans > 10 ** 18:
print(-1)
else:
print(ans)
return
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
ca05b371020a184e2d08e3162cf7c33c53f0a712 | 1fa2ad5ad57f08f805b9175ab2a961a24d621101 | /src/test_net.py | 6808c27a0db116df6527ec8343064fde04b06359 | [] | no_license | ZQPei/Lenet_cifar10_pytorch | e61626bdcadd1abec4389b5f1e40edd665db68fd | 911fd66e41d4ad7c03f13b603df8bc7c187acb38 | refs/heads/master | 2020-03-16T17:36:24.723685 | 2018-05-10T02:55:00 | 2018-05-10T02:55:00 | 132,839,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | import torch
import torch.nn as nn
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from Net import Net
# load net
mynet = torch.load('model/net.pth')
print(mynet)
#import ipdb; ipdb.set_trace()
# show me the weight
weight_conv1 = list(mynet.parameters())[0]
weight_conv1 = (weight_conv1-weight_conv1.min())/(weight_conv1.max()-weight_conv1.min())
weight_conv1 = weight_conv1.cpu()
weight_conv1 = torchvision.utils.make_grid(weight_conv1)
weight_conv1_np = weight_conv1.detach().numpy()
weight_conv1_np = weight_conv1_np.transpose(1,2,0)
weight_conv2 = list(mynet.parameters())[2]
weight_conv2 = (weight_conv2-weight_conv2.min())/(weight_conv2.max()-weight_conv2.min())
weight_conv2 = weight_conv2.cpu()
weight_conv2 = weight_conv2.view(16*6,1,5,5)
print(weight_conv2.shape)
weight_conv2 = torchvision.utils.make_grid(weight_conv2)
weight_conv2_np = weight_conv2.detach().numpy()
print(weight_conv2_np.shape)
weight_conv2_np = weight_conv2_np.transpose(1,2,0)
#weight_conv2_np = weight_conv2_np.squeeze(1)
plt.figure()
plt.imshow(weight_conv1_np)
plt.figure()
plt.imshow(weight_conv2_np)
plt.show()
# test on my img
img = plt.imread("myimg/4.jpg")
print(img.shape)
img = img.transpose(2,1,0)
img = torch.unsqueeze(torch.from_numpy(img),0)
print(img.shape)
img = img.type(torch.float)
img = (img-img.min())/(img.max()-img.min())
img = (img-0.5)/0.5
img = img.cuda()
pred = mynet(img)
print(pred)
pred = pred.max(1)[1]
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print(classes[pred]) | [
"[email protected]"
] | |
7921cac556470b0a4ac9583697ede3ba8d7170d3 | 2d58c1351ab970eb55f4832b09582592e96468d5 | /p74.py | 7dbccc89adfe89703af596f378511832a3be92eb | [] | no_license | 0x0400/LeetCode | 832bc971c2cae9eecb55f5b14e8c34eaec0d9e26 | 94bb9fedc908490cc52d87def317c057fadaeceb | refs/heads/master | 2023-02-24T20:13:11.345873 | 2023-02-10T16:46:31 | 2023-02-10T16:46:31 | 84,653,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # https://leetcode.com/problems/search-a-2d-matrix/
from typing import List
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rowLen = len(matrix)
colLen = len(matrix[0])
for rowIdx in range(0, rowLen):
if matrix[rowIdx][colLen-1] < target:
continue
if matrix[rowIdx][colLen-1] == target:
return True
for colIdx in range(0, colLen):
if matrix[rowIdx][colIdx] == target:
return True
if matrix[rowIdx][colIdx] > target:
return False
return False
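# Editor's sketch: an O(log(m*n)) alternative for the same problem, treating
# the matrix as one flattened sorted array.
def search_matrix_binary(matrix: List[List[int]], target: int) -> bool:
    rows, cols = len(matrix), len(matrix[0])
    lo, hi = 0, rows * cols - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        val = matrix[mid // cols][mid % cols]
        if val == target:
            return True
        if val < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False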
| [
"[email protected]"
] | |
f0890b75c3c25dfcbb281f0881f8e215f9a72c1e | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_23844.py | c65bb53c5f20a6b1517566f9d2447106889237be | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # Python create tuples using function returning multiple values
new_list = [(name, value) + extra_info(name) for name, value in old_list]
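# Illustration (editor's addition): assuming a helper that returns a tuple,
# e.g. extra_info('a') == (1, 'A'), each result row concatenates flat:
#     old_list = [('a', 1)]  ->  new_list == [('a', 1, 1, 'A')]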
| [
"[email protected]"
] | |
43082e96c8ebbd7f7616eece51e9a54b4937c2c7 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/clouds_20200703155604.py | 15b31a2ea014762a7c9ddce1050ddf21dfc44820 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | def jumpingClouds(c):
    i = 0
    jumps = 0
    # greedy strategy: always take the 2-cloud jump when that cloud is safe
    while i < len(c) - 1:
        if i + 2 < len(c) and c[i + 2] == 0:
            i += 2
        else:
            i += 1
        jumps += 1
    print(jumps)
jumpingClouds([0,0,1,0,0,1,0]) | [
"[email protected]"
] | |
3f0ed451120ff0dd0158c67eff2f9059a8e2aa1d | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_Mchi-1_Mphi-150_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_260000_5_cff.py | db44454d063b108e8986c360d27b91c8a1df2c8b | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:32873', '1:32891', '1:25573', '1:34291', '1:27705', '1:34076', '1:34402', '1:34416', '1:29147', '1:34429', '1:34449', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/ECAA5628-DF19-EA11-B3DA-0242AC130002.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/260000/E6CE347E-2018-EA11-8EBA-AC1F6B34AA78.root']); | [
"[email protected]"
] | |
80e5a14b25950761d13f38e235d30d5e4bf02c98 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/cultu.py | f75285b1c2c8d5c7c0f1708402c098b18e105f8c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 120 | py | ii = [('SadlMLP.py', 1), ('FitzRNS3.py', 1), ('ClarGE2.py', 1), ('WestJIT2.py', 1), ('GodwWLN.py', 1), ('SoutRD.py', 2)] | [
"[email protected]"
] | |
25a4d8eb546827e8520c4882b124731d98a7838a | 9080e6e53da365b0d811099e2e992041cf4b5b47 | /0x06-python-classes/0-square.py | a28766ef62fd1c5a8882dd34a656adb8aff3a88f | [] | no_license | benjamesian/holbertonschool-higher_level_programming | 213ad8c39d1fc2ee81843124a46914be166445d3 | 99f00414833757e3b156c148927a858ce38baa0e | refs/heads/master | 2020-07-23T00:52:46.858544 | 2020-02-11T22:03:24 | 2020-02-11T22:03:24 | 207,389,880 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | #!/usr/bin/python3
"""Noot
"""
class Square:
"""Empty Square class
"""
pass
| [
"[email protected]"
] | |
071413884073143eac16e6b991abd4fee106fc0e | 9bcb5032d27ca321f489c035f7d46019ffdf4b85 | /numericalFunctions/ptwXY/Python/Test/UnitTesting/thicken/thicken.py | e40b151f77d3bf9074457d4b03816df30afbfdc4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/gidiplus | 128ef4d4acbcb264e31794a535cd95e8c77d8a96 | e1c6f0e4de51bc4d7616c5c4676b9818c4b9817c | refs/heads/master | 2023-08-31T06:21:14.519577 | 2023-02-13T18:35:20 | 2023-02-13T18:35:20 | 187,251,526 | 10 | 3 | NOASSERTION | 2021-12-23T00:28:07 | 2019-05-17T16:48:24 | C++ | UTF-8 | Python | false | false | 2,306 | py | # <<BEGIN-copyright>>
# Copyright 2019, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# <<END-copyright>>
import os
from numericalFunctions import pointwiseXY_C
if( 'CHECKOPTIONS' in os.environ ) :
options = os.environ['CHECKOPTIONS'].split( )
if( '-e' in options ) : print( __file__ )
CPATH = '../../../../Test/UnitTesting/thicken'
os.system( 'cd %s; thicken -v > v' % CPATH )
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )
def getData( ls, hasLabel ) :
i = 0
for l in ls :
if( l.strip( ) != '' ) : break
i = i + 1
ls = ls[i:]
if( len( ls ) == 0 ) : return( None, None, None )
label = None
if( hasLabel ) : label, ls = ls[0].strip( ), ls[1:]
length, ls = ls[0], ls[1:]
if( '# length = ' != length[:11] ) : raise Exception( 'Line does not contain length info: "%s"' % ls[0].strip( ) )
length = int( length.split( '=' )[1] )
data = [ list( map( float, ls[i].split( )[:2] ) ) for i in range( length ) ]
return( ls[length:], label, pointwiseXY_C.pointwiseXY_C( data, initialSize = 10, overflowSize = 10 ) )
def compareValues( label, i, v1, v2 ) :
sv1, sv2 = '%.7g' % v1, '%.7g' % v2
if( sv1 != sv2 ) : raise Exception( 'Values %s %s diff at %d for label = %s' % ( v1, v2, i, label ) )
def thicken( label, original, data ) :
values = label.split( ':' )[1].split( '=' )
sectionSubdivideMax = int( values[1].split( )[0] )
dxMax = float( values[2].split( )[0] )
fxMax = float( values[3].split( )[0] )
thick = original.thicken( sectionSubdivideMax = sectionSubdivideMax, dDomainMax = dxMax, fDomainMax = fxMax )
if( len( data ) != len( thick ) ) : raise Exception( 'len( data ) = %d != len( thick ) = %d for label = "%s"' % \
( len( data ), len( thick ), label ) )
if( 'log-log' in label ) : return
for i, xy in enumerate( data ) :
xc, yc = xy
xp, yp = thick[i]
compareValues( label, i, xc, xp )
compareValues( label, i, yc, yp )
hasLabel = False
while( 1 ) :
ls, label, data = getData( ls, hasLabel )
if( ls is None ) : break
if( hasLabel ) :
thicken( label, original, data )
else :
original = data
hasLabel = True
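# Editor's note: the label layout thicken() expects, inferred from the parsing
# above (the values shown are illustrative):
#     'lin-lin: sectionSubdivideMax = 10 dDomainMax = 0.1 fDomainMax = 1.2'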
| [
"[email protected]"
] | |
9c3991a7edb65b7ffd45e10112c675d5522eafdc | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/malvern.py | 6e7a9b4b78daa025f614d595cd55f674ae95c7f1 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 103 | py | ii = [('WilbRLW5.py', 4), ('ClarGE2.py', 2), ('BuckWGM2.py', 1), ('HowiWRL2.py', 2), ('DibdTRL.py', 4)] | [
"[email protected]"
] | |
686f9dae605c6b38e4db127878cb0fbcd0360617 | b665374878dd4a0b565afb3be8f41c97515b9d33 | /elifecrossref/preprint.py | 71cc91577a502fd31e407296fa7b1751fd1ebd91 | [
"MIT"
] | permissive | elifesciences/elife-crossref-xml-generation | e5ab62fec56653fee32ba7b9a2751df37b691b66 | 56440ebc20d4a652201011c9511ed64dfcc80c3d | refs/heads/develop | 2023-08-21T16:34:24.019886 | 2023-08-12T00:50:17 | 2023-08-12T00:50:17 | 95,716,515 | 5 | 4 | MIT | 2023-09-07T19:45:58 | 2017-06-28T22:21:40 | Python | UTF-8 | Python | false | false | 753 | py | from xml.etree.ElementTree import Element, SubElement
from elifearticle import utils as eautils
from elifecrossref import related
def set_preprint(parent, preprint):
"""
    add rel:intra_work_relation tag for a preprint
"""
related_item_tag = SubElement(parent, "rel:related_item")
related_item_type = "intra_work_relation"
relationship_type = "hasPreprint"
if preprint.doi:
identifier_type = "doi"
related_item_text = preprint.doi
elif preprint.uri:
identifier_type = "uri"
related_item_text = preprint.uri
related.set_related_item_work_relation(
related_item_tag,
related_item_type,
relationship_type,
identifier_type,
related_item_text,
)
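# Editor's usage sketch (illustrative): `article.preprint` is assumed to be an
# object carrying a `doi` or `uri` attribute, as the branches above expect.
#     parent = Element("doi_relations")
#     set_preprint(parent, article.preprint)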
| [
"[email protected]"
] | |
38916d27a6cbfea28a51061f2baec16d7065fd62 | 3056736f013b25d1e70adb355082c578e5091314 | /ml_quality/datasets/shit/snake_board_test.py | 92a2badc6ac91075e56ab0ca18e1394e5e96ab35 | [] | no_license | flerchy/codestyle-core | dcad1385b76678e4473f0804d2ecfaa03866d124 | a009bcd2f17dadd440ea5ff24bd3167e38948bff | refs/heads/master | 2021-01-20T03:21:16.694322 | 2017-06-05T05:52:35 | 2017-06-05T05:52:35 | 89,524,181 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | import pygame, sys, random
from pygame.locals import *
from basics import snake, point, board, food
from basics.direction import Direction
def encrypt_snake(snake):
"""Returns encrpted Body list to send over netowrk."""
enc_data = "%%body%%"
for point in snake.get_body_points():
enc_data += str(point.get_x()) + "%%sep_xy%%"
enc_data += str(point.get_y())
enc_data += "%%eop%%"
enc_data += "%%body%%"
enc_data += "%%dir%%"
enc_data += snake.get_direction()
enc_data += "%%dir%%"
enc_data += "%%color%%"
enc_data += snake.get_color()
enc_data += "%%color%%"
return enc_data
def get_snake_points(enc_data):
"""Returns Snake object for given encypted string."""
body_list = []
for points in enc_data.split("%%body%%")[1].split("%%eop%%")[:-1]:
x_y = points.split("%%sep_xy%%")
body_list.append(point.Point(int(x_y[0]), int(x_y[1])))
return body_list
def get_snake_direction(enc_data):
return enc_data.split("%%dir%%")[1]
def get_snake_color(enc_data):
return enc_data.split("%%color%%")[1]
def get_food_location():
"""Returns random x and y coordinates for food."""
return (random.randint(0,20), random.randint(0,15))
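# Editor's round-trip check for the codec above (illustrative; assumes Point
# instances compare equal by coordinates):
def _codec_round_trip(s):
    enc = encrypt_snake(s)
    return (get_snake_points(enc) == s.get_body_points()
            and get_snake_direction(enc) == s.get_direction()
            and get_snake_color(enc) == s.get_color())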
##First Snake
point1 = point.Point(0,0)
point2 = point.Point(0,1)
point3 = point.Point(0,2)
snake1 = snake.Snake([point1, point2, point3], Direction.RIGHT)
snake_food = food.Food(20,15)
#PyGame Variables
pygame.init()
FPS = 6
GAME_OVER = False
fpsClock = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Snakes')
myfont = pygame.font.SysFont("Comic Sans MS", 30)
game_over_text = myfont.render("Game Over!", 1, (0,0,0))
WHITE = (255, 255, 255)
snake_body = pygame.image.load('imgs/snake/'+snake1.get_color()+'/snake_body.png')
snake_mouth_icon = {}
snake_mouth_icon['yellow'] = {
'right' : pygame.image.load('imgs/snake/yellow/snake_mouth_right.gif'),
'left' : pygame.image.load('imgs/snake/yellow/snake_mouth_left.gif'),
'up' : pygame.image.load('imgs/snake/yellow/snake_mouth_up.gif'),
'down' : pygame.image.load('imgs/snake/yellow/snake_mouth_down.gif'),
}
snake_food_icon = pygame.image.load('imgs/frog.png')
#Networking Part
while True:
#snake_mouth = pygame.image.load('imgs/snake/'+snake1.get_color()+'/snake_mouth_'+snake1.get_direction()+'.gif')
DISPLAYSURF.fill(WHITE)
snake_body_points = snake1.get_body_points()
snake_mouth_point = snake_body_points[-1]
enc_data = encrypt_snake(snake1)
#print enc_data
#print snake_body_points == get_snake_points(enc_data),
#print get_snake_direction(enc_data), get_snake_color(enc_data)
print snake_food
for body_point in snake_body_points[:-1]:
DISPLAYSURF.blit(snake_body, (20*body_point.get_x(), 20*body_point.get_y()))
DISPLAYSURF.blit(snake_mouth_icon[snake1.get_color()][snake1.get_direction()],
(20*snake_mouth_point.get_x(), 20*snake_mouth_point.get_y()))
DISPLAYSURF.blit(snake_food_icon, (20*snake_food.get_x(), 20*snake_food.get_y()))
#direction = random.choice([0,1,3,4])
#print direction
key_pressed = False
if snake1.has_eaten_food(snake_food):
snake_food.update_position()
snake1.grow_snake()
if snake1.is_bitten_by_itself():
GAME_OVER = True
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYUP:
if event.key == K_RIGHT:
snake1.update_direction(Direction.RIGHT)
elif event.key == K_LEFT:
snake1.update_direction(Direction.LEFT)
elif event.key == K_UP:
snake1.update_direction(Direction.DOWN)
elif event.key == K_DOWN:
snake1.update_direction(Direction.UP)
if not GAME_OVER:
snake1.move_snake()
key_pressed = True
if not GAME_OVER and not key_pressed:
snake1.move_snake()
if GAME_OVER:
DISPLAYSURF.blit(game_over_text, (100, 100))
break
pygame.display.update()
fpsClock.tick(FPS)
| [
"[email protected]"
] | |
99e2f8abf29c8d3a63efa35b197921747f42b243 | d3cdceb672f3ffa9d0f7cddfb03062d48a118427 | /hoods/admin.py | cde1e05c3f36d0a160cd75f271913491702fc12e | [] | no_license | sethmuriuki/Nights-Watch | e6e4020276ab87801f964e59da3f29f47b4e5c88 | 57a5bd3444b88442cb5ea988fc0ea5d64f44df2f | refs/heads/master | 2021-05-02T02:28:23.335867 | 2018-03-22T05:49:46 | 2018-03-22T05:49:46 | 120,884,306 | 0 | 1 | null | 2018-06-19T09:30:41 | 2018-02-09T09:18:05 | Python | UTF-8 | Python | false | false | 198 | py | from django.contrib import admin
from . import models
# Register your models here.
class GroupMemberInline(admin.TabularInline):
model = models.GroupMember
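# Editor's sketch: to actually surface the inline on the Group admin page,
# register through a ModelAdmin instead of the bare call below:
#     class GroupAdmin(admin.ModelAdmin):
#         inlines = [GroupMemberInline]
#     admin.site.register(models.Group, GroupAdmin)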
admin.site.register(models.Group) | [
"[email protected]"
] | |
20e8adc9925eb8056ebdcc7f534d23ae41fd9216 | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/field/enter_23210.py | e4c56b74040134ab83fbaa561d5a188a8226e0a2 | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 123 | py | # Mastema | Demon 2nd job
sm.spawnMob(9001036, 640, -14, False)
sm.waitForMobDeath(9001036)
sm.warpInstanceOut(931050110)
| [
"[email protected]"
] | |
35bb586667a937f6c2d36795e7b13b063fa3de4d | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/pageobjects/widgets/members_profile_website.py | 7aae462a94a557e4bcab95a0af413cd84964ba13 | [
"MIT"
] | permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.basepageelement import Text
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileWebsite(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileWebsite,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileWebsite_Locators = self.load_class('MembersProfileWebsite_Locators')
# update this object's locator
self.locators.update(MembersProfileWebsite_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.website = Text(self,{'base':'website'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary with website and access values"""
return {'website' : self.website.value(),
'access' : self.access.value()}
def update(self,website=None,access=None):
"""update the website and access values"""
if website != None:
self.website.value = website
if access != None:
self.access.value = access
self.save.click()
class MembersProfileWebsite_Locators_Base(object):
"""locators for MembersProfileWebsite object"""
locators = {
'base' : "css=.profile-web",
'website' : "css=#profile-url",
'access' : "css=.profile-web select[name='access[org]']",
'sectionkey' : "css=.profile-web .key",
'sectionvalue' : "css=.profile-web .value",
'open' : "css=.profile-web .edit-profile-section",
'close' : "css=.profile-web .edit-profile-section",
'save' : "css=.profile-web .section-edit-submit",
'cancel' : "css=.profile-web .section-edit-cancel",
}
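# --- Editor's usage sketch (illustrative): driving the widget from a page
# object; `profile_page` stands in for whatever owner the framework supplies.
def set_public_website(profile_page):
    w = MembersProfileWebsite(profile_page)
    w.update(website='https://example.org', access='Public')
    return w.value()  # -> {'website': ..., 'access': ...}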
| [
"[email protected]"
] | |
f6aab59774f510e241f1046f6ec4b42798bd38b4 | 8633ec7985ffd7f849210b93bc20e632f8ae8707 | /tree/CMSSW_4_2_8_patch7/src/Validation/RecoTrack/test/borisTests/MTV.py | 4f90e6fb5980d818f54e52ed358645a99d9d285f | [] | no_license | liis/el_track | 2ed5b3b7a64d57473328df0e5faf28808bab6166 | cd7978e5fa95d653bab5825b940911b465172c1a | refs/heads/master | 2016-09-10T20:09:07.882261 | 2015-01-08T14:41:59 | 2015-01-08T14:41:59 | 14,494,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,494 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("MULTITRACKVALIDATOR")
# message logger
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
# source
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles)
#source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( ['file:./aod.root'])
process.source = source
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'START42_V11::All'
### standard includes
process.load('Configuration/StandardSequences/Services_cff')
process.load('Configuration.StandardSequences.GeometryPilot2_cff')
process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
### validation-specific includes
process.load("SimTracker.TrackAssociation.TrackAssociatorByHits_cfi")
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cfi")
process.load("Validation.RecoTrack.cuts_cff")
process.load("Validation.RecoTrack.MultiTrackValidator_cff")
process.load("DQMServices.Components.EDMtoMEConverter_cff")
process.load("Validation.Configuration.postValidation_cff")
process.load("Validation.RecoTrack.TrackValidation_cff")
process.TrackAssociatorByHits.SimToRecoDenominator = cms.string('reco')
#--- To change the fraction of sHits that are required to be matched by the associator
# The default is 0.75
#process.TrackAssociatorByHits.Purity_SimToReco = cms.double(0.60)
#process.TrackAssociatorByHits.Cut_RecoToSim = cms.double(0.60)
#---
########### configuration MultiTrackValidator ########
process.multiTrackValidator.outputFile = 'multitrackvalidator.root'
process.multiTrackValidator.associators = ['TrackAssociatorByHits']
process.multiTrackValidator.skipHistoFit=cms.untracked.bool(False)
process.multiTrackValidator.runStandalone=cms.bool(True)
#process.multiTrackValidator.label=cms.VInputTag(cms.InputTag("generalTracks"),
process.multiTrackValidator.label=cms.VInputTag(
cms.InputTag("cutsRecoTracksHp"),
# cms.InputTag("cutsRecoTracksZero"),
# cms.InputTag("cutsRecoTracksZeroHp"),
# cms.InputTag("cutsRecoTracksFirst"),
# cms.InputTag("cutsRecoTracksFirstHp"),
# cms.InputTag("cutsRecoTracksSecond"),
# cms.InputTag("cutsRecoTracksSecondHp"),
# cms.InputTag("cutsRecoTracksThird"),
# cms.InputTag("cutsRecoTracksThirdHp"),
# cms.InputTag("cutsRecoTracksFourth"),
# cms.InputTag("cutsRecoTracksFourthHp"),
# cms.InputTag("cutsRecoTracksFifth"),
# cms.InputTag("cutsRecoTracksFifthHp")
)
process.multiTrackValidator.useLogPt=cms.untracked.bool(True)
process.multiTrackValidator.minPt = cms.double(0.1)
process.multiTrackValidator.maxPt = cms.double(300.0)
process.multiTrackValidator.nintPt = cms.int32(40)
#--- Filter on TrackingParticles
# pt in [0,2.8] when calculating the tracking Fake rate
# pt in [0,2.5] when calculating the efficiency vs eta
# pt in eta slice when calculating the efficiency vs pt for barrel/transition/endcap
process.multiTrackValidator.useLogPt=cms.untracked.bool(True)
process.multiTrackValidator.histoProducerAlgoBlock.minPt = cms.double(0.1)
process.multiTrackValidator.histoProducerAlgoBlock.maxPt = cms.double(300.0)
process.multiTrackValidator.histoProducerAlgoBlock.nintPt = cms.int32(40)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(0.9)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.9)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(1.4)
#process.multiTrackValidator.minAbsEtaTP = cms.double(1.4)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(2.5)
#process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
#process.multiTrackValidator.maxAbsEtaTP = cms.double(2.8)
process.multiTrackValidator.minAbsEtaTP = cms.double(0.0)
process.multiTrackValidator.maxAbsEtaTP = cms.double(2.5)
#---
#--- uncomment this part to run MTV on GsfTrack collection
#
#process.cutsRecoTracksHp = cms.EDFilter("RecoGsfTrackSelector",
# src = cms.InputTag("electronGsfTracks"),
### src = cms.InputTag("elGsfTracksWithQuality"),
# beamSpot = cms.InputTag("offlineBeamSpot"),
# algorithm = cms.vstring(),
# maxChi2 = cms.double(10000.0),
### #quality = cms.vstring('highPurity'), ## NEW
### quality = cms.vstring('loose'), ## NEW
# quality = cms.vstring(), ## NEW
# minRapidity = cms.double(-5.0),
# maxRapidity = cms.double(5.0),
# tip = cms.double(120.0),
# lip = cms.double(300.0),
# ptMin = cms.double(0.1),
# min3DHit = cms.int32(0),
# minHit = cms.int32(3),
# minAbsEta = cms.double(1.4),
# maxAbsEta = cms.double(2.5)
#)
#process.multiTrackValidator.histoProducerAlgoBlock.useGsf = cms.bool(True)
#---
#--- Filter on track collection
# pt in [0,2.8] when calculating the tracking efficiency
# pt in eta slice when calculating the fake rate vs pt for barrel/transition/endcap
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(0.9)
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.9)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(1.4)
#process.cutsRecoTracksHp.minAbsEta = cms.double(1.4)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(2.5)
process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
process.cutsRecoTracksHp.maxAbsEta = cms.double(2.8)
#process.cutsRecoTracksHp.minAbsEta = cms.double(0.0)
#process.cutsRecoTracksHp.maxAbsEta = cms.double(2.5)
process.multiTrackValidator.UseAssociators = cms.bool(True)
process.ValidationSelectors = cms.Sequence( process.cutsRecoTracksHp
# process.cutsRecoTracksZero*
# process.cutsRecoTracksZeroHp*
# process.cutsRecoTracksFirst*
# process.cutsRecoTracksFirstHp*
# process.cutsRecoTracksSecond*
# process.cutsRecoTracksSecondHp*
# process.cutsRecoTracksThird*
# process.cutsRecoTracksThirdHp*
# process.cutsRecoTracksFourth*
# process.cutsRecoTracksFourthHp*
# process.cutsRecoTracksFifth*
# process.cutsRecoTracksFifthHp
)
process.validation = cms.Sequence(
process.multiTrackValidator
)
# paths
process.p = cms.Path(
process.ValidationSelectors *
process.validation
)
process.schedule = cms.Schedule(
process.p
)
#process.MTVHistoProducerAlgoForTrackerBlock.TpSelectorForEfficiencyVsEta.tip = cms.double(0.5)
| [
"[email protected]"
] | |
0c167adf308cbd963e674e936b502ec49de9948c | 8010b4640a79c5c1fb58dd9b011723b744b3dd47 | /src/unv/web/helpers.py | 3399a083be504cc40f7d81610d2c57ed2b2bf153 | [
"MIT"
] | permissive | c137digital/unv_web | 277578e2a128e193b12e88465fe7c93a4a2019f1 | 52bea090c630b4e2a393c70907d35c9558d259fa | refs/heads/master | 2020-04-15T07:39:27.027454 | 2019-08-29T20:50:53 | 2019-08-29T20:50:53 | 164,498,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import urllib
from aiohttp import web
from unv.utils.files import calc_crc32_for_file
from .deploy import SETTINGS as DEPLOY_SETTINGS
async def render_template(
request, template_name, context=None, status=web.HTTPOk.status_code):
template = request.app['jinja2'].get_template(template_name)
return web.Response(
text=await template.render_async(context or {}),
status=status, charset='utf-8',
content_type='text/html'
)
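# Usage sketch (hedged: the handler, template name, and context are illustrative only):
#   async def index(request):
#       return await render_template(request, 'index.html', {'user': 'guest'})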
def url_for_static(path: str, with_hash: bool = False):
url = DEPLOY_SETTINGS.static_url
directory = DEPLOY_SETTINGS.static_dir
real_path = directory / path.lstrip('/')
hash_ = ''
if with_hash:
hash_ = '?hash={}'.format(calc_crc32_for_file(real_path))
path = str(path).replace(str(directory), '', 1).lstrip('/')
return f"{url}/{path}{hash_}"
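# Example (hedged: the exact URL depends on DEPLOY_SETTINGS): a call such as
#   url_for_static('css/app.css', with_hash=True)
# could yield '<static_url>/css/app.css?hash=1a2b3c4d'; the hash query parameter
# changes with the file's CRC32, which busts stale browser caches on deploys.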
def url_with_domain(path: str):
protocol = 'http'
path = path.lstrip('/')
if DEPLOY_SETTINGS.use_https:
protocol = 'https'
return f'{protocol}://{DEPLOY_SETTINGS.domain}/{path}'
def make_url_for_func(app, with_domain=False):
def url_for(route, **parts):
parts = {key: str(value) for key, value in parts.items()}
url = app.router[route].url_for(**parts)
if with_domain:
url = url_with_domain(str(url))
return url
return url_for
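# Usage sketch (hedged: assumes a route named 'user-profile' is registered on app):
#   url_for = make_url_for_func(app, with_domain=True)
#   profile_url = url_for('user-profile', user_id=42)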
def inline_static_from(path):
with (DEPLOY_SETTINGS.static_dir / path).open('r') as f:
return f.read().replace("\n", "")
| [
"[email protected]"
] | |
b945ff417f597ac2d5502e5f5d09f0d02334f7d4 | d44d11bb5e8a3245a8f2db39f9e460eae2b50c82 | /HKDataBase/tests.py | ec3068a14f0c6b395c56965e5930bdedf44af2db | [] | no_license | banziha104/HKServerA1 | 636f50de907ed5a8764b678b434acbc37bb7ee65 | 14a20432fdf3bcb6574e249f95b8c3662a30ee26 | refs/heads/master | 2021-08-10T12:42:49.889987 | 2017-11-12T11:55:46 | 2017-11-12T11:55:46 | 110,443,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from django.test import TestCase
import os
# Create your tests here.
from os.path import abspath, dirname, join
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
print(os.path.join(BASE_DIR, 'db.sqlite3'))
print(os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings.common"))
| [
"[email protected]"
] | |
2f392a90b98f1dbc100c0fdd1c399ce2843eddef | 700f9f9e319ebd26d2557d64ea3827808dfad2f5 | /tests/fixtures/test_appendices_json/content_02_expected.py | 23c6bae8a17b15b59da7860417ea52c7ae82ffa6 | [
"MIT"
] | permissive | elifesciences/elife-tools | 1b44e660e916a82ef8ff64dd5a6ee5506e517359 | bc16e7dd5d6245077e39f8561b99c9acd510ddf7 | refs/heads/develop | 2023-03-06T08:37:47.424282 | 2023-02-20T20:40:49 | 2023-02-20T20:40:49 | 30,274,058 | 13 | 11 | MIT | 2023-02-20T20:40:50 | 2015-02-04T01:14:41 | Python | UTF-8 | Python | false | false | 3,392 | py | from collections import OrderedDict
expected = [
OrderedDict(
[
("id", u"app1"),
(
"title",
u"Appendix 1: Details of the automated linear stability analysis",
),
(
"content",
[
OrderedDict(
[
("type", "paragraph"),
(
"text",
u"We consider a reaction-diffusion system of the form",
),
]
),
OrderedDict(
[
("type", "mathml"),
("id", u"equ5"),
("label", u"(1)"),
(
"mathml",
'<math><mrow><mi mathvariant="bold">c</mi></mrow></math>',
),
]
),
OrderedDict([("type", "paragraph"), ("text", u"where etc.")]),
OrderedDict(
[
("type", "section"),
("id", u"s16"),
("title", u"Step 1. Possible networks of size ..."),
(
"content",
[
OrderedDict(
[
("type", "paragraph"),
(
"text",
u"We first generate a list of possible networks with ...",
),
]
)
],
),
]
),
OrderedDict(
[
("type", "paragraph"),
("text", u"Test another section with no title"),
]
),
OrderedDict(
[
("type", "section"),
("id", u"test2"),
("title", u"Section with title"),
(
"content",
[
OrderedDict(
[
("type", "paragraph"),
("text", u"Section content"),
]
)
],
),
]
),
OrderedDict(
[
("type", "paragraph"),
("text", u"Second section with no title"),
]
),
],
),
]
)
]
| [
"[email protected]"
] | |
702e48c2aa3d8d52708984ce1249139d83d40039 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /FJk4mJwRk2TYZhkeQ_11.py | f8eebc3f73f01afb69527a31188520efad9b25d1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | """
Create a function that takes a string and returns a new string with each new
character accumulating by +1. Separate each set with a dash.
### Examples
accum("abcd") ➞ "A-Bb-Ccc-Dddd"
accum("RqaEzty") ➞ "R-Qq-Aaa-Eeee-Zzzzz-Tttttt-Yyyyyyy"
accum("cwAt") ➞ "C-Ww-Aaa-Tttt"
### Notes
* Capitalize the first letter of each set.
* All tests contain valid strings with alphabetic characters (a-z, A-Z).
"""
def accum(txt):
return '-'.join([char.upper()+char.lower()*index for index,char in enumerate(txt)])
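# Minimal self-check against the examples documented above (no extra imports needed):
if __name__ == "__main__":
    assert accum("abcd") == "A-Bb-Ccc-Dddd"
    assert accum("cwAt") == "C-Ww-Aaa-Tttt"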
| [
"[email protected]"
] | |
f5c64a21035dbbdf79fe9300cdf1ba736915177c | 29afbde1e2d5497f5ab023a99ea37b0c59e6bb09 | /password.py | d86fbaee471a50c76295435c6499e1e6078e8bdb | [
"MIT"
] | permissive | kahenya-anita/Password-Manager | 5358fc048ba90d0827e4ff8c7f54919107c5aa95 | b3e7683e61e55574b846dfe49bf5d4016085261c | refs/heads/master | 2022-12-15T08:04:08.350058 | 2020-09-08T19:42:48 | 2020-09-08T19:42:48 | 293,597,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import string
import random
class Password:
"""
Class that generates system given passwords.
"""
password_letters=list(string.ascii_letters)
password_nums=list(string.digits)
password_symbols=["#","@","&","$","%"]
password_chars=[]
password_chars.extend(password_letters)
password_chars.extend(password_nums)
password_chars.extend(password_symbols)
@classmethod
def gen_password(cls):
"""
Method to generate system given passwords.
Returns:
System generated password
"""
pass_length=10
num_valid=True
while num_valid:
try:
pass_length=int(input("Enter password length (at least 5): "))
if pass_length<5:
print("**Length should be at least 5. Try again.")
num_valid=True
else:
num_valid=False
except ValueError:
print("**Invalid input. Use numbers.")
num_valid=True
sys_password="".join(random.sample(cls.password_chars, k=pass_length))
return sys_password
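# Usage sketch (interactive -- gen_password prompts on stdin for the length):
#   password = Password.gen_password()
# Note: random.sample draws without replacement, so each character appears at
# most once and the requested length cannot exceed the 67-character pool.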
| [
"[email protected]"
] | |
ee28a8aa66f3d6fec24591cae31c80251e0b6f07 | 4bf5f83a8e5cd4c3ee700569e4a6f07a87dd209c | /students/11th/jiwonko/project/User/migrations/0003_auto_20200812_1307.py | 88bb82d97b9338ddd0609dc2118a3630dbab1049 | [] | no_license | gledong12/westagram-backend | 6e066f4c741aa19df13224ba530b0d9f43a405f7 | 1842f065c599885ad5dcb9ec5fb267eaf3295872 | refs/heads/master | 2023-03-11T20:32:47.055525 | 2021-03-04T01:04:31 | 2021-03-04T01:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Generated by Django 3.0.8 on 2020-08-12 13:07
import User.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('User', '0002_auto_20200805_1106'),
]
operations = [
migrations.AlterField(
model_name='user',
name='password',
field=models.CharField(max_length=156, validators=[User.validators.validate_password]),
),
]
| [
"[email protected]"
] | |
598fa7f17dd1d125cc395d7e717deb6f7a89dab0 | 685fa2cb16ff8bce96b603dee8117ed3e9a1adcb | /dlib-find-min-global/parse_xml.py | 6a073916c47463b9d891a91cdabd8ad99ff0a563 | [] | no_license | albertofernandezvillan/pyimagesearch | 352ec1ec678cb628524c476fdcc86c22238a1a2f | 8c87e6c5d218e42a8864778c032c0fd20261bcdd | refs/heads/master | 2023-02-27T22:02:25.581660 | 2021-02-08T15:15:15 | 2021-02-08T15:15:15 | 338,087,397 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | # ------------------------
# USAGE
# ------------------------
# python parse_xml.py --input ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml
# --output ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train_eyes.xml
# python parse_xml.py --input ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test.xml
# --output ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test_eyes.xml
# ------------------------
# IMPORTS
# ------------------------
# Import the necessary packages
import argparse
import re
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to iBug 300-W data split XML file")
ap.add_argument("-t", "--output", required=True, help="path output data split XML file")
args = vars(ap.parse_args())
# In the iBUG 300-W dataset, each (x, y)-coordinate maps to a specific facial feature (e.g., eye, mouth, nose)
# -- in order to train a dlib shape predictor on *just* the eyes, we must first
# define the integer indexes that belong to the eyes
LANDMARKS = set(list(range(36, 48)))
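# NOTE: range(36, 48) yields indexes 36-47 inclusive -- the twelve points that
# annotate the right (36-41) and left (42-47) eyes in the 68-point iBUG markup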
# To easily parse out the eye locations from the XML file we can utilize regular expressions
# to determine if there is a 'part' element on any given line
PART = re.compile("part name='[0-9]+'")
# Load the contents of the original XML file and open the output file for writing
print("[INFO] parsing data split XML file...")
rows = open(args["input"]).read().strip().split("\n")
output = open(args["output"], "w")
# Loop over the rows of the data split file
for row in rows:
# Check to see if the current line has the (x, y)-coordinates for the facial landmarks we are interested in
parts = re.findall(PART, row)
# If there is no information related to the (x, y)-coordinates of the facial landmarks,
# we can write the current line out to disk with no further modifications
if len(parts) == 0:
output.write("{}\n".format(row))
# Otherwise, there is annotation information that we must process
else:
# Parse out the name of the attribute from the row
attr = "name='"
i = row.find(attr)
j = row.find("'", i + len(attr) + 1)
name = int(row[i + len(attr):j])
# If the facial landmark name exists within the range of the indexes, write it to our output file
if name in LANDMARKS:
output.write("{}\n".format(row))
| [
"[email protected]"
] | |
9a24020666e786dd930310c84812123cc468ef3a | c5758c1f4c880f4530df1a5ffb4c30ee2da445ee | /pytracking/vot_ep/sk3x3_meanmax_adaptive/vot_wrapper_sk3x3_meanmax_adaptive_ep0020.py | 68408effc2c17172004adef9bfbecef3d34e37ef | [] | no_license | bfjei2825401/d3s | 6d662fc301181a0e3ad831b0db6111e3cf8f4097 | 32140a3c67252f0e98cbfbf6ad6d2a79267c221b | refs/heads/master | 2023-02-27T09:57:25.692878 | 2021-01-27T14:20:57 | 2021-01-27T14:20:57 | 297,217,521 | 0 | 0 | null | 2020-09-21T03:23:09 | 2020-09-21T03:23:09 | null | UTF-8 | Python | false | false | 2,523 | py | import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3_meanmax_adaptive import SegmSK3x3MeanMaxAdaptive
from pytracking.parameter.segm_sk3x3_meanmax_adaptive import default_params_ep as vot_params
def rect_to_poly(rect):
x0 = rect[0]
y0 = rect[1]
x1 = rect[0] + rect[2]
y1 = rect[1]
x2 = rect[0] + rect[2]
y2 = rect[1] + rect[3]
x3 = rect[0]
y3 = rect[1] + rect[3]
return [x0, y0, x1, y1, x2, y2, x3, y3]
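# Example: rect_to_poly([10, 20, 30, 40]) -> [10, 20, 40, 20, 40, 60, 10, 60],
# i.e. the four corners of a 30x40 box whose top-left corner is at (10, 20)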
def parse_sequence_name(image_path):
idx = image_path.find('/color/')
return image_path[idx - image_path[:idx][::-1].find('/'):idx], idx
def parse_frame_name(image_path, idx):
frame_name = image_path[idx + len('/color/'):]
return frame_name[:frame_name.find('.')]
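# Example (hedged: a hypothetical VOT-style path): for
# '/data/vot/ball1/color/00000001.jpg', parse_sequence_name returns
# ('ball1', idx) and parse_frame_name(imagefile, idx) returns '00000001'.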
# MAIN
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
sys.exit(0)
params = vot_params.parameters(20)
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
round(selection.points[1].x, 2), round(selection.points[1].y, 2),
round(selection.points[2].x, 2), round(selection.points[2].y, 2),
round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3MeanMaxAdaptive(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
imagefile = handle.frame()
if not imagefile:
break
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
# tell the frame name to the tracker (to save segmentation masks to the disk)
frame_name = parse_frame_name(imagefile, idx_)
tracker.frame_name = frame_name
prediction = tracker.track(image)
if len(prediction) == 4:
prediction = rect_to_poly(prediction)
pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
vot.Point(prediction[2], prediction[3]),
vot.Point(prediction[4], prediction[5]),
vot.Point(prediction[6], prediction[7])])
handle.report(pred_poly)
| [
"[email protected]"
] | |
c2e007ef13501c87c7fc8e93c7490f29a01b9e1d | 8d06729522dbdf7c6391ffcd608d1f1bba3f3ae0 | /bricks/utils/__init__.py | 2a8d9c7b0e42ccc4c9d85c6758134e58f7b0da08 | [] | no_license | seler/django-bricks | 14d9b1de356b698cd0c17574a8f0304e682febb0 | fe1713971d0c881c0f0352217a69b196553f11aa | refs/heads/master | 2021-04-05T04:26:47.311914 | 2013-01-21T05:45:52 | 2013-01-21T05:45:52 | 248,519,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py |
def inheritors(klass):
"""
Returns all inheritors of `klass`.
source: `http://stackoverflow.com/a/5883218/708764`
"""
subclasses = set()
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return subclasses
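# Minimal self-check with a small hypothetical hierarchy:
if __name__ == "__main__":
    class A(object): pass
    class B(A): pass
    class C(B): pass
    assert inheritors(A) == {B, C}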
| [
"[email protected]"
] |