| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
4818f40bb2961d309e93cce19f1650592ac0d462 | 123 | py | Python | src/aceinna/bootstrap/__init__.py | lihaiyong827/python-openimu | f1c536ba4182aaeabd87b63c08ebd92f97e8dbb4 | ["Apache-2.0"] | 41 | 2018-07-20T17:30:33.000Z | 2022-02-24T08:17:39.000Z | src/aceinna/bootstrap/__init__.py | lihaiyong827/python-openimu | f1c536ba4182aaeabd87b63c08ebd92f97e8dbb4 | ["Apache-2.0"] | 52 | 2018-06-25T22:15:14.000Z | 2022-03-10T07:30:56.000Z | src/aceinna/bootstrap/__init__.py | lihaiyong827/python-openimu | f1c536ba4182aaeabd87b63c08ebd92f97e8dbb4 | ["Apache-2.0"] | 31 | 2018-12-19T00:10:08.000Z | 2022-03-19T02:14:03.000Z |

import sys
import os
import traceback
from .default import Default
from .cli import CommandLine
from .loader import Loader

| 17.571429 | 28 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
48197fe7d2676b37c5b385a0395e386523f42d50 | 63 | py | Python | test/tutorial/scripts/api/logout_api.py | GPelayo/dcp-cli | d585fd8b4687f29bfd034242472b870d17ed1e50 | ["MIT"] | 8 | 2017-10-10T18:29:27.000Z | 2019-06-15T04:25:43.000Z | test/tutorial/scripts/api/logout_api.py | GPelayo/dcp-cli | d585fd8b4687f29bfd034242472b870d17ed1e50 | ["MIT"] | 440 | 2017-10-09T16:06:22.000Z | 2021-03-25T17:12:18.000Z | test/tutorial/scripts/api/logout_api.py | GPelayo/dcp-cli | d585fd8b4687f29bfd034242472b870d17ed1e50 | ["MIT"] | 10 | 2017-11-07T22:42:59.000Z | 2020-05-05T15:36:01.000Z |

from hca.dss import DSSClient
dss = DSSClient()
dss.logout()
| 10.5 | 29 | 0.730159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
481a202af8c698328bf81874ddd4607ef4a05765 | 1,995 | py | Python | home/stock_model.py | 85599/nse-django | f42531528137f39596b374a0dacdd37957e69ed2 | ["MIT"] | null | null | null | home/stock_model.py | 85599/nse-django | f42531528137f39596b374a0dacdd37957e69ed2 | ["MIT"] | 1 | 2021-02-25T05:34:43.000Z | 2021-02-25T05:34:43.000Z | home/stock_model.py | 85599/nse-django | f42531528137f39596b374a0dacdd37957e69ed2 | ["MIT"] | null | null | null |

import os
import pandas as pd
from sklearn import linear_model
from nsetools import Nse
import pathlib
import joblib
nse = Nse()
def nse_data(stock_name):
	'''input stock_name : str
	output : list = [open, high, low]'''
	data = nse.get_quote(stock_name)
	current = [data['open'], data['dayHigh'], data['dayLow']]
	return current
def model_check(stock_name):
	'''check whether a saved model already exists for this stock;
	input stock_name : str
	return True or False'''
	# use the same saved_model directory that any_stock() writes to,
	# otherwise a freshly trained model is never found on later calls
	model_path = pathlib.Path(os.getcwd()+"\\home\\nse_data\\saved_model\\"+stock_name+'.pkl')
	return model_path.exists()
def any_stock(stock_name):
'''function to predict any stock values
stock_name == str; today_value= list,[open,high,low]
'''
try:
if model_check(stock_name) == False:
data_path = os.getcwd()+"\\home\\nse_data\\HISTORICAL_DATA\\"
df = pd.read_csv(data_path + stock_name + '_data.csv')
df.fillna(df.mean(),inplace=True)
X = df.iloc[:,[1,2,3]]
y = df.iloc[:,[4]]
reg = linear_model.LinearRegression()
reg.fit(X,y)
y_today = reg.predict([nse_data(stock_name)])
model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
joblib_file = model_path_one + stock_name+ ".pkl"
joblib.dump(reg, joblib_file)
print('model creation')
return y_today[0][0]
		else:
			print('model loading')
			model_path_one = os.getcwd()+"\\home\\nse_data\\saved_model\\"
			joblib_file = model_path_one + stock_name + ".pkl"
			model = joblib.load(joblib_file)
			y_today = model.predict([nse_data(stock_name)])
			return y_today[0][0]  # return a scalar, matching the training branch
	except Exception:
		# avoid a bare except, which would also swallow KeyboardInterrupt
		return ("internal error")
# try:
# print(any_stock('SBIN'))
# except IndexError:
# print('index error')
# except FileNotFoundError:
# print("no file")
| 28.098592 | 88 | 0.593484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.308772 |
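
A minimal usage sketch for the `home/stock_model.py` row above, assuming the `nse_data/HISTORICAL_DATA` CSV for the symbol exists and the NSE API is reachable; the symbol `'SBIN'` mirrors the commented-out test at the bottom of that module, and everything else here is an assumption rather than part of the original row.

```python
# Hypothetical driver for home/stock_model.py; paths and data availability
# are assumptions, not part of the original dataset row.
from home.stock_model import model_check, any_stock

symbol = 'SBIN'
print('model cached on disk:', model_check(symbol))
prediction = any_stock(symbol)  # trains and caches on the first call, loads afterwards
print('predicted close for', symbol, '->', prediction)
```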
481a590185ab360ad8f0c2ef3e09b5d683dfa4f6 | 26,620 | py | Python | examples/rough_translated1/osgthreadedterrain.py | JaneliaSciComp/osgpyplusplus | a5ae3f69c7e9101a32d8cc95fe680dab292f75ac | ["BSD-3-Clause"] | 17 | 2015-06-01T12:19:46.000Z | 2022-02-12T02:37:48.000Z | examples/rough_translated1/osgthreadedterrain.py | cmbruns/osgpyplusplus | f8bfca2cf841e15f6ddb41c958f3ad0d0b9e4b75 | ["BSD-3-Clause"] | 7 | 2015-07-04T14:36:49.000Z | 2015-07-23T18:09:49.000Z | examples/rough_translated1/osgthreadedterrain.py | cmbruns/osgpyplusplus | f8bfca2cf841e15f6ddb41c958f3ad0d0b9e4b75 | ["BSD-3-Clause"] | 7 | 2015-11-28T17:00:31.000Z | 2020-01-08T07:00:59.000Z |

#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgthreadedterrain"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import OpenThreads
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgTerrain
from osgpypp import osgText
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgthreadedterrain.cpp'
# OpenSceneGraph example, osgterrain.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <OpenThreads/Block>
#include <osg/Group>
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Texture2D>
#include <osg/PositionAttitudeTransform>
#include <osg/MatrixTransform>
#include <osg/CoordinateSystemNode>
#include <osg/ClusterCullingCallback>
#include <osg/ArgumentParser>
#include <osgDB/FileUtils>
#include <osgDB/fstream>
#include <osgDB/ReadFile>
#include <osgUtil/IncrementalCompileOperation>
#include <osgText/FadeText>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgGA/KeySwitchMatrixManipulator>
#include <osgGA/StateSetManipulator>
#include <osgGA/AnimationPathManipulator>
#include <osgGA/TerrainManipulator>
#include <osgTerrain/TerrainTile>
#include <osgTerrain/GeometryTechnique>
#include <osgTerrain/Layer>
#include <iostream>
typedef std.vector< osg.GraphicsThread > GraphicsThreads
class ReleaseBlockOnCompileCompleted (osgUtil.IncrementalCompileOperation.CompileCompletedCallback) :
ReleaseBlockOnCompileCompleted(osg.RefBlockCount* block):
_block(block)
def compileCompleted(compileSet):
if _block.valid() : _block.completed()
# tell IncrementalCompileOperation that it's now safe to remove the compileSet
osg.notify(osg.NOTICE), "compileCompleted(", compileSet, ")"
return True
_block = osg.RefBlockCount()
class LoadAndCompileOperation (osg.Operation) :
LoadAndCompileOperation( str filename, osgUtil.IncrementalCompileOperation* ico , osg.RefBlockCount* block):
Operation("Load and compile Operation", False),
_filename(filename),
_incrementalCompileOperation(ico),
_block(block)
virtual void operator () (osg.Object* object)
# osg.notify(osg.NOTICE), "LoadAndCompileOperation ", _filename
_loadedModel = osgDB.readNodeFile(_filename)
if _loadedModel.valid() and _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(_loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_block)
_incrementalCompileOperation.add(compileSet)
else:
if _block.valid() : _block.completed()
# osg.notify(osg.NOTICE), "done LoadAndCompileOperation ", _filename
_filename = str()
_loadedModel = osg.Node()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_block = osg.RefBlockCount()
class MasterOperation (osg.Operation) :
typedef std.set<str> Files
typedef std.map<str, osg.Node > FilenameNodeMap
typedef std.vector< osg.Node > Nodes
MasterOperation( str filename, osgUtil.IncrementalCompileOperation* ico):
Operation("Master reading operation",True),
_filename(filename),
_incrementalCompileOperation(ico)
#* Set the OperationQueue that the MasterOperation can use to place tasks like file loading on for other processes to handle.
def setOperationQueue(oq):
_operationQueue = oq
def getOperationQueue():
return _operationQueue
def readMasterFile(files):
fin = osgDB.ifstream(_filename.c_str())
if fin :
fr = osgDB.Input()
fr.attach(fin)
readFilename = False
while not fr.eof() :
itrAdvanced = False
if fr.matchSequence("file %s") or fr.matchSequence("file %w") :
files.insert(fr[1].getStr())
fr += 2
itrAdvanced = True
readFilename = True
if not itrAdvanced :
++fr
return readFilename
return False
def open(group):
files = Files()
readMasterFile(files)
for(Files.iterator itr = files.begin()
not = files.end()
++itr)
model = osgDB.readNodeFile(*itr)
if model :
osg.notify(osg.NOTICE), "open: Loaded file ", *itr
group.addChild(model)
_existingFilenameNodeMap[*itr] = model
return True
virtual void operator () (osg.Object* callingObject)
# decided which method to call according to whole has called me.
viewer = dynamic_cast<osgViewer.Viewer*>(callingObject)
if viewer : update(viewer.getSceneData())
load = else()
def load():
#osg.notify(osg.NOTICE), "void load(Object)"
filesA = Files()
filesB = Files()
readMasterFile(filesB)
# osg.notify(osg.NOTICE), "First read ", filesA.size()
# itererate until the master file is stable
do
OpenThreads.Thread.microSleep(100000)
filesB.swap(filesA)
filesB.clear()
readMasterFile(filesB)
# osg.notify(osg.NOTICE), "second read ", filesB.size()
while filesA not =filesB :
files = Files()
files.swap(filesB)
# osg.notify(osg.NOTICE), "Now equal ", files.size()
newFiles = Files()
removedFiles = Files()
# find out which files are , and which ones have been removed.
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
for(Files.iterator fitr = files.begin()
not = files.end()
++fitr)
if _existingFilenameNodeMap.count(*fitr)==0 : newFiles.insert(*fitr)
for(FilenameNodeMap.iterator litr = _existingFilenameNodeMap.begin()
not = _existingFilenameNodeMap.end()
++litr)
if files.count(litr.first)==0 :
removedFiles.insert(litr.first)
#if 0
if not newFiles.empty() or not removedFiles.empty() :
osg.notify(osg.NOTICE), "void operator () files.size()=", files.size()
#endif
# first load the files.
nodesToAdd = FilenameNodeMap()
if not newFiles.empty() :
typedef std.vector< osg.GraphicsThread > GraphicsThreads
threads = GraphicsThreads()
for(unsigned int i=0 i<= osg.GraphicsContext.getMaxContextID() ++i)
gc = osg.GraphicsContext.getCompileContext(i)
gt = gc.getGraphicsThread() if (gc) else 0
if gt : threads.push_back(gt)
if _operationQueue.valid() :
# osg.notify(osg.NOTICE), "Using OperationQueue"
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
typedef std.list< LoadAndCompileOperation > LoadAndCompileList
loadAndCompileList = LoadAndCompileList()
for(Files.iterator nitr = newFiles.begin()
not = newFiles.end()
++nitr)
# osg.notify(osg.NOTICE), "Adding LoadAndCompileOperation ", *nitr
loadAndCompile = LoadAndCompileOperation( *nitr, _incrementalCompileOperation, _endOfLoadBlock )
loadAndCompileList.push_back(loadAndCompile)
_operationQueue.add( loadAndCompile )
#if 1
operation = osg.Operation()
while operation=_operationQueue.getNextOperation() :.valid() :
# osg.notify(osg.NOTICE), "Local running of operation"
(*operation)(0)
#endif
# osg.notify(osg.NOTICE), "Waiting for completion of LoadAndCompile operations"
_endOfLoadBlock.block()
# osg.notify(osg.NOTICE), "done ... Waiting for completion of LoadAndCompile operations"
for(LoadAndCompileList.iterator litr = loadAndCompileList.begin()
not = loadAndCompileList.end()
++litr)
if *litr :._loadedModel.valid() :
nodesToAdd[(*litr)._filename] = (*litr)._loadedModel
else:
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
for(Files.iterator nitr = newFiles.begin()
not = newFiles.end()
++nitr)
loadedModel = osgDB.readNodeFile(*nitr)
if loadedModel :
nodesToAdd[*nitr] = loadedModel
if _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_endOfLoadBlock)
_incrementalCompileOperation.add(compileSet)
else:
_endOfLoadBlock.completed()
else:
_endOfLoadBlock.completed()
_endOfLoadBlock.block()
requiresBlock = False
# pass the locally peppared data to MasterOperations shared data
# so that updated thread can merge these changes with the main scene
# graph. This merge is carried out via the update(..) method.
if not removedFiles.empty() or not nodesToAdd.empty() :
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
_nodesToRemove.swap(removedFiles)
_nodesToAdd.swap(nodesToAdd)
requiresBlock = True
# now block so we don't try to load anything till the data has been merged
# otherwise _existingFilenameNodeMap will get out of sync.
if requiresBlock :
_updatesMergedBlock.block()
else:
OpenThreads.Thread.YieldCurrentThread()
# merge the changes with the main scene graph.
def update(scene):
# osg.notify(osg.NOTICE), "void update(Node*)"
group = dynamic_cast<osg.Group*>(scene)
if not group :
osg.notify(osg.NOTICE), "Error, MasterOperation.update(Node*) can only work with a Group as Viewer.getSceneData()."
return
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
if not _nodesToRemove.empty() or not _nodesToAdd.empty() :
osg.notify(osg.NOTICE), "update().................. "
if not _nodesToRemove.empty() :
for(Files.iterator itr = _nodesToRemove.begin()
not = _nodesToRemove.end()
++itr)
fnmItr = _existingFilenameNodeMap.find(*itr)
if fnmItr not = _existingFilenameNodeMap.end() :
osg.notify(osg.NOTICE), " update():removing ", *itr
group.removeChild(fnmItr.second)
_existingFilenameNodeMap.erase(fnmItr)
_nodesToRemove.clear()
if not _nodesToAdd.empty() :
for(FilenameNodeMap.iterator itr = _nodesToAdd.begin()
not = _nodesToAdd.end()
++itr)
osg.notify(osg.NOTICE), " update():inserting ", itr.first
group.addChild(itr.second)
_existingFilenameNodeMap[itr.first] = itr.second
_nodesToAdd.clear()
_updatesMergedBlock.release()
# add release implementation so that any thread cancellation can
# work even when blocks and barriers are used.
def release():
if _operationQueue.valid() : _operationQueue.removeAllOperations()
_updatesMergedBlock.release()
if _endOfCompilebarrier.valid() : _endOfCompilebarrier.release()
if _endOfLoadBlock.valid() : _endOfLoadBlock.release()
_filename = str()
_mutex = OpenThreads.Mutex()
_existingFilenameNodeMap = FilenameNodeMap()
_nodesToRemove = Files()
_nodesToAdd = FilenameNodeMap()
_updatesMergedBlock = OpenThreads.Block()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_endOfCompilebarrier = osg.BarrierOperation()
_endOfLoadBlock = osg.RefBlockCount()
_operationQueue = osg.OperationQueue()
class FilterHandler (osgGA.GUIEventHandler) :
FilterHandler(osgTerrain.GeometryTechnique* gt):
_gt(gt)
def handle(ea, aa):
if not _gt : return False
switch(ea.getEventType())
case(osgGA.GUIEventAdapter.KEYDOWN):
if ea.getKey() == ord("g") :
osg.notify(osg.NOTICE), "Gaussian"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.GAUSSIAN)
return True
elif ea.getKey() == ord("s") :
osg.notify(osg.NOTICE), "Smooth"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SMOOTH)
return True
elif ea.getKey() == ord("S") :
osg.notify(osg.NOTICE), "Sharpen"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SHARPEN)
return True
elif ea.getKey() == ord("+") :
_gt.setFilterWidth(_gt.getFilterWidth()*1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord("-") :
_gt.setFilterWidth(_gt.getFilterWidth()/1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord(">") :
_gt.setFilterBias(_gt.getFilterBias()+0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
elif ea.getKey() == ord("<") :
_gt.setFilterBias(_gt.getFilterBias()-0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
break
default:
break
return False
_gt = osg.observer_ptr<osgTerrain.GeometryTechnique>()
class LayerHandler (osgGA.GUIEventHandler) :
LayerHandler(osgTerrain.Layer* layer):
_layer(layer)
def handle(ea, aa):
if not _layer : return False
scale = 1.2
switch(ea.getEventType())
case(osgGA.GUIEventAdapter.KEYDOWN):
if ea.getKey() == ord("q") :
_layer.transform(0.0, scale)
return True
elif ea.getKey() == ord("a") :
_layer.transform(0.0, 1.0/scale)
return True
break
default:
break
return False
_layer = osg.observer_ptr<osgTerrain.Layer>()
def main(argv):
arguments = osg.ArgumentParser(argv)
# construct the viewer.
viewer = osgViewer.Viewer(arguments)
# set up the camera manipulators.
keyswitchManipulator = osgGA.KeySwitchMatrixManipulator()
keyswitchManipulator.addMatrixManipulator( ord("1"), "Trackball", osgGA.TrackballManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("2"), "Flight", osgGA.FlightManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("3"), "Drive", osgGA.DriveManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("4"), "Terrain", osgGA.TerrainManipulator() )
pathfile = str()
keyForAnimationPath = ord("5")
while arguments.read("-p",pathfile) :
apm = osgGA.AnimationPathManipulator(pathfile)
if apm or not apm.valid() :
num = keyswitchManipulator.getNumMatrixManipulators()
keyswitchManipulator.addMatrixManipulator( keyForAnimationPath, "Path", apm )
keyswitchManipulator.selectMatrixManipulator(num)
++keyForAnimationPath
viewer.setCameraManipulator( keyswitchManipulator )
# add the state manipulator
viewer.addEventHandler( osgGA.StateSetManipulator(viewer.getCamera().getOrCreateStateSet()) )
# add the stats handler
    viewer.addEventHandler(osgViewer.StatsHandler())
# add the record camera path handler
    viewer.addEventHandler(osgViewer.RecordCameraPathHandler())
# attach an IncrementaCompileOperation to allow the master loading
# to be handled with an incremental compile to avoid frame drops when large objects are added.
viewer.setIncrementalCompileOperation(osgUtil.IncrementalCompileOperation())
x = 0.0
y = 0.0
w = 1.0
h = 1.0
numLoadThreads = 1
while arguments.read("--load-threads",numLoadThreads) :
masterOperation = MasterOperation()
masterFilename = str()
while arguments.read("-m",masterFilename) :
masterOperation = MasterOperation(masterFilename, viewer.getIncrementalCompileOperation())
terrainTile = osgTerrain.TerrainTile()
locator = osgTerrain.Locator()
validDataOperator = osgTerrain.NoDataValue(0.0)
lastAppliedLayer = osgTerrain.Layer()
locator.setCoordinateSystemType(osgTerrain.Locator.GEOCENTRIC)
locator.setTransformAsExtents(-osg.PI, -osg.PI*0.5, osg.PI, osg.PI*0.5)
layerNum = 0
filterName = str()
filter = osg.Texture.LINEAR
float minValue, maxValue
scale = 1.0
offset = 0.0
pos = 1
while pos<arguments.argc() :
filename = str()
if arguments.read(pos, "--layer",layerNum) :
osg.notify(osg.NOTICE), "Set layer number to ", layerNum
elif arguments.read(pos, "-b") :
terrainTile.setTreatBoundariesToValidDataAsDefaultValue(True)
elif arguments.read(pos, "-e",x,y,w,h) :
# define the extents.
locator.setCoordinateSystemType(osgTerrain.Locator.GEOCENTRIC)
locator.setTransformAsExtents(x,y,x+w,y+h)
elif arguments.read(pos, "--transform",offset, scale) or arguments.read(pos, "-t",offset, scale) :
# define the extents.
elif arguments.read(pos, "--cartesian",x,y,w,h) :
# define the extents.
locator.setCoordinateSystemType(osgTerrain.Locator.PROJECTED)
locator.setTransformAsExtents(x,y,x+w,y+h)
elif arguments.read(pos, "--hf",filename) :
osg.notify(osg.NOTICE), "--hf ", filename
hf = osgDB.readHeightFieldFile(filename)
if hf.valid() :
hfl = osgTerrain.HeightFieldLayer()
hfl.setHeightField(hf)
hfl.setLocator(locator)
hfl.setValidDataOperator(validDataOperator)
hfl.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
hfl.transform(offset,scale)
terrainTile.setElevationLayer(hfl)
lastAppliedLayer = hfl
osg.notify(osg.NOTICE), "created osgTerrain.HeightFieldLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.HeightFieldLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "-d",filename) or arguments.read(pos, "--elevation-image",filename) :
osg.notify(osg.NOTICE), "--elevation-image ", filename
image = osgDB.readImageFile(filename)
if image.valid() :
imageLayer = osgTerrain.ImageLayer()
imageLayer.setImage(image)
imageLayer.setLocator(locator)
imageLayer.setValidDataOperator(validDataOperator)
imageLayer.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
imageLayer.transform(offset,scale)
terrainTile.setElevationLayer(imageLayer)
lastAppliedLayer = imageLayer
osg.notify(osg.NOTICE), "created Elevation osgTerrain.ImageLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.ImageLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "-c",filename) or arguments.read(pos, "--image",filename) :
osg.notify(osg.NOTICE), "--image ", filename, " x=", x, " y=", y, " w=", w, " h=", h
image = osgDB.readImageFile(filename)
if image.valid() :
imageLayer = osgTerrain.ImageLayer()
imageLayer.setImage(image)
imageLayer.setLocator(locator)
imageLayer.setValidDataOperator(validDataOperator)
imageLayer.setMagFilter(filter)
if offset not =0.0 or scale not =1.0 :
imageLayer.transform(offset,scale)
terrainTile.setColorLayer(layerNum, imageLayer)
lastAppliedLayer = imageLayer
osg.notify(osg.NOTICE), "created Color osgTerrain.ImageLayer"
else:
osg.notify(osg.NOTICE), "failed to create osgTerrain.ImageLayer"
scale = 1.0
offset = 0.0
elif arguments.read(pos, "--filter",filterName) :
if filterName=="NEAREST" :
osg.notify(osg.NOTICE), "--filter ", filterName
filter = osg.Texture.NEAREST
elif filterName=="LINEAR" :
filter = osg.Texture.LINEAR
osg.notify(osg.NOTICE), "--filter ", filterName
else:
osg.notify(osg.NOTICE), "--filter ", filterName, " unrecognized filter name, please use LINEAER or NEAREST."
if terrainTile.getColorLayer(layerNum) :
terrainTile.getColorLayer(layerNum).setMagFilter(filter)
elif arguments.read(pos, "--tf",minValue, maxValue) :
tf = osg.TransferFunction1D()
numCells = 6
delta = (maxValue-minValue)/float(numCells-1)
v = minValue
tf.allocate(6)
tf.setColor(v, osg.Vec4(1.0,1.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,0.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,0.0,0.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(1.0,1.0,0.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(0.0,1.0,1.0,1.0)) v += delta
tf.setColor(v, osg.Vec4(0.0,1.0,0.0,1.0))
osg.notify(osg.NOTICE), "--tf ", minValue, " ", maxValue
terrainTile.setColorLayer(layerNum, osgTerrain.ContourLayer(tf))
else:
++pos
scene = osg.Group()
if terrainTile.valid() and (terrainTile.getElevationLayer() or terrainTile.getColorLayer(0)) :
osg.notify(osg.NOTICE), "Terrain created"
scene.addChild(terrainTile)
geometryTechnique = osgTerrain.GeometryTechnique()
terrainTile.setTerrainTechnique(geometryTechnique)
viewer.addEventHandler(FilterHandler(geometryTechnique))
viewer.addEventHandler(LayerHandler(lastAppliedLayer))
if masterOperation.valid() :
osg.notify(osg.NOTICE), "Master operation created"
masterOperation.open(scene)
if scene.getNumChildren()==0 :
osg.notify(osg.NOTICE), "No model created, please specify terrain or master file on command line."
return 0
viewer.setSceneData(scene)
# start operation thread if a master file has been used.
masterOperationThread = osg.OperationThread()
typedef std.list< osg.OperationThread > OperationThreadList
generalThreadList = OperationThreadList()
if masterOperation.valid() :
masterOperationThread = osg.OperationThread()
masterOperationThread.startThread()
masterOperationThread.add(masterOperation)
# if numLoadThreads>0 :
operationQueue = osg.OperationQueue()
masterOperation.setOperationQueue(operationQueue)
for(unsigned int i=0 i<numLoadThreads ++i)
thread = osg.OperationThread()
thread.setOperationQueue(operationQueue)
thread.startThread()
generalThreadList.push_back(thread)
viewer.addUpdateOperation(masterOperation)
viewer.setThreadingModel(osgViewer.Viewer.SingleThreaded)
# enable the use of compile contexts and associated threads.
# osg.DisplaySettings.instance().setCompileContextsHint(True)
# realize the graphics windows.
viewer.realize()
# run the viewers main loop
return viewer.run()
if __name__ == "__main__":
main(sys.argv)
| 34.661458 | 130 | 0.598911 | 101 | 0.003794 | 0 | 0 | 0 | 0 | 0 | 0 | 5,550 | 0.20849 |
481b02369261f195f00f8a2beb84d5e057a643b6 | 5,207 | py | Python | deepy/data/audio/tau2019.py | popura/deepy-pytorch | 71d87a82e937d82b9b149041280a392cc24b7299 | ["MIT"] | 1 | 2021-07-19T09:38:26.000Z | 2021-07-19T09:38:26.000Z | deepy/data/audio/tau2019.py | popura/deepy-pytorch | 71d87a82e937d82b9b149041280a392cc24b7299 | ["MIT"] | 1 | 2021-07-26T06:47:45.000Z | 2021-07-26T06:47:45.000Z | deepy/data/audio/tau2019.py | popura/deepy-pytorch | 71d87a82e937d82b9b149041280a392cc24b7299 | ["MIT"] | null | null | null |

import sys
import os
import os.path
import random
from pathlib import Path
import torch
import torchaudio
from .audiodataset import AUDIO_EXTENSIONS, default_loader
from ..dataset import PureDatasetFolder, has_file_allowed_extension
class TAU2019(PureDatasetFolder):
"""TAU urban acoustic scene 2019 dataset.
This dataset was used for DCASE 2019 Task 1.
For using this dataset, download the dataset from the following links:
https://zenodo.org/record/2589280#.XvWs0Zbgprk
https://zenodo.org/record/3063822#.XvWs55bgprk
Then, unzip them in the *root* folder.
"""
def __init__(self, root, mode, loader=default_loader, extensions=AUDIO_EXTENSIONS,
transforms=None, transform=None, target_transform=None,
is_valid_file=None,
pre_load=False, pre_transform=None,
pre_target_transform=None, pre_transforms=None):
super(TAU2019, self).__init__(root,
transforms=transforms,
transform=transform,
target_transform=target_transform)
self.MODES = ('train', 'evaluate', 'test')
if mode not in self.MODES:
raise ValueError("mode \"{}\" is not in {}".format(mode, self.MODES))
self.mode = mode
classes, class_to_idx = self._define_classes()
samples = self._make_dataset(str(self.root), mode,
class_to_idx, extensions, is_valid_file)
self.loader = loader
self.extensions = extensions
self.samples = samples
self.targets = [s[1] for s in samples]
self.classes = classes
self.class_to_idx = class_to_idx
has_pre_transforms = pre_transforms is not None
has_pre_separate_transform = pre_transform is not None or pre_target_transform is not None
if has_pre_transforms and has_pre_separate_transform:
raise ValueError("Only pre_transforms or pre_transform/pre_target_transform can "
"be passed as argument")
        if has_pre_separate_transform:
            # NOTE: `torchdataset` is never imported in this module; the
            # SeparatedTransform import must be added for this branch to run
            pre_transforms = torchdataset.transform.SeparatedTransform(pre_transform, pre_target_transform)
self.pre_transforms = pre_transforms
self.pre_load = pre_load
if pre_load:
self.pre_process()
def pre_process(self, ):
preprocessed_samples = []
for i in range(len(self)):
sys.stdout.write("\rloaded {0} / {1}".format(i+1, len(self)))
sys.stdout.flush()
path, target = self.samples[i]
sample = self.loader(path)
if self.pre_transforms is not None:
sample, target = self.pre_transforms(sample, target)
preprocessed_samples.append((sample, target))
self.preprocessed_samples = preprocessed_samples
sys.stdout.write("\n")
def _define_classes(self, ):
classes = ['airport', 'shopping_mall', 'metro_station', 'street_pedestrian',
'public_square', 'street_traffic', 'tram', 'bus', 'metro', 'park']
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def _make_dataset(self, directory, mode, class_to_idx, extensions=None, is_valid_file=None):
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x):
return has_file_allowed_extension(x, extensions)
if not os.path.isdir(directory):
raise ValueError("{} is not a directory".format(directory))
with open(os.path.join(directory, 'evaluation_setup', 'fold1_'+mode+'.csv')) as f:
for i, line in enumerate(f):
if i == 0:
continue
line = line.rstrip('\n')
fname = line.split('\t')[0]
path = os.path.join(directory, fname)
class_index = class_to_idx[os.path.split(fname)[1].split('-')[0]]
item = path, class_index
instances.append(item)
return instances
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
if self.pre_load:
sample, target = self.preprocessed_samples[index]
else:
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
| 41.325397 | 109 | 0.61014 | 4,970 | 0.954484 | 0 | 0 | 0 | 0 | 0 | 0 | 913 | 0.175341 |
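
A minimal usage sketch for the `TAU2019` class in the row above, assuming the TAU 2019 zips have been extracted under `root` and the `deepy` package is importable; the root path is an assumption, and the exact sample format depends on `default_loader`.

```python
# Hypothetical usage of the TAU2019 dataset class; the root path below is
# an assumption, not part of the original row.
from torch.utils.data import DataLoader
from deepy.data.audio.tau2019 import TAU2019

dataset = TAU2019(root='/data/TAU-urban-acoustic-scenes-2019-development',
                  mode='train')
sample, target = dataset[0]                  # (loaded audio, class index)
print(len(dataset), dataset.classes[target])

# TAU 2019 clips have a fixed 10 s length, so the default collate_fn works:
loader = DataLoader(dataset, batch_size=16, shuffle=True)
```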
481b0da064442a0c9a7254f4c7fb899384b93ddc | 13,530 | py | Python | rrl.py | siekmanj/apex | 49483c827d8e70302b3e993acf29e9798f4435c1 | ["MIT"] | null | null | null | rrl.py | siekmanj/apex | 49483c827d8e70302b3e993acf29e9798f4435c1 | ["MIT"] | 1 | 2019-11-14T21:12:31.000Z | 2019-11-14T21:12:31.000Z | rrl.py | siekmanj/rrl | 49483c827d8e70302b3e993acf29e9798f4435c1 | ["MIT"] | null | null | null |

import os
import torch
import hashlib
from collections import OrderedDict
from util.env import env_factory, eval_policy
from util.logo import print_logo
if __name__ == "__main__":
import sys, argparse, time, os
parser = argparse.ArgumentParser()
parser.add_argument("--nolog", action='store_true')
print_logo(subtitle="Recurrent Reinforcement Learning for Robotics.")
if len(sys.argv) < 2:
print("Usage: python apex.py [algorithm name]", sys.argv)
elif sys.argv[1] == 'ars':
"""
Utility for running Augmented Random Search.
"""
from algos.ars import run_experiment
sys.argv.remove(sys.argv[1])
parser.add_argument("--workers", type=int, default=4)
parser.add_argument("--hidden_size", default=32, type=int) # neurons in hidden layer
parser.add_argument("--timesteps", "-t", default=1e8, type=float) # timesteps to run experiment ofr
parser.add_argument("--load_model", "-l", default=None, type=str) # load a model from a saved file.
parser.add_argument('--std', "-sd", default=0.0075, type=float) # the standard deviation of the parameter noise vectors
parser.add_argument("--deltas", "-d", default=64, type=int) # number of parameter noise vectors to use
parser.add_argument("--lr", "-lr", default=0.01, type=float) # the learning rate used to update policy
parser.add_argument("--reward_shift", "-rs", default=1, type=float) # the reward shift (to counter Gym's alive_bonus)
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--algo", "-a", default='v1', type=str) # whether to use ars v1 or v2
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--recurrent", "-r", action='store_true') # whether to use a recurrent policy
parser.add_argument("--logdir", default="./logs/ars/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
parser.add_argument("--average_every", default=10, type=int)
parser.add_argument("--save_model", "-m", default=None, type=str) # where to save the trained model to
parser.add_argument("--redis", default=None)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'ddpg':
sys.argv.remove(sys.argv[1])
"""
Utility for running Recurrent/Deep Deterministic Policy Gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=int) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument("--load_critic", default=None, type=str) # load a critic from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.2, type=float) # random noise used for exploration
parser.add_argument('--tau', default=0.01, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=1e-5, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--normc_init", default=True, type=bool) # using col norm to init weights
parser.add_argument("--normalize" '-n', action='store_true') # normalize states online
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--logdir", default="./logs/ddpg/", type=str)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'ddpg'
run_experiment(args)
elif sys.argv[1] == 'td3':
sys.argv.remove(sys.argv[1])
"""
Utility for running Twin-Delayed Deep Deterministic policy gradients.
"""
from algos.off_policy import run_experiment
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps in replay buffer
parser.add_argument("--start_timesteps", default=1e4, type=float) # number of timesteps to generate random actions for
parser.add_argument("--load_actor", default=None, type=str) # load an actor from a .pt file
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--expl_noise', default=0.1, type=float) # random noise used for exploration
parser.add_argument('--max_action', default=1.0, type=float) #
parser.add_argument('--policy_noise', default=0.2, type=float) #
parser.add_argument('--noise_clip', default=0.5, type=float) #
parser.add_argument('--tau', default=0.005, type=float) # update factor for target networks
parser.add_argument("--a_lr", "-alr", default=3e-4, type=float) # adam learning rate for critic
parser.add_argument("--c_lr", "-clr", default=3e-4, type=float) # adam learning rate for actor
parser.add_argument("--traj_len", "-tl", default=1000, type=int) # max trajectory length for environment
parser.add_argument("--center_reward", "-r", action='store_true') # normalize rewards to a normal distribution
parser.add_argument("--batch_size", default=256, type=int) # batch size for policy update
parser.add_argument("--updates", default=1, type=int) # (if recurrent) number of times to update policy per episode
parser.add_argument("--update_freq", default=1, type=int) # how many episodes to skip before updating
parser.add_argument("--eval_every", default=100, type=int) # how often to evaluate the trained policy
parser.add_argument("--save_actor", default=None, type=str)
#parser.add_argument("--save_critics", default=None, type=str)
parser.add_argument("--logdir", default="./logs/td3/", type=str)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--seed", "-s", default=0, type=int)
parser.add_argument("--env_name", "-e", default="Hopper-v3")
args = parser.parse_args()
args.algo = 'td3'
run_experiment(args)
elif sys.argv[1] == 'ppo':
sys.argv.remove(sys.argv[1])
"""
Utility for running Proximal Policy Optimization.
"""
from algos.ppo import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--num_steps", default=5000, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--std', default=0.13, type=float) # the fixed exploration std
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--eps", "-ep", default=1e-5, type=float) # adam eps
parser.add_argument("--kl", default=0.02, type=float) # kl abort threshold
parser.add_argument("--entropy_coeff", default=0.0, type=float)
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=64, type=int) # batch size for policy update
parser.add_argument("--epochs", default=3, type=int) # number of updates per iter
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/ppo/", type=str)
parser.add_argument("--workers", default=4, type=int)
parser.add_argument("--redis", default=None, type=str)
args = parser.parse_args()
run_experiment(args)
elif sys.argv[1] == 'sac':
sys.argv.remove(sys.argv[1])
"""
Utility for running Soft Actor-Critic.
"""
from algos.off_policy import run_experiment
parser.add_argument("--seed", default=0, type=int) # number of timesteps to run experiment for
parser.add_argument("--timesteps", "-t", default=1e6, type=float) # number of timesteps to run experiment for
parser.add_argument("--env_name", default='Cassie-v0', type=str)
parser.add_argument("--traj_len", "-tl", default=400, type=int) # max trajectory length for environment
parser.add_argument("--start_timesteps", default=10000, type=int)
parser.add_argument("--eval_every", default=100, type=int)
parser.add_argument("--recurrent", action='store_true')
parser.add_argument('--discount', default=0.99, type=float) # the discount factor
parser.add_argument('--tau', default=1e-2, type=float)
parser.add_argument("--a_lr", "-alr", default=1e-4, type=float) # adam learning rate for actor
parser.add_argument("--c_lr", "-clr", default=1e-4, type=float) # adam learning rate for critic
parser.add_argument("--alpha", default=None, type=float) # adam learning rate for critic
parser.add_argument("--grad_clip", default=0.05, type=float)
parser.add_argument("--batch_size", default=128, type=int) # batch size for policy update
parser.add_argument("--prenormalize_steps", default=10000, type=int)
parser.add_argument("--save_actor", default=None, type=str)
parser.add_argument("--save_critic", default=None, type=str)
parser.add_argument("--logdir", default="./logs/sac/", type=str)
args = parser.parse_args()
args.algo = 'sac'
run_experiment(args)
elif sys.argv[1] == 'eval':
sys.argv.remove(sys.argv[1])
parser.add_argument("--policy", default="./trained_models/ddpg/ddpg_actor.pt", type=str)
parser.add_argument("--env_name", default=None, type=str)
parser.add_argument("--traj_len", default=400, type=int)
args = parser.parse_args()
policy = torch.load(args.policy)
eval_policy(policy, min_timesteps=100000, env_name=args.env_name, max_traj_len=args.traj_len)
elif sys.argv[1] == 'cassie':
sys.argv.remove(sys.argv[1])
from cassie.udp import run_udp
parser.add_argument("--policy", default='logs/ppo/Cassie-nodelta-stateest-clockbased/bcbc77-seed0/actor.pt', type=str)
args = parser.parse_args()
run_udp(args)
else:
print("Invalid option '{}'".format(sys.argv[1]))
| 63.820755 | 143 | 0.601404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,614 | 0.34102 |
481b39532625d6b67659f1b6e06ab7e797e7fa51 | 215 | py | Python | swaping 2.py | aash-gates/aash-python-babysteps | cb88b02b0d33ac74acb183d4f11f6baad0ad3db9 | ["Unlicense"] | 7 | 2020-11-16T18:23:21.000Z | 2021-12-18T14:08:54.000Z | swaping 2.py | 00mjk/aash-python-babysteps | c52ffbc2690ea387eaad6639bb9764b9ee015bfd | ["Unlicense"] | null | null | null | swaping 2.py | 00mjk/aash-python-babysteps | c52ffbc2690ea387eaad6639bb9764b9ee015bfd | ["Unlicense"] | 1 | 2020-12-21T15:59:44.000Z | 2020-12-21T15:59:44.000Z |

'''
practice question from Chapter 1, Module 5 of the IBM Digital Nation Courses
by Aashik J Krishnan/Aash Gates
'''
x = 10
y = "ten"
#step 1
x,y = y,x
#printing on next line
print(x)
print(y)
#end of the program

| 11.944444 | 72 | 0.688372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.767442 |
481b7b847a5b07aac335adc738e6043d2c120dd3 | 1,965 | py | Python | Lab_5_Eigen_Decomposition/eigen_images.py | NahianHasan/ECE63700-Digital_Image_Processing | ef1f1df93ffa16a4c76ddc8cc5ed6bc303dea96b | ["BSD-3-Clause"] | null | null | null | Lab_5_Eigen_Decomposition/eigen_images.py | NahianHasan/ECE63700-Digital_Image_Processing | ef1f1df93ffa16a4c76ddc8cc5ed6bc303dea96b | ["BSD-3-Clause"] | null | null | null | Lab_5_Eigen_Decomposition/eigen_images.py | NahianHasan/ECE63700-Digital_Image_Processing | ef1f1df93ffa16a4c76ddc8cc5ed6bc303dea96b | ["BSD-3-Clause"] | null | null | null |

import read_data as RD
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
X = RD.read_data()
print('X = ',X.shape)
X_mean = np.reshape(np.sum(X,1)/X.shape[1],[ X.shape[0],1])
X = X-X_mean
print('X_centerred = ',X.shape)
[U,S,V] = np.linalg.svd(X, full_matrices=False)
print('U = ',U.shape)
print('S = ',S.shape)
print('V = ',V.shape)
N = 12  # number of eigen images
Eig_im = U[:,0:N]
plt.figure(figsize=(10,10))
for i in range(0,N):
plt.subplot(int(np.sqrt(N)),int(np.ceil(N/int(np.sqrt(N)))),i+1)
im = np.reshape(Eig_im[:,i],[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('Eigen Image = '+str(i+1))
plt.savefig('Eigen_Images.png')
plt.savefig('Eigen_Images.tif')
Y = np.matmul(np.transpose(U),X)
print('Y = ',Y.shape)
plt.figure(figsize=(10,10))
Np = 10  # number of projection coefficients to plot
Ni = 4   # number of images
images = ['a','b','c','d']
for i in range(0,Ni):
plt.plot(np.arange(1,Np+1),Y[0:Np,i],label='Image = '+images[i])
plt.xlabel('Eigenvectors',fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.ylabel('Magnitude of the projection coefficient',fontsize=20)
plt.yticks(weight = 'bold',fontsize=15)
plt.legend(fontsize=20)
plt.savefig('Projection_Coefficients.png')
plt.savefig('Projection_Coefficients.tif')
#Image synthesis
ind = 0  # index of the image to synthesize
m = [1, 5, 10, 15, 20, 30]
plt.figure(figsize=(10,15))
for i in range(0,len(m)):
X_hat = np.reshape(np.matmul(U[:,0:m[i]],Y[0:m[i],ind]),[X.shape[0],1])
print(X_hat.shape)
print(X_mean.shape)
X_hat += X_mean
plt.subplot(3,2,i+1)
im = np.reshape(X_hat,[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('m = '+str(m[i]),fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.yticks(weight = 'bold',fontsize=15)
#img_out = Image.fromarray(im.astype(np.uint8))
#img_out.save('Im_reconstruction_'+str(m[i])+'.tif')
plt.savefig('Im_reconstruction.png')
plt.savefig('Im_reconstruction.tif')
| 31.190476 | 72 | 0.690076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.280407 |
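
For reference, the linear algebra behind the `eigen_images.py` row above: with the mean-centered data matrix X (one vectorized 64x64 image per column), the script's SVD, projection coefficients, and rank-m reconstructions correspond to

```latex
X = U \Sigma V^{\top}, \qquad
X X^{\top} = U \Sigma^{2} U^{\top}, \qquad
Y = U^{\top} X, \qquad
\hat{X} = U_{[:,\,1:m]} \, Y_{[1:m]} + \bar{x}
```

so the columns of U (the plotted eigen images) are eigenvectors of the scatter matrix X X^T, and increasing m yields the progressively sharper reconstructions shown in the m = 1 to 30 panels.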
481e2d50c4328a17ebca8a6fc8ec42de2e15ed63 | 3,986 | py | Python | activatable_model/models.py | wesleykendall/django-activatable-model | e039db72e6a94f622293cabbbac47f88b1c1c4dc | ["MIT"] | null | null | null | activatable_model/models.py | wesleykendall/django-activatable-model | e039db72e6a94f622293cabbbac47f88b1c1c4dc | ["MIT"] | null | null | null | activatable_model/models.py | wesleykendall/django-activatable-model | e039db72e6a94f622293cabbbac47f88b1c1c4dc | ["MIT"] | null | null | null |

from django.db import models
from manager_utils import ManagerUtilsQuerySet, ManagerUtilsManager
from activatable_model.signals import model_activations_changed
class ActivatableQuerySet(ManagerUtilsQuerySet):
"""
Provides bulk activation/deactivation methods.
"""
def update(self, *args, **kwargs):
if self.model.ACTIVATABLE_FIELD_NAME in kwargs:
# Fetch the instances that are about to be updated if they have an activatable flag. This
# is because their activatable flag may be changed in the subsequent update, causing us
# to potentially lose what this original query referenced
updated_instance_ids = list(self.values_list('id', flat=True))
ret_val = super(ActivatableQuerySet, self).update(*args, **kwargs)
if self.model.ACTIVATABLE_FIELD_NAME in kwargs and updated_instance_ids:
# Refetch the instances that were updated and send them to the activation signal
model_activations_changed.send(
self.model, instance_ids=updated_instance_ids,
is_active=kwargs[self.model.ACTIVATABLE_FIELD_NAME])
return ret_val
def activate(self):
return self.update(**{
self.model.ACTIVATABLE_FIELD_NAME: True
})
def deactivate(self):
return self.update(**{
self.model.ACTIVATABLE_FIELD_NAME: False
})
def delete(self, force=False):
return super(ActivatableQuerySet, self).delete() if force else self.deactivate()
class ActivatableManager(ManagerUtilsManager):
def get_queryset(self):
return ActivatableQuerySet(self.model)
def activate(self):
return self.get_queryset().activate()
def deactivate(self):
return self.get_queryset().deactivate()
class BaseActivatableModel(models.Model):
"""
Adds an is_active flag and processes information about when an is_active flag is changed.
"""
class Meta:
abstract = True
# The name of the Boolean field that determines if this model is active or inactive. A field
# must be defined with this name, and it must be a BooleanField. Note that the reason we don't
# define a BooleanField is because this would eliminate the ability for the user to easily
# define default values for the field and if it is indexed.
ACTIVATABLE_FIELD_NAME = 'is_active'
objects = ActivatableManager()
# The original activatable field value, for determining when it changes
__original_activatable_value = None
def __init__(self, *args, **kwargs):
super(BaseActivatableModel, self).__init__(*args, **kwargs)
# Keep track of the original activatable value to know when it changes
self.__original_activatable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
def save(self, *args, **kwargs):
"""
A custom save method that handles figuring out when something is activated or deactivated.
"""
current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value
self.__original_activatable_value = current_activable_value
ret_val = super(BaseActivatableModel, self).save(*args, **kwargs)
# Emit the signal for when the is_active flag is changed
if is_active_changed:
model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)
return ret_val
def delete(self, force=False, **kwargs):
"""
It is impossible to delete an activatable model unless force is True. This function instead sets it to inactive.
"""
if force:
return super(BaseActivatableModel, self).delete(**kwargs)
else:
setattr(self, self.ACTIVATABLE_FIELD_NAME, False)
return self.save(update_fields=[self.ACTIVATABLE_FIELD_NAME])
| 39.078431 | 120 | 0.699448 | 3,814 | 0.956849 | 0 | 0 | 0 | 0 | 0 | 0 | 1,277 | 0.320371 |
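
A minimal usage sketch for `BaseActivatableModel` in the row above; the `Account` model and its fields are hypothetical, but the activatable field must be a BooleanField whose name matches `ACTIVATABLE_FIELD_NAME`, as the class docstring requires.

```python
# Hypothetical concrete model; only the import path comes from the row above.
from django.db import models
from activatable_model.models import BaseActivatableModel

class Account(BaseActivatableModel):
    is_active = models.BooleanField(default=True)
    name = models.CharField(max_length=64)

# Bulk flips go through ActivatableQuerySet.update() and emit the
# model_activations_changed signal:
#   Account.objects.filter(name__startswith='test').deactivate()
# delete() only soft-deletes unless forced:
#   account.delete()             # sets is_active=False and saves
#   account.delete(force=True)   # actually removes the row
```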
481e768516bbaa32d5ff9541dc002fb4c1d54c46 | 755 | py | Python | my_lambdata/my_mod.py | DevvinK/lambdata-devvink-dspt6 | 4920402865912cea619d995a0a092ff8f7dc7806 | ["MIT"] | null | null | null | my_lambdata/my_mod.py | DevvinK/lambdata-devvink-dspt6 | 4920402865912cea619d995a0a092ff8f7dc7806 | ["MIT"] | null | null | null | my_lambdata/my_mod.py | DevvinK/lambdata-devvink-dspt6 | 4920402865912cea619d995a0a092ff8f7dc7806 | ["MIT"] | null | null | null |

# my_lambdata/my_mod.py
# my_lambdata.my_mod
import pandas as pd
def enlarge(num):
return num * 100
def null_check(df):
null_lines = df[df.isnull().any(axis=1)]
return null_lines
def date_divider(df,date_col):
'''
    df: the dataframe to which new Year, Month, and Day columns are added
date_col: the name of the column the date is stored in
'''
converted_df = df.copy()
converted_df["Year"] = pd.DatetimeIndex(converted_df[date_col]).year
converted_df["Month"] = pd.DatetimeIndex(converted_df[date_col]).month
converted_df["Day"] = pd.DatetimeIndex(converted_df[date_col]).day
return converted_df
if __name__ == "__main__":
x = 11
print(enlarge(x))
y = int(input("Please choose a number (e.g. 5)"))
    print(enlarge(y))

| 26.034483 | 73 | 0.695364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.307285 |
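
A quick usage sketch for the `my_mod` helpers in the row above; the sample DataFrame is hypothetical.

```python
# Hypothetical driver for my_lambdata/my_mod.py.
import pandas as pd
from my_lambdata.my_mod import enlarge, null_check, date_divider

df = pd.DataFrame({'created': ['2020-01-15', '2020-06-30', None]})
print(null_check(df))            # the row with the missing date
expanded = date_divider(df.dropna(), 'created')
print(expanded[['created', 'Year', 'Month', 'Day']])
print(enlarge(3))                # 300
```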
4820376c0af9d2cd61ab8620253e559ebeadb415 | 30,627 | py | Python | visibility.py | DanielAndreasen/ObservationTools | bae6bce4345cbd207d901ad5c4073a8e8e8a0d3e | ["MIT"] | 2 | 2016-04-05T16:29:32.000Z | 2016-04-13T15:51:48.000Z | visibility.py | iastro-pt/ObservationTools | bae6bce4345cbd207d901ad5c4073a8e8e8a0d3e | ["MIT"] | 45 | 2016-11-23T17:51:36.000Z | 2021-05-17T10:57:37.000Z | visibility.py | DanielAndreasen/ObservationTools | bae6bce4345cbd207d901ad5c4073a8e8e8a0d3e | ["MIT"] | 2 | 2016-11-24T00:24:29.000Z | 2016-11-30T11:36:43.000Z |

# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import numpy as np
import datetime as dt
from dateutil import tz
import pickle
from random import choice
from PyAstronomy import pyasl
from astropy.coordinates import SkyCoord
from astropy.coordinates import name_resolve
import ephem
import argparse
import calendar
try:
from tqdm import tqdm
except ImportError:
tqdm = lambda x: x
import io
import matplotlib.pyplot as plt
import matplotlib
replace_figure = True
try:
from PySide.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt4.QtGui import QApplication, QImage
except ImportError:
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
except ImportError:
replace_figure = False
def add_clipboard_to_figures():
# replace the original plt.figure() function with one that supports
# clipboard-copying
oldfig = plt.figure
def newfig(*args, **kwargs):
fig = oldfig(*args, **kwargs)
def clipboard_handler(event):
if event.key == 'ctrl+c':
# store the image in a buffer using savefig(), this has the
# advantage of applying all the default savefig parameters
# such as background color; those would be ignored if you simply
# grab the canvas using Qt
buf = io.BytesIO()
fig.savefig(buf)
QApplication.clipboard().setImage(QImage.fromData(buf.getvalue()))
buf.close()
print('Ctrl+C pressed: image is now in the clipboard')
fig.canvas.mpl_connect('key_press_event', clipboard_handler)
return fig
plt.figure = newfig
if replace_figure: add_clipboard_to_figures()
def _parser():
parser = argparse.ArgumentParser(description='Plot altitudes of objects'
' against time for a specific night')
parser.add_argument('targets', help='E.g. HD20010 or HD20010,HD41248', nargs='+')
parser.add_argument('-d', '--date', default='today',
help='Date in format YYYY-MM-DD (or YYYY if starobs). '
' Default is today (this year if starobs).')
parser.add_argument('-P', '--period', default=None, type=str, nargs=1,
help='Specify ESO period (October-March / April-September)')
parser.add_argument('-s', '--site', default='esolasilla',
help='Observatory. Default is ESO La Silla. '
'Common codes are esoparanal, lapalma, keck, lco, Palomar, etc')
parser.add_argument('-l', '--loc', default=None,
help='Give the location of the observatory.'
'Comma-separated altitude, latitude, longitude, timezone')
parser.add_argument('-c', default=False, action='store_true',
help='Just print "target RA DEC" (to use in STARALT)')
parser.add_argument('-m', '--mode', choices=['staralt', 'starobs'], default='staralt',
help='staralt: plot altitude against time for a particular night; '
'starobs: plot how altitude changes over a year')
parser.add_argument('--sh', default=None, type=float, nargs=1, dest='A',
help='Include plot of sunless hours above airmass A')
parser.add_argument('--hover', default=False, action='store_true',
help='Color lines when mouse over')
parser.add_argument('-o', '--save', default=None, type=str, nargs=1,
help='Save figure in output file (provide file extension)')
parser.add_argument('--remove-watermark', default=False, action='store_true',
help='Remove "Created with..." watermark text')
return parser.parse_args()
def decdeg2dms(dd):
""" Convert decimal degrees to deg,min,sec """
is_positive = dd >= 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
degrees = degrees if is_positive else -degrees
return (degrees,minutes,seconds)
class CacheSkyCoord(SkyCoord):
@classmethod
def from_name(cls, name, frame='icrs'):
try:
cached = pickle.load(open('CachedSkyCoords.pickle', 'rb'))
except FileNotFoundError:
cached = {}
if name in cached:
return cached[name]
else:
original = super(CacheSkyCoord, cls).from_name(name, frame)
# keep the cached dict manageable
n = len(cached)
if n>100:
# remove a random cached target
cached.pop(choice(list(cached.keys())))
cached.update({name: original})
pickle.dump(cached, open('CachedSkyCoords.pickle', 'wb'))
return original
ESO_periods = {
104 : [ (2019, 10, 1), (2020, 3, 31)],
103 : [ (2019, 4, 1), (2019, 9, 30)],
102 : [ (2018, 10, 1), (2019, 3, 31)],
101 : [ (2018, 4, 1), (2018, 9, 30)],
100 : [ (2017, 10, 1), (2018, 3, 31)],
99 : [ (2017, 4, 1), (2017, 9, 30)],
98 : [ (2016, 10, 1), (2017, 3, 31)],
97 : [ (2016, 4, 1), (2016, 9, 30)],
96 : [ (2015, 10, 1), (2016, 3, 31)],
95 : [ (2015, 4, 1), (2015, 9, 30)],
94 : [ (2014, 10, 1), (2015, 3, 31)],
93 : [ (2014, 4, 1), (2014, 9, 30)],
92 : [ (2013, 10, 1), (2014, 3, 31)],
}
def get_ESO_period(period):
""" Return the JD of start and end of ESO period """
assert isinstance(period, str) or isinstance(period, int)
P = int(period)
getjd = lambda y,m,d: pyasl.jdcnv(dt.datetime(y, m, d))
jd_start, jd_end = [getjd(*d) for d in ESO_periods[P]]
return jd_start, jd_end
def StarObsPlot(year=None, targets=None, observatory=None, period=None,
hover=False, sunless_hours=None, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
year: int
The year for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
period: string, optional
ESO period for which to calculate the visibility. Overrides `year`.
hover: boolean, optional
If True, color visibility lines when mouse over.
sunless_hours: float, optional
If not None, plot sunless hours above this airmass
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
# set the observatory
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
fig = plt.figure(figsize=(15,10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
  # watermark
if not remove_watermark:
fig.text(0.99, 0.99, 'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray',
ha='right', va='top', alpha=0.5)
# plotting sunless hours?
shmode = False
if sunless_hours is not None:
shmode = True
# limit in airmass (assumed plane-parallel atm)
shairmass = sunless_hours
    # corresponding limit in altitude
from scipy.optimize import bisect
shalt = 90 - bisect(lambda alt: pyasl.airmassPP(alt) - shairmass, 0, 89)
if shmode:
fig.subplots_adjust(hspace=0.35)
ax = host_subplot(211)
axsh = host_subplot(212)
plt.text(0.5, 0.47,
"- sunless hours above airmass {:.1f} - \n".format(shairmass),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
plt.text(0.5, 0.465,
"the thick line above the curves represents the total sunless hours "\
"for each day of the year",
transform=fig.transFigure, ha='center', va='bottom', fontsize=10)
else:
ax = host_subplot(111)
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
if period is not None:
jd_start, jd_end = get_ESO_period(period)
else:
jd_start = pyasl.jdcnv(dt.datetime(year, 1, 1))
jd_end = pyasl.jdcnv(dt.datetime(year, 12, 31))
jdbinsize = 1 # every day
each_day = np.arange(jd_start, jd_end, jdbinsize)
jds = []
## calculate the mid-dark times
sun = ephem.Sun()
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer();
s.date = date_formatted;
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
jds.append(ephem.julian_date(s.next_antitransit(sun)))
jds = np.array(jds)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones_like(jds)*target_ra, np.ones_like(jds)*target_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
ax.plot( jdsub, altaz[0], '-', color='k')
# label for each target
plabel = "[{0:2d}] {1!s}".format(n+1, target['name'])
# number of target at the top of the curve
ind_label = np.argmax(altaz[0])
# or at the bottom if the top is too close to the corners
# if jdsub[ind_label] < 5 or jdsub[ind_label] > jdsub.max()-5:
# ind_label = np.argmin(altaz[0])
ax.text( jdsub[ind_label], altaz[0][ind_label], str(n+1), color="b", fontsize=14, \
fontproperties=font1, va="bottom", ha="center")
if n+1 == 29:
# too many?
ax.text(1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=10, fontproperties=font0, color="r")
else:
ax.text(1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
if shmode:
sunless_hours = []
for day in each_day:
date_formatted = '/'.join([str(i) for i in pyasl.daycnv(day)[:-1]])
s = ephem.Observer();
s.date = date_formatted;
s.lat = ':'.join([str(i) for i in decdeg2dms(obs['latitude'])])
s.lon = ':'.join([str(i) for i in decdeg2dms(obs['longitude'])])
# hours from sunrise to sunset
td = pyasl.daycnv(ephem.julian_date(s.next_setting(sun)), mode='dt') \
- pyasl.daycnv(ephem.julian_date(s.next_rising(sun)), mode='dt')
sunless_hours.append(24 - td.total_seconds() / 3600)
days = each_day - np.floor(each_day[0])
axsh.plot(days, sunless_hours, '-', color='k', lw=2)
axsh.set(ylim=(0, 15), yticks=range(1,15), ylabel='Useful hours',
yticklabels=[r'${}^{{\rm h}}$'.format(n) for n in range(1,15)])
ax.text(1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
if period is None:
months = range(1, 13)
    ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 366])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 366])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
if int(period) % 2 == 0:
# even ESO period, Oct -> Mar
months = [10, 11, 12, 1, 2, 3]
      ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 181])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 181])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
else:
# odd ESO period, Apr -> Sep
months = range(4, 10)
      ndays = [0] + [calendar.monthrange(year, m)[1] for m in months]
ax.set_xlim([0, 182])
ax.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
ax.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if shmode:
axsh.set_xlim([0, 182])
axsh.set_xticks(np.cumsum(ndays)[:-1] + (np.array(ndays)/2.)[1:])
axsh.set_xticklabels(map(calendar.month_abbr.__getitem__, months), fontsize=10)
if axrange[1]-axrange[0] <= 1.0:
jdhours = np.arange(0,3,1.0/24.)
utchours = (np.arange(0,72,dtype=int)+12)%24
else:
jdhours = np.arange(0,3,1.0/12.)
utchours = (np.arange(0,72, 2, dtype=int)+12)%24
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(np.cumsum(ndays))
ax2.set_xlabel("Day")
# plane-parallel airmass
airmass_ang = np.arange(10, 81, 5)
geo_airmass = pyasl.airmass.airmassPP(airmass_ang)[::-1]
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat)#, rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=6, labelsize=8)
plt.text(1.02,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 30))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(map(lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']), airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in range(len(airmass2)):
airmassformat.append(" {0:2.2f}".format(airmass2[t]))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=8, labelsize=8)
plt.text(1.05,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
fontsize=10, rotation=90)
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size):
ytickformat.append(str(int(yticks[t]))+r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = ax.get_yticks(minor=True)
ymind = np.where( yticksminor % 15. != 0. )[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size):
m_ytickformat.append(str(int(yticksminor[t]))+r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
if period is not None:
plt.text(0.5, 0.95,
"Visibility over P{0!s}\n - altitudes at mid-dark time -".format(period),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
else:
plt.text(0.5, 0.95,
"Visibility over {0!s}\n - altitudes at mid-dark time -".format(date),
transform=fig.transFigure, ha='center', va='bottom', fontsize=12)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4f} m".format(obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01,0.97, obsco, transform=fig.transFigure, ha='left', va='center', fontsize=10)
plt.text(0.01,0.95, obs['name'], transform=fig.transFigure, ha='left', va='center', fontsize=10)
# interactive!
if hover:
main_axis = fig.axes[0]
all_lines = set(main_axis.get_lines())
def on_plot_hover(event):
for line in main_axis.get_lines():
if line.contains(event)[0]:
line.set_color('red') # make this line red
# and all others black
all_other_lines = all_lines - set([line])
for other_line in all_other_lines:
other_line.set_color('black')
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', on_plot_hover)
return fig
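# Hedged usage sketch (targets built the same way as in __main__ below):
#   tgt = {'name': 'HD20010', 'coord': CacheSkyCoord.from_name('HD20010')}
#   fig = StarObsPlot(year=2019, targets=[tgt], observatory='esolasilla')
#   fig.savefig('HD20010_2019.png')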
def VisibilityPlot(date=None, targets=None, observatory=None, plotLegend=True,
showMoonDist=True, print2file=False, remove_watermark=False):
"""
Plot the visibility of target.
Parameters
----------
date: datetime
The date for which to calculate the visibility.
targets: list
List of targets.
Each target should be a dictionary with keys 'name' and 'coord'.
        The key 'name' is a string, 'coord' is a SkyCoord object.
observatory: string
Name of the observatory that pyasl.observatory can resolve.
Basically, any of pyasl.listObservatories().keys()
plotLegend: boolean, optional
If True (default), show a legend.
showMoonDist : boolean, optional
If True (default), the Moon distance will be shown.
"""
from mpl_toolkits.axes_grid1 import host_subplot
from matplotlib.ticker import MultipleLocator
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
rcParams['xtick.major.pad'] = 12
if isinstance(observatory, dict):
obs = observatory
else:
obs = pyasl.observatory(observatory)
# observer = ephem.Observer()
# observer.pressure = 0
# observer.horizon = '-0:34'
# observer.lat, observer.lon = obs['latitude'], obs['longitude']
# observer.date = date
# print(observer.date)
# print(observer.previous_rising(ephem.Sun()))
# print(observer.next_setting(ephem.Sun()))
# print(observer.previous_rising(ephem.Moon()))
# print(observer.next_setting(ephem.Moon()))
# observer.horizon = '-6'
# noon = observer.next_transit(ephem.Sun())
# print(noon)
# print(observer.previous_rising(ephem.Sun(), start=noon, use_center=True))
# print()
fig = plt.figure(figsize=(15,10))
fig.subplots_adjust(left=0.07, right=0.8, bottom=0.15, top=0.88)
  # watermark
if not remove_watermark:
fig.text(0.99, 0.99, 'Created with\ngithub.com/iastro-pt/ObservationTools',
fontsize=10, color='gray',
ha='right', va='top', alpha=0.5)
ax = host_subplot(111)
font0 = FontProperties()
font1 = font0.copy()
font0.set_family('sans-serif')
font0.set_weight('light')
font1.set_family('sans-serif')
font1.set_weight('medium')
for n, target in enumerate(targets):
target_coord = target['coord']
target_ra = target_coord.ra.deg
target_dec = target_coord.dec.deg
# JD array
jdbinsize = 1.0/24./20.
# jds = np.arange(allData[n]["Obs jd"][0], allData[n]["Obs jd"][2], jdbinsize)
jd = pyasl.jdcnv(date)
jd_start = pyasl.jdcnv(date)-0.5
jd_end = pyasl.jdcnv(date)+0.5
jds = np.arange(jd_start, jd_end, jdbinsize)
# Get JD floating point
jdsub = jds - np.floor(jds[0])
# Get alt/az of object
altaz = pyasl.eq2hor(jds, np.ones(jds.size)*target_ra, np.ones(jds.size)*target_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Get alt/az of Sun
sun_position = pyasl.sunpos(jd)
sun_ra, sun_dec = sun_position[1], sun_position[2]
sunpos_altaz = pyasl.eq2hor(jds, np.ones(jds.size)*sun_ra, np.ones(jds.size)*sun_dec, \
lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# Define plot label
plabel = "[{0:2d}] {1!s}".format(n+1, target['name'])
# Find periods of: day, twilight, and night
day = np.where( sunpos_altaz[0] >= 0. )[0]
twi = np.where( np.logical_and(sunpos_altaz[0] > -18., sunpos_altaz[0] < 0.) )[0]
night = np.where( sunpos_altaz[0] <= -18. )[0]
if (len(day) == 0) and (len(twi) == 0) and (len(night) == 0):
      print()
print("VisibilityPlot - no points to draw")
      print()
mpos = pyasl.moonpos(jds)
# mpha = pyasl.moonphase(jds)
# mpos_altaz = pyasl.eq2hor(jds, mpos[0], mpos[1],
# lon=obs['longitude'], lat=obs['latitude'], alt=obs['altitude'])
# moonind = np.where( mpos_altaz[0] > 0. )[0]
if showMoonDist:
mdist = pyasl.getAngDist(mpos[0], mpos[1], np.ones(jds.size)*target_ra, \
np.ones(jds.size)*target_dec)
bindist = int((2.0/24.)/jdbinsize)
firstbin = np.random.randint(0,bindist)
for mp in range(0, int(len(jds)/bindist)):
bind = firstbin+mp*bindist
if altaz[0][bind]-1. < 5.: continue
ax.text(jdsub[bind], altaz[0][bind]-1., str(int(mdist[bind]))+r"$^\circ$", ha="center", va="top", \
fontsize=8, stretch='ultra-condensed', fontproperties=font0, alpha=1.)
if len(twi) > 1:
# There are points in twilight
linebreak = np.where( (jdsub[twi][1:]-jdsub[twi][:-1]) > 2.0*jdbinsize)[0]
if len(linebreak) > 0:
plotrjd = np.insert(jdsub[twi], linebreak+1, np.nan)
plotdat = np.insert(altaz[0][twi], linebreak+1, np.nan)
ax.plot( plotrjd, plotdat, "-", color='#BEBEBE', linewidth=1.5)
else:
ax.plot( jdsub[twi], altaz[0][twi], "-", color='#BEBEBE', linewidth=1.5)
ax.plot( jdsub[night], altaz[0][night], '.k', label=plabel)
ax.plot( jdsub[day], altaz[0][day], '.', color='#FDB813')
altmax = np.argmax(altaz[0])
ax.text( jdsub[altmax], altaz[0][altmax], str(n+1), color="b", fontsize=14, \
fontproperties=font1, va="bottom", ha="center")
if n+1 == 29:
ax.text( 1.1, 1.0-float(n+1)*0.04, "too many targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=10, fontproperties=font0, color="r")
else:
ax.text( 1.1, 1.0-float(n+1)*0.04, plabel, ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
ax.text( 1.1, 1.03, "List of targets", ha="left", va="top", transform=ax.transAxes, \
fontsize=12, fontproperties=font0, color="b")
axrange = ax.get_xlim()
ax.set_xlabel("UT [hours]")
if axrange[1]-axrange[0] <= 1.0:
jdhours = np.arange(0,3,1.0/24.)
utchours = (np.arange(0,72,dtype=int)+12)%24
else:
jdhours = np.arange(0,3,1.0/12.)
utchours = (np.arange(0,72, 2, dtype=int)+12)%24
ax.set_xticks(jdhours)
ax.set_xlim(axrange)
ax.set_xticklabels(utchours, fontsize=18)
# Make ax2 responsible for "top" axis and "right" axis
ax2 = ax.twin()
# Set upper x ticks
ax2.set_xticks(jdhours)
ax2.set_xticklabels(utchours, fontsize=18)
ax2.set_xlabel("UT [hours]")
# Horizon angle for airmass
airmass_ang = np.arange(5.,90.,5.)
geo_airmass = pyasl.airmass.airmassPP(90.-airmass_ang)
ax2.set_yticks(airmass_ang)
airmassformat = []
for t in range(geo_airmass.size):
airmassformat.append("{0:2.2f}".format(geo_airmass[t]))
ax2.set_yticklabels(airmassformat, rotation=90)
ax2.set_ylabel("Relative airmass", labelpad=32)
ax2.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.015,-0.04, "Plane-parallel", transform=ax.transAxes, ha='left', \
va='top', fontsize=10, rotation=90)
ax22 = ax.twin()
ax22.set_xticklabels([])
ax22.set_frame_on(True)
ax22.patch.set_visible(False)
ax22.yaxis.set_ticks_position('right')
ax22.yaxis.set_label_position('right')
ax22.spines['right'].set_position(('outward', 25))
ax22.spines['right'].set_color('k')
ax22.spines['right'].set_visible(True)
airmass2 = list(map(lambda ang: pyasl.airmass.airmassSpherical(90. - ang, obs['altitude']), airmass_ang))
ax22.set_yticks(airmass_ang)
airmassformat = []
for t in airmass2:
airmassformat.append("{0:2.2f}".format(t))
ax22.set_yticklabels(airmassformat, rotation=90)
ax22.tick_params(axis="y", pad=10, labelsize=10)
plt.text(1.045,-0.04, "Spherical+Alt", transform=ax.transAxes, ha='left', va='top', \
fontsize=10, rotation=90)
ax3 = ax.twiny()
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
ax3.xaxis.set_ticks_position('bottom')
ax3.xaxis.set_label_position('bottom')
ax3.spines['bottom'].set_position(('outward', 50))
ax3.spines['bottom'].set_color('k')
ax3.spines['bottom'].set_visible(True)
ltime, ldiff = pyasl.localtime.localTime(utchours, np.repeat(obs['longitude'], len(utchours)))
jdltime = jdhours - ldiff/24.
ax3.set_xticks(jdltime)
ax3.set_xticklabels(utchours)
ax3.set_xlim([axrange[0],axrange[1]])
ax3.set_xlabel("Local time [hours]")
ax.set_ylim([0, 91])
ax.yaxis.set_major_locator(MultipleLocator(15))
ax.yaxis.set_minor_locator(MultipleLocator(5))
yticks = ax.get_yticks()
ytickformat = []
for t in range(yticks.size): ytickformat.append(str(int(yticks[t]))+r"$^\circ$")
ax.set_yticklabels(ytickformat, fontsize=16)
ax.set_ylabel("Altitude", fontsize=18)
yticksminor = ax.get_yticks(minor=True)
ymind = np.where( yticksminor % 15. != 0. )[0]
yticksminor = yticksminor[ymind]
ax.set_yticks(yticksminor, minor=True)
m_ytickformat = []
for t in range(yticksminor.size): m_ytickformat.append(str(int(yticksminor[t]))+r"$^\circ$")
ax.set_yticklabels(m_ytickformat, minor=True)
ax.set_ylim([0, 91])
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.yaxis.grid(color='gray', which="minor", linestyle='dotted')
ax2.xaxis.grid(color='gray', linestyle='dotted')
plt.text(0.5,0.95,"Visibility on {0!s}".format(date.date()), \
transform=fig.transFigure, ha='center', va='bottom', fontsize=20)
if plotLegend:
line1 = matplotlib.lines.Line2D((0,0),(1,1), color='#FDB813', linestyle="-", linewidth=2)
line2 = matplotlib.lines.Line2D((0,0),(1,1), color='#BEBEBE', linestyle="-", linewidth=2)
line3 = matplotlib.lines.Line2D((0,0),(1,1), color='k', linestyle="-", linewidth=2)
lgd2 = plt.legend((line1,line2,line3),("day","twilight","night",), \
bbox_to_anchor=(0.88, 0.13), loc='best', borderaxespad=0.,prop={'size':12}, fancybox=True)
lgd2.get_frame().set_alpha(.5)
obsco = "Obs coord.: {0:8.4f}$^\circ$, {1:8.4f}$^\circ$, {2:4f} m".format(obs['longitude'], obs['latitude'], obs['altitude'])
plt.text(0.01,0.97, obsco, transform=fig.transFigure, ha='left', va='center', fontsize=10)
plt.text(0.01,0.95, obs['name'], transform=fig.transFigure, ha='left', va='center', fontsize=10)
return fig
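# Hedged usage sketch (mirrors the 'staralt' branch of __main__ below):
#   tgt = {'name': 'HD20010', 'coord': CacheSkyCoord.from_name('HD20010')}
#   fig = VisibilityPlot(date=dt.datetime(2018, 5, 22), targets=[tgt],
#                        observatory='esolasilla')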
if __name__ == '__main__':
args = _parser()
target_names = args.targets[0].split(',')
## Get coordinates for all the targets
targets = []
# flush keyword was not backported to Python < 3.3
if sys.version_info[:2] < (3, 3):
print('Sending queries to CDS...', end=' '); sys.stdout.flush()
else:
print('Sending queries to CDS...', end=' ', flush=True)
for target_name in tqdm(target_names):
try:
targets.append({'name': target_name,
'coord': CacheSkyCoord.from_name(target_name)})
except name_resolve.NameResolveError as e:
print('Could not find target: {0!s}'.format(target_name))
## Just print coordinates in STARALT format and exit
if args.c:
print('Coordinates for {0!s}\n'.format(args.targets[0]))
for target in targets:
## name hh mm ss ±dd mm ss
out = '{0!s}'.format(target['name'])
ra = target['coord'].ra.hms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(int(ra.h), int(ra.m), ra.s)
dec = target['coord'].dec.dms
out += ' {0:02d} {1:02d} {2:5.3f}'.format(int(dec.d), int(dec.m), dec.s)
print(out)
sys.exit(0)
## Actually calculate the visibility curves
print('Calculating visibility for {0!s}'.format(args.targets[0]))
P = args.period
if args.period is not None:
if args.mode != 'starobs':
print('Specifying ESO period is only possible in "starobs" mode')
sys.exit(1)
P = args.period[0]
P = P.replace('P','') # if user gave --period P100, for example
if args.date == 'today':
if args.mode == 'staralt':
today = dt.datetime.now() # now() gives the current time which we don't want
date = dt.datetime(today.year, today.month, today.day, tzinfo=tz.tzutc())
elif args.mode == 'starobs':
date = dt.datetime.now().year
else:
if args.mode == 'staralt':
if "-" not in args.date:
raise ValueError("Date needs to be provided as YYYY-MM-DD for staralt mode.")
ymd = [int(i) for i in args.date.split('-')]
date = dt.datetime(*ymd)
elif args.mode == 'starobs':
if "-" in args.date:
date = int(args.date.split('-')[0])
else:
date = int(args.date)
## Find observatory
if args.loc is None:
available_sites = pyasl.listObservatories(show=False)
if args.site not in available_sites.keys():
print('"{0!s}" is not a valid observatory code. Try one of the following:\n'.format(args.site))
maxCodeLen = max(map(len, available_sites.keys()))
print(("{0:"+str(maxCodeLen)+"s} ").format("Code") + "Observatory name")
print("-" * (21+maxCodeLen))
for k in sorted(available_sites.keys(), key=lambda s: s.lower()):
print(("{0:"+str(maxCodeLen)+"s} --- ").format(k) + available_sites[k]["name"])
sys.exit(1)
site = args.site
else:
loc = list(map(float, args.loc.split(',')))
site = {'altitude':loc[0], 'latitude': loc[1], 'longitude':loc[2], 'tz':loc[3], 'name':'unknown'}
if args.mode == 'staralt':
fig = VisibilityPlot(date=date, targets=targets, observatory=site,
remove_watermark=args.remove_watermark)
elif args.mode == 'starobs':
if args.A is not None:
am = args.A[0]
else:
am = None
fig = StarObsPlot(year=date, targets=targets, observatory=site,
period=P, hover=args.hover, sunless_hours=am,
remove_watermark=args.remove_watermark)
if args.save is not None:
print('Saving the figure to {}'.format(args.save[0]))
fig.savefig(args.save[0])
else:
plt.show()
| 37.259124 | 127 | 0.631795 | 629 | 0.020537 | 0 | 0 | 596 | 0.019459 | 0 | 0 | 7,915 | 0.258424 |
4820cb952aabf646bc6ba2c9f17988cb0a784a1d | 2,410 | py | Python | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | ["MIT"] | null | null | null | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | ["MIT"] | null | null | null | brainrender/Utils/parsers/rat.py | maithamn/BrainRender | 9359ccc5b278f58ee3124bcf75b9ebefe0378bbc | ["MIT"] | null | null | null |
import sys
sys.path.append('./')
import os
import pandas as pd
from vtkplotter import load
from brainrender import DEFAULT_STRUCTURE_COLOR
def get_rat_regions_metadata(metadata_fld):
"""
:param metadata_fld:
"""
return pd.read_pickle(os.path.join(metadata_fld, "rat_structures.pkl"))
def get_rat_mesh_from_region(region, paths, use_original_color=False, **kwargs):
"""
:param region:
:param paths:
:param use_original_color: (Default value = False)
:param **kwargs:
"""
if not isinstance(region, (tuple, list)):
region = [region]
check = False
else: check = True
metadata = get_rat_regions_metadata(paths.metadata)
meshes = []
for reg in region:
if isinstance(reg, int):
entry = metadata.loc[metadata.Id == reg]
elif isinstance(reg, str):
entry = metadata.loc[metadata['Name'] == reg]
else:
raise ValueError("Unrecognized value for region while trying to get mesh for rat: {}".format(reg))
try:
meshname = os.path.join(paths.rat_meshes, "label_{}.stl".format(entry.Id.values[0]))
if not os.path.isfile(meshname):
                raise FileNotFoundError(meshname)
if use_original_color:
c = entry["rgb"].values[0]
if isinstance(c, str):
c = c.replace("[", "")
c = c.replace("]", "")
cols = c.split(",")
color = [int(c) for c in cols]
else:
color = c
else:
if "color" in list(kwargs.keys()):
color = kwargs.pop("color", DEFAULT_STRUCTURE_COLOR)
elif "c" in list(kwargs.keys()):
color = kwargs.pop("c", DEFAULT_STRUCTURE_COLOR)
if "color" in list(kwargs.keys()): del kwargs["color"]
elif "c" in list(kwargs.keys()): del kwargs["c"]
mesh = load(meshname, c=color, **kwargs)
mesh = mesh.smoothLaplacian().subdivide(2)
meshes.append(mesh)
        except Exception:
print("Could not load rat region: {}".format(entry["Name"].values[0]))
return None
if not check:
return meshes[0]
else:
return meshes
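# Hedged usage sketch (`paths` is assumed to expose `metadata` and `rat_meshes`
# folders, as used above; the region name is illustrative):
#   mesh = get_rat_mesh_from_region("Striatum", paths, use_original_color=True)
#   meshes = get_rat_mesh_from_region([100, 101], paths, c='salmon')  # list in -> list out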
if __name__ == "__main__":
    pass
    # fix_data()  # left disabled: fix_data() is not defined in this module
| 29.036145 | 110 | 0.546473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.171784 |
482206f17e299eb89f694f10375879badc6e9f3d | 8,678 | py | Python | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | ["ECL-2.0", "Apache-2.0"] | 1 | 2020-08-18T13:00:39.000Z | 2020-08-18T13:00:39.000Z | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | ["ECL-2.0", "Apache-2.0"] | null | null | null | components/isceobj/Alos2burstProc/runFrameMosaic.py | vincentschut/isce2 | 1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c | ["ECL-2.0", "Apache-2.0"] | null | null | null |
#
# Author: Cunren Liang
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import logging
import isceobj
from isceobj.Alos2Proc.runFrameMosaic import frameMosaic
from isceobj.Alos2Proc.runFrameMosaic import frameMosaicParameters
from isceobj.Alos2Proc.Alos2ProcPublic import create_xml
logger = logging.getLogger('isce.alos2burstinsar.runFrameMosaic')
def runFrameMosaic(self):
'''mosaic frames
'''
catalog = isceobj.Catalog.createCatalog(self._insar.procDoc.name)
self.updateParamemetersFromUser()
referenceTrack = self._insar.loadTrack(reference=True)
secondaryTrack = self._insar.loadTrack(reference=False)
mosaicDir = 'insar'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
numberOfFrames = len(referenceTrack.frames)
if numberOfFrames == 1:
import shutil
frameDir = os.path.join('f1_{}/mosaic'.format(self._insar.referenceFrames[0]))
if not os.path.isfile(self._insar.interferogram):
os.symlink(os.path.join('../', frameDir, self._insar.interferogram), self._insar.interferogram)
#shutil.copy2() can overwrite
shutil.copy2(os.path.join('../', frameDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
shutil.copy2(os.path.join('../', frameDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
if not os.path.isfile(self._insar.amplitude):
os.symlink(os.path.join('../', frameDir, self._insar.amplitude), self._insar.amplitude)
shutil.copy2(os.path.join('../', frameDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
shutil.copy2(os.path.join('../', frameDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
# os.rename(os.path.join('../', frameDir, self._insar.interferogram), self._insar.interferogram)
# os.rename(os.path.join('../', frameDir, self._insar.interferogram+'.vrt'), self._insar.interferogram+'.vrt')
# os.rename(os.path.join('../', frameDir, self._insar.interferogram+'.xml'), self._insar.interferogram+'.xml')
# os.rename(os.path.join('../', frameDir, self._insar.amplitude), self._insar.amplitude)
# os.rename(os.path.join('../', frameDir, self._insar.amplitude+'.vrt'), self._insar.amplitude+'.vrt')
# os.rename(os.path.join('../', frameDir, self._insar.amplitude+'.xml'), self._insar.amplitude+'.xml')
#update track parameters
#########################################################
#mosaic size
referenceTrack.numberOfSamples = referenceTrack.frames[0].numberOfSamples
referenceTrack.numberOfLines = referenceTrack.frames[0].numberOfLines
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
referenceTrack.startingRange = referenceTrack.frames[0].startingRange
referenceTrack.rangeSamplingRate = referenceTrack.frames[0].rangeSamplingRate
referenceTrack.rangePixelSize = referenceTrack.frames[0].rangePixelSize
#azimuth parameters
referenceTrack.sensingStart = referenceTrack.frames[0].sensingStart
referenceTrack.prf = referenceTrack.frames[0].prf
referenceTrack.azimuthPixelSize = referenceTrack.frames[0].azimuthPixelSize
referenceTrack.azimuthLineInterval = referenceTrack.frames[0].azimuthLineInterval
#update track parameters, secondary
#########################################################
#mosaic size
secondaryTrack.numberOfSamples = secondaryTrack.frames[0].numberOfSamples
secondaryTrack.numberOfLines = secondaryTrack.frames[0].numberOfLines
#NOTE THAT WE ARE STILL USING SINGLE LOOK PARAMETERS HERE
#range parameters
secondaryTrack.startingRange = secondaryTrack.frames[0].startingRange
secondaryTrack.rangeSamplingRate = secondaryTrack.frames[0].rangeSamplingRate
secondaryTrack.rangePixelSize = secondaryTrack.frames[0].rangePixelSize
#azimuth parameters
secondaryTrack.sensingStart = secondaryTrack.frames[0].sensingStart
secondaryTrack.prf = secondaryTrack.frames[0].prf
secondaryTrack.azimuthPixelSize = secondaryTrack.frames[0].azimuthPixelSize
secondaryTrack.azimuthLineInterval = secondaryTrack.frames[0].azimuthLineInterval
else:
#choose offsets
if self.frameOffsetMatching:
rangeOffsets = self._insar.frameRangeOffsetMatchingReference
azimuthOffsets = self._insar.frameAzimuthOffsetMatchingReference
else:
rangeOffsets = self._insar.frameRangeOffsetGeometricalReference
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalReference
#list of input files
inputInterferograms = []
inputAmplitudes = []
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
inputInterferograms.append(os.path.join('../', frameDir, 'mosaic', self._insar.interferogram))
inputAmplitudes.append(os.path.join('../', frameDir, 'mosaic', self._insar.amplitude))
#note that track parameters are updated after mosaicking
#mosaic amplitudes
frameMosaic(referenceTrack, inputAmplitudes, self._insar.amplitude,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=False, phaseCompensation=False, resamplingMethod=0)
#mosaic interferograms
frameMosaic(referenceTrack, inputInterferograms, self._insar.interferogram,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=True, phaseCompensation=True, resamplingMethod=1)
create_xml(self._insar.amplitude, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'amp')
create_xml(self._insar.interferogram, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'int')
#update secondary parameters here
#do not match for secondary, always use geometrical
rangeOffsets = self._insar.frameRangeOffsetGeometricalSecondary
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalSecondary
frameMosaicParameters(secondaryTrack, rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1)
os.chdir('../')
#save parameter file
self._insar.saveProduct(referenceTrack, self._insar.referenceTrackParameter)
self._insar.saveProduct(secondaryTrack, self._insar.secondaryTrackParameter)
#mosaic spectral diversity inteferograms
mosaicDir = 'sd'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
numberOfFrames = len(referenceTrack.frames)
if numberOfFrames == 1:
import shutil
frameDir = os.path.join('f1_{}/mosaic'.format(self._insar.referenceFrames[0]))
for sdFile in self._insar.interferogramSd:
if not os.path.isfile(sdFile):
os.symlink(os.path.join('../', frameDir, sdFile), sdFile)
shutil.copy2(os.path.join('../', frameDir, sdFile+'.vrt'), sdFile+'.vrt')
shutil.copy2(os.path.join('../', frameDir, sdFile+'.xml'), sdFile+'.xml')
else:
#choose offsets
if self.frameOffsetMatching:
rangeOffsets = self._insar.frameRangeOffsetMatchingReference
azimuthOffsets = self._insar.frameAzimuthOffsetMatchingReference
else:
rangeOffsets = self._insar.frameRangeOffsetGeometricalReference
azimuthOffsets = self._insar.frameAzimuthOffsetGeometricalReference
#list of input files
inputSd = [[], [], []]
for i, frameNumber in enumerate(self._insar.referenceFrames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for k, sdFile in enumerate(self._insar.interferogramSd):
inputSd[k].append(os.path.join('../', frameDir, 'mosaic', sdFile))
#mosaic spectral diversity interferograms
for inputSdList, outputSdFile in zip(inputSd, self._insar.interferogramSd):
frameMosaic(referenceTrack, inputSdList, outputSdFile,
rangeOffsets, azimuthOffsets, self._insar.numberRangeLooks1, self._insar.numberAzimuthLooks1,
updateTrack=False, phaseCompensation=True, resamplingMethod=1)
for sdFile in self._insar.interferogramSd:
create_xml(sdFile, referenceTrack.numberOfSamples, referenceTrack.numberOfLines, 'int')
os.chdir('../')
catalog.printToLog(logger, "runFrameMosaic")
self._insar.procDoc.addAllFromCatalog(catalog)
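# Hedged note: in the standard Alos2burstInsar application flow this step runs
# after per-frame processing and before the downstream looks/filter steps;
# amplitudes are mosaicked without phase compensation, while interferograms
# (including the spectral-diversity ones above) use phaseCompensation=True.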
| 50.748538 | 139 | 0.698779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,750 | 0.201659 |
48223ac36e2493351b3ff6303137a45f254fb804 | 820 | py | Python | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | ["MIT"] | null | null | null | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | ["MIT"] | null | null | null | acronym.py | steffenschroeder/python-playground | 3c94a7c92a26d41e69118e8245e8ac9db7cf5ed2 | ["MIT"] | null | null | null |
import unittest
import re
def abbreviate(text):
    # first letter of each word plus internal capitals ('HyperText' -> 'HT'); all-caps words count once ('PHP' -> 'P')
    return "".join(w[0].upper() + ("" if w.isupper() else "".join(filter(str.isupper, w[1:]))) for w in re.split(r"[^A-Za-z']+", text) if w)
class AcronymTest(unittest.TestCase):
def test_basic(self):
self.assertEqual('PNG', abbreviate('Portable Network Graphics'))
def test_lowercase_words(self):
self.assertEqual('ROR', abbreviate('Ruby on Rails'))
def test_camelcase(self):
self.assertEqual('HTML', abbreviate('HyperText Markup Language'))
def test_punctuation(self):
self.assertEqual('FIFO', abbreviate('First In, First Out'))
def test_all_caps_words(self):
self.assertEqual('PHP', abbreviate('PHP: Hypertext Preprocessor'))
def test_hyphenated(self):
self.assertEqual('CMOS', abbreviate('Complementary metal-oxide semiconductor'))
if __name__ == '__main__':
    unittest.main()
| 27.333333 | 87 | 0.687805 | 672 | 0.819512 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.25 |
4822b67c5088178025d58774742a32a17ce91c77 | 834 | py | Python | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z |
import _plotly_utils.basevalidators
class ConnectorValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="connector", parent_name="waterfall", **kwargs):
super(ConnectorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Connector"),
data_docs=kwargs.pop(
"data_docs",
"""
line
:class:`plotly.graph_objects.waterfall.connecto
r.Line` instance or dict with compatible
properties
mode
Sets the shape of connector lines.
visible
Determines if connector lines are drawn.
""",
),
**kwargs
)
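# Hedged note: plotly builds this validator internally when validating e.g.
# go.Waterfall(connector={"mode": "between", "visible": True}); user code does
# not normally instantiate it directly.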
| 33.36 | 83 | 0.57554 | 795 | 0.953237 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.452038 |
482318efaad6f890a578bab42ca3ad7a7b532213 | 27 | py | Python | src/euler_python_package/euler_python/medium/p207.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | ["MIT"] | null | null | null | src/euler_python_package/euler_python/medium/p207.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | ["MIT"] | null | null | null | src/euler_python_package/euler_python/medium/p207.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | ["MIT"] | null | null | null |
def problem207():
pass
| 9 | 17 | 0.62963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4823bb4588dd2055c421f82a83ae6da7fc0c5d90 | 7,915 | py | Python | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | ["MIT"] | 1 | 2020-06-16T12:53:25.000Z | 2020-06-16T12:53:25.000Z | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | ["MIT"] | 3 | 2020-06-06T02:00:34.000Z | 2020-06-23T16:38:25.000Z | ssd_data/augmentations/geometrics.py | star-baba/res50_sa_ssd | c7c0f218307b50e7ac1fd8945868df01f1743467 | ["MIT"] | 1 | 2021-04-27T06:40:21.000Z | 2021-04-27T06:40:21.000Z |
from numpy import random
import numpy as np
import logging
from ._utils import decision
from ssd.core.boxes.utils import iou_numpy, centroids2corners_numpy, corners2centroids_numpy
from .base import Compose
class RandomExpand(object):
def __init__(self, filled_rgb_mean=(103.939, 116.779, 123.68), rmin=1, rmax=4, p=0.5):
self.filled_rgb_mean = filled_rgb_mean
self.ratio_min = rmin
self.ratio_max = rmax
self.p = p
assert self.ratio_min >= 0, "must be more than 0"
assert self.ratio_max >= self.ratio_min, "must be more than factor min"
def __call__(self, img, bboxes, labels, flags, *args):
# IMPORTANT: img = rgb order, bboxes: minmax coordinates with PERCENT
if decision(self.p):
h, w, c = img.shape
# get ratio randomly
ratio = random.uniform(self.ratio_min, self.ratio_max)
new_h = int(h*ratio)
new_w = int(w*ratio)
# get top left coordinates of original image randomly
topleft_x = int(random.uniform(0, new_w - w))
topleft_y = int(random.uniform(0, new_h - h))
# filled with normalized mean value
new_img = np.ones((new_h, new_w, c)) * np.broadcast_to(self.filled_rgb_mean, shape=(1, 1, c))
# put original image to selected topleft coordinates
new_img[topleft_y:topleft_y+h, topleft_x:topleft_x+w] = img
img = new_img
# convert box coordinates
# bbox shape = (*, 4=(xmin, ymin, xmax, ymax))
# reconstruct original coordinates
bboxes[:, 0::2] *= float(w)
bboxes[:, 1::2] *= float(h)
# move new position
bboxes[:, 0::2] += topleft_x
bboxes[:, 1::2] += topleft_y
# to percent
bboxes[:, 0::2] /= float(new_w)
bboxes[:, 1::2] /= float(new_h)
return img, bboxes, labels, flags, args
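# Hedged usage sketch (assumed shapes, matching __call__ above: img is an
# HxWxC array, bboxes are corner coordinates in percent):
#   expand = RandomExpand(p=1.0)
#   img, bboxes, labels, flags, _ = expand(img, bboxes, labels, flags)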
class _SampledPatchOp(object):
class UnSatisfy(Exception):
pass
class EntireSample(_SampledPatchOp):
def __call__(self, img, bboxes, labels, flags, *args):
return img, bboxes, labels, flags, args
class RandomIoUSampledPatch(_SampledPatchOp):
def __init__(self, iou_min=None, iou_max=None, ar_min=0.5, ar_max=2):
"""
:param iou_min: float or None, if it's None, set iou_min as -inf
:param iou_max: float or None, if it's None, set iou_max as inf
"""
self.iou_min = iou_min if iou_min else float('-inf')
self.iou_max = iou_max if iou_max else float('inf')
self.aspect_ration_min = ar_min
self.aspect_ration_max = ar_max
def __call__(self, img, bboxes, labels, flags, *args):
# IMPORTANT: img = rgb order, bboxes: minmax coordinates with PERCENT
h, w, _ = img.shape
ret_img = img.copy()
ret_bboxes = bboxes.copy()
# get patch width and height, and aspect ratio randomly
patch_w = random.randint(int(0.3 * w), w)
patch_h = random.randint(int(0.3 * h), h)
aspect_ratio = patch_h / float(patch_w)
# aspect ratio constraint b/t .5 & 2
if not (aspect_ratio >= 0.5 and aspect_ratio <= 2):
raise _SampledPatchOp.UnSatisfy
#aspect_ratio = random.uniform(self.aspect_ration_min, self.aspect_ration_max)
#patch_h, patch_w = int(aspect_ratio*h), int(aspect_ratio*w)
patch_topleft_x = random.randint(w - patch_w)
patch_topleft_y = random.randint(h - patch_h)
# shape = (1, 4)
patch = np.array((patch_topleft_x, patch_topleft_y,
patch_topleft_x + patch_w, patch_topleft_y + patch_h))
patch = np.expand_dims(patch, 0)
# IoU
overlaps = iou_numpy(bboxes, patch)
if overlaps.min() < self.iou_min and overlaps.max() > self.iou_max:
raise _SampledPatchOp.UnSatisfy
#return None
# cut patch
ret_img = ret_img[patch_topleft_y:patch_topleft_y+patch_h, patch_topleft_x:patch_topleft_x+patch_w]
# reconstruct box coordinates
ret_bboxes[:, 0::2] *= float(w)
ret_bboxes[:, 1::2] *= float(h)
# convert minmax to centroids coordinates of bboxes
# shape = (*, 4=(cx, cy, w, h))
centroids_boxes = corners2centroids_numpy(ret_bboxes)
# check if centroids of boxes is in patch
mask_box = (centroids_boxes[:, 0] > patch_topleft_x) * (centroids_boxes[:, 0] < patch_topleft_x+patch_w) *\
(centroids_boxes[:, 1] > patch_topleft_y) * (centroids_boxes[:, 1] < patch_topleft_y+patch_h)
if not mask_box.any():
raise _SampledPatchOp.UnSatisfy
#return None
# filtered out the boxes with unsatisfied above condition
ret_bboxes = ret_bboxes[mask_box, :].copy()
ret_labels = labels[mask_box]
# adjust boxes within patch
ret_bboxes[:, :2] = np.maximum(ret_bboxes[:, :2], patch[:, :2])
ret_bboxes[:, 2:] = np.minimum(ret_bboxes[:, 2:], patch[:, 2:])
# move new position
ret_bboxes[:, :2] -= patch[:, :2]
ret_bboxes[:, 2:] -= patch[:, :2]
# to percent
ret_bboxes[:, 0::2] /= float(patch_w)
ret_bboxes[:, 1::2] /= float(patch_h)
return ret_img, ret_bboxes, ret_labels, flags, args
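# Hedged note: RandomSampled (below) retries these patch ops up to max_iteration
# times, mirroring the SSD augmentation strategy where the minimum IoU with the
# ground-truth boxes is drawn from {0.1, 0.3, 0.5, 0.7, 0.9}.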
class RandomSampledPatch(RandomIoUSampledPatch):
def __init__(self):
super().__init__(None, None)
class RandomSampled(object):
def __init__(self, options=(
EntireSample(),
RandomIoUSampledPatch(0.1, None),
RandomIoUSampledPatch(0.3, None),
RandomIoUSampledPatch(0.5, None),
RandomIoUSampledPatch(0.7, None),
RandomIoUSampledPatch(0.9, None),
RandomSampledPatch()
), max_iteration=20):
# check argument is proper
for op in options:
if not isinstance(op, _SampledPatchOp):
raise ValueError('All of option\'s element must be inherited to _SampledPatchOp')
if not any([isinstance(op, EntireSample) for op in options]):
logging.warning("Option does not contain entire sample. Could not return value in worst case")
self.options = options
self.max_iteration = max_iteration
    def __call__(self, img, bboxes, labels, flags, *args):
        while True:
            # select option randomly
            op = random.choice(self.options)
            if isinstance(op, EntireSample):
                return op(img, bboxes, labels, flags, args)
            for _ in range(self.max_iteration):
                try:
                    return op(img, bboxes, labels, flags, args)
                except _SampledPatchOp.UnSatisfy:
                    continue
class RandomFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, bboxes, labels, flags, *args):
if decision(self.p):
_, w_, _ = img.shape
"""
copy because ->>>>
ValueError: some of the strides of a given numpy array are negative.
This is currently not supported, but will be added in future releases.
"""
img = img[:, ::-1].copy()
ret_bboxes = bboxes.copy()
ret_bboxes[:, 0::2] = 1 - ret_bboxes[:, 2::-2]
bboxes = ret_bboxes.clip(min=0, max=1)
return img, bboxes, labels, flags, args
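# Hedged usage sketch; GeometricDistortions (below) simply chains
# RandomExpand -> RandomSampled -> RandomFlip via Compose:
#   augment = GeometricDistortions()
#   img, bboxes, labels, flags, _ = augment(img, bboxes, labels, flags)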
class GeometricDistortions(Compose):
def __init__(self):
gmdists = [
RandomExpand(),
RandomSampled(),
RandomFlip()
]
        super().__init__(gmdists)
| 36.307339 | 115 | 0.586734 | 7,689 | 0.971447 | 0 | 0 | 0 | 0 | 0 | 0 | 1,785 | 0.225521 |
48246329c18e90c00165cc92ef48bb7d9a328558 | 5,200 | py | Python | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | ["Apache-2.0"] | null | null | null | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | ["Apache-2.0"] | null | null | null | tests/unit_tests/prepare_email/test_mail_segmenting.py | farthur/melusine | 121fbb17da221b12186a275d5843b466ce65d954 | ["Apache-2.0"] | null | null | null |
import pandas as pd
from melusine.prepare_email.mail_segmenting import structure_email, tag_signature
structured_historic = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<[email protected]> a écrit\xa0:",
},
]
output = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <[email protected]> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers \
PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_structure_email():
input_df = pd.DataFrame({"structured_historic": [structured_historic]})
output_df = pd.Series([output])
result = input_df.apply(structure_email, axis=1)
pd.testing.assert_series_equal(result, output_df)
structured_historic_signature = [
{
"text": " \n \n \n Bonjours, \n \n Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la \n somme que je vous \
dois afin d'd'être en régularisation . \n \n Merci bonne journée\nJean Dupont",
"meta": "",
},
{
"text": " \n Bonjour. \n \n Merci de bien vouloir prendre connaissance \
du document ci-joint : \n 1 - Relevé d'identité postal MUTUELLE \
(contrats) \n \n Sentiments mutualistes. \n \n La Mutuelle \n \n \
La visualisation des fichiers PDF nécessite Adobe Reader. \n ",
"meta": " \n \n Le mar. 22 mai 2018 à 10:20, \
<[email protected]> a écrit\xa0:",
},
]
output_signature = [
{
"meta": {"date": None, "from": None, "to": None},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjours, ", "tags": "HELLO"},
{
"part": " Suite a notre conversation \
téléphonique de Mardi , pourriez vous me dire la somme que je vous dois \
afin d'd'être en régularisation . \n \n ",
"tags": "BODY",
},
{"part": "Merci bonne journée", "tags": "GREETINGS"},
{"part": "Jean Dupont", "tags": "SIGNATURE"},
],
},
},
{
"meta": {
"date": " mar. 22 mai 2018 à 10:20",
"from": " <[email protected]> ",
"to": None,
},
"structured_text": {
"header": None,
"text": [
{"part": " Bonjour. \n \n ", "tags": "HELLO"},
{
"part": "Merci de bien vouloir prendre \
connaissance du document ci-joint : 1 - Relevé d'identité postal MUTUELLE \
(contrats) ",
"tags": "BODY",
},
{"part": " Sentiments mutualistes. ", "tags": "GREETINGS"},
{"part": " La Mutuelle ", "tags": "BODY"},
{
"part": " La visualisation des fichiers PDF nécessite Adobe Reader. \n",
"tags": "FOOTER",
},
],
},
},
]
def test_tag_signature():
input_df = pd.DataFrame({"structured_historic": [structured_historic_signature]})
output_df = pd.Series([output_signature])
input_df["structured_body"] = input_df.apply(structure_email, axis=1)
result = input_df.apply(tag_signature, axis=1)
pd.testing.assert_series_equal(result, output_df)
| 35.616438 | 93 | 0.497308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,863 | 0.546583 |
48253cfafc062954525ca06fd5152e38d4e70a79 | 295 | py | Python | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | ["MIT"] | null | null | null | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | ["MIT"] | null | null | null | Modulos/Modulo2/app3.py | Trallyan/Curso_Udemy | 2a5c0becd14352f2cc2bf2362d1e5494edd6198d | ["MIT"] | null | null | null |
idade = 18
carteiramotorista = True
print (idade >= 18 and carteiramotorista == True)
print ("Pode Dirigir")
velocidade = 90
radar = 100
radarfuncionando = False
print (velocidade > radar and radarfuncionando == True)
print ("Não foi multado")
velocidade1 = 101
print (velocidade1 >= radar)
| 18.4375 | 55 | 0.735593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.108108 |
482633d918d23f0a510e111cc0ad6f51458a51a4 | 1,233 | py | Python | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | ["BSD-3-Clause"] | 3 | 2021-11-05T18:25:21.000Z | 2022-03-02T22:03:02.000Z | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | ["BSD-3-Clause"] | null | null | null | examples/basic/merge_instance.py | talashilkarraj/spydrnet-physical | d13bcbb0feef7d5c93aa60af4a916f837128a5ad | ["BSD-3-Clause"] | 2 | 2022-01-10T14:27:59.000Z | 2022-03-13T08:21:33.000Z |
"""
===================================
Merging two instances in the design
===================================
This example demonstrates how to merge two instances in the design to create a
new merged definition.
.. hdl-diagram:: ../../../examples/basic/_initial_design_merge.v
:type: netlistsvg
:align: center
:module: top
**Output1** Merged design Instance
.. hdl-diagram:: ../../../examples/basic/_merged_design.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
import logging
logger = logging.getLogger('spydrnet_logs')
sdn.enable_file_logging(LOG_LEVEL='INFO')
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
sdn.compose(netlist, '_initial_design_merge.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('nested_hierarchy')
top = netlist.top_instance.reference
inst1 = next(top.get_instances("inst_1_0"))
inst2 = next(top.get_instances("inst_1_1"))
top.merge_instance([inst1, inst2],
new_definition_name="merged_module",
new_instance_name="merged_module_instance_0")
top.create_unconn_wires()
sdn.compose(netlist, '_merged_design.v', skip_constraints=True)
| 26.804348 | 80 | 0.703163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.52717 |
482697dcf4d097846528ae15ee8dbca33b6e86d7 | 525 | py | Python | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | ["Unlicense"] | null | null | null | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | ["Unlicense"] | null | null | null | splunge.py | neilebliss/reddit_bot | 74be4b57ddbdf9fe0d9876207388ee2778b4a50d | ["Unlicense"] | null | null | null |
import praw
import re
import os
reddit = praw.Reddit('Splunge Bot v1', client_id=os.environ['REDDIT_CLIENT_ID'], client_secret=os.environ['REDDIT_CLIENT_SECRET'], password=os.environ['REDDIT_PASSWORD'], username=os.environ['REDDIT_USERNAME'])
subreddit = reddit.subreddit('tubasaur')
for submission in subreddit.new(limit=5):
for top_level_comment in submission.comments:
if re.search('splunge', top_level_comment.body, re.IGNORECASE):
top_level_comment.reply("Well, yeah, splunge for me too!")
print("Splunged.")
| 40.384615 | 210 | 0.775238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.291429 |
48285ffa4d4045b7cf655571731a70ba6854e4b3 | 19,519 | py | Python | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | ["MIT"] | 2 | 2021-07-29T02:39:36.000Z | 2021-07-29T02:39:38.000Z | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | ["MIT"] | 2 | 2021-08-16T08:31:24.000Z | 2021-09-20T16:34:58.000Z | cogv3/admin/managecommands.py | XFazze/discordbot | 6b4201a6d6ff1bed5f65de4b4d30738b4d51e223 | ["MIT"] | null | null | null |
import discord
from discord import embeds
from discord.ext import commands
from discord.ext.commands.core import command
from pymongo import MongoClient, collation
from discord_components import Button, Select, SelectOption, ComponentsBot
from discord.utils import get
class managecommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Enable/disable command
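    # Each guild document stores per-command permission state under
    # settings[command] with six buckets:
    #   guild / disabled_guild       -> lists of role ids (server-wide)
    #   category / disabled_category -> {category_id: [role ids]}
    #   channel / disabled_channel   -> {channel_id: [role ids]}
    # e.g. "disable ping @SomeRole" appends the role id to disabled_guild.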
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
if role.id in settings[command]['guild']:
settings[command]['guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['disabled_category'].keys():
settings[command]['disabled_category'][str(category.id)] = [
role.id]
else:
if role.id in settings[command]['disabled_category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_category'][str(
category.id)].append(role.id)
if str(category.id) in settings[command]['category'].keys():
if role.id in settings[command]['category'][str(category.id)]:
settings[command]['category'][str(category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in category " + category.name+" for "+role.name + category.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def disablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['disabled_channel'].keys():
settings[command]['disabled_channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['disabled_channel'][str(
channel.id)].append(role.id)
if str(channel.id) in settings[command]['channel'].keys():
if role.id in settings[command]['channel'][str(channel.id)]:
settings[command]['channel'][str(channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Disabled "+command+" in channel " + channel.name+" for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enable(self, ctx, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if role.id not in settings[command]['guild']:
settings[command]['guild'].append(role.id)
else:
await ctx.reply(embed=discord.Embed(title="Command is already enabled", color=0xFD3333))
return
if role.id in settings[command]['disabled_guild']:
settings[command]['disabled_guild'].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" on server for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablecategory(self, ctx, category: discord.CategoryChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(category.id) not in settings[command]['category'].keys():
settings[command]['category'][str(category.id)] = [role.id]
else:
if role.id in settings[command]['category'][str(category.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['category'][str(category.id)].append(role.id)
if str(category.id) in settings[command]['disabled_category'].keys():
if role.id in settings[command]['disabled_category'][str(category.id)]:
settings[command]['disabled_category'][str(
category.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in category " + category.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def enablechannel(self, ctx, channel: discord.TextChannel = None, command: str = None, role: discord.Role = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
if role == None:
role = ctx.guild.default_role
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
if command not in settings.keys():
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}
}
if str(channel.id) not in settings[command]['channel'].keys():
settings[command]['channel'][str(channel.id)] = [role.id]
else:
if role.id in settings[command]['channel'][str(channel.id)]:
await ctx.reply(embed=discord.Embed(title="Command is already disabled", color=0xFD3333))
return
else:
settings[command]['channel'][str(channel.id)].append(role.id)
if str(channel.id) in settings[command]['disabled_channel'].keys():
if role.id in settings[command]['disabled_channel'][str(channel.id)]:
settings[command]['disabled_channel'][str(
channel.id)].remove(role.id)
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Enabled "+command+" in channel " + channel.name + " for "+role.name, color=0x00FF42))
@commands.command(pass_context=True)
@commands.has_permissions(manage_guild=True)
async def resetperms(self, ctx, command: str = None):
validcommand = False
for cmd in self.bot.commands:
if command == cmd.name:
validcommand = True
break
if not validcommand:
await ctx.reply(embed=discord.Embed(title="Provide a valid command", color=0xFD3333))
return
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
settings[command] = {
"guild": [],
"disabled_guild": [],
"category": {},
"disabled_category": {},
"channel": {},
"disabled_channel": {}}
newvalue = {"$set": {"settings": settings}}
collection.update_one(myquery, newvalue)
await ctx.reply(embed=discord.Embed(title="Reset command permissions", color=0x00FF42))
@commands.command(pass_context=True)
async def showperms(self, ctx):
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": ctx.guild.id}
settings = collection.find_one(myquery)["settings"]
options=[]
for setting in settings.keys():
options.append(SelectOption(label=setting, value=setting))
message = await ctx.reply("The lower in the hiearchy will go over the other. So channel enable will go over guild disable.", components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
while True:
interaction = await self.bot.wait_for("select_option")
            embed = discord.Embed(title="Command permissions for " + interaction.values[0], color=0xFFFFFF)
if len(settings[interaction.values[0]]["guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide allowed", value=msg)
if len(settings[interaction.values[0]]["guild"]) > 0:
msg = ""
for roleid in settings[interaction.values[0]]["disabled_guild"]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
else:
msg="None"
embed.add_field(name="Guild wide denied", value=msg)
# this is no longer a list
# its a dictionary
embed.add_field(name="Category wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["category"].keys()) > 0:
for key in settings[interaction.values[0]]["category"].keys():
if len(settings[interaction.values[0]]["category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Category wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_category"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_category"].keys():
if len(settings[interaction.values[0]]["disabled_category"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_category"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.categories, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide allowed", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["channel"].keys()) > 0:
for key in settings[interaction.values[0]]["channel"].keys():
if len(settings[interaction.values[0]]["channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "None"
embed.add_field(name="Channel wide denied", value="\u200b", inline=False)
if len(settings[interaction.values[0]]["disabled_channel"].keys()) > 0:
for key in settings[interaction.values[0]]["disabled_channel"].keys():
if len(settings[interaction.values[0]]["disabled_channel"][key]) == 0:
continue
msg = ""
for roleid in settings[interaction.values[0]]["disabled_channel"][key]:
role_obj = get(ctx.guild.roles, id=roleid)
msg += role_obj.name+'\n'
name = get(ctx.guild.text_channels, id=int(key))
embed.add_field(name=name, value=msg)
else:
msg = "There "
await message.edit(embed=embed,components=[Select(placeholder="Select something!", options=options, custom_id="commandperms",)])
def setup(bot):
bot.add_cog(managecommands(bot))
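# Usage sketch (not part of the original cog): perms() below is shaped as a
# global check predicate, so a bot entry point would typically register it via
#
#     bot.add_check(perms)
#
# after loading this cog, making every command consult the stored settings.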
def perms(context):
command = context.command.name #str
guild_id = context.guild.id
channel_id = str(context.message.channel.id)
category_id = str(context.message.channel.category_id)
roles = []
for role in context.author.roles:
roles.append(role.id)
collection = MongoClient('localhost', 27017).maindb.guilds
myquery = {"id": guild_id}
settings = collection.find_one(myquery)["settings"]
if command in settings.keys():
if channel_id in settings[command]["channel"].keys():
print("channels exist")
if bool(set(roles) & set(settings[command]["channel"][channel_id])):
return True
elif channel_id in settings[command]["disabled_channel"].keys():
if bool(set(roles) & set(settings[command]["disabled_channel"][channel_id])):
return False
elif category_id in settings[command]["category"].keys():
if bool(set(roles) & set(settings[command]["category"][category_id])):
return True
elif category_id in settings[command]["disabled_category"].keys():
if bool(set(roles) & set(settings[command]["disabled_category"][category_id])):
return False
elif bool(set(roles) & set(settings[command]["disabled_guild"])):
return False
elif bool(set(roles) & set(settings[command]["guild"])):
return True
return True | 42.06681 | 230 | 0.56217 | 17,552 | 0.899226 | 0 | 0 | 17,383 | 0.890568 | 16,712 | 0.856191 | 2,853 | 0.146165 |
48289ef712fad809681babbffb67acddcce6b08d | 13,910 | py | Python | edb/pgsql/compiler/context.py | OhBonsai/edgedb | 786c853090b90f3005cb65014194d0dbd45d6fcc | [
"Apache-2.0"
]
| 2 | 2019-01-21T05:43:52.000Z | 2019-05-24T02:53:14.000Z | edb/pgsql/compiler/context.py | ciusji/edgedb | 1c68c02430a92464839f03f43c4e5ad6f7ede4e0 | [
"Apache-2.0"
]
| null | null | null | edb/pgsql/compiler/context.py | ciusji/edgedb | 1c68c02430a92464839f03f43c4e5ad6f7ede4e0 | [
"Apache-2.0"
]
| null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""IR compiler context."""
from __future__ import annotations
from typing import *
import collections
import contextlib
import itertools
import enum
import uuid
from edb.common import compiler
from edb.pgsql import ast as pgast
from . import aliases
if TYPE_CHECKING:
from edb.ir import ast as irast
class ContextSwitchMode(enum.Enum):
TRANSPARENT = enum.auto()
SUBREL = enum.auto()
NEWREL = enum.auto()
SUBSTMT = enum.auto()
NEWSCOPE = enum.auto()
class ShapeFormat(enum.Enum):
SERIALIZED = enum.auto()
FLAT = enum.auto()
class OutputFormat(enum.Enum):
#: Result data output in PostgreSQL format.
NATIVE = enum.auto()
#: Result data output as a single JSON string.
JSON = enum.auto()
#: Result data output as a single PostgreSQL JSONB type value.
JSONB = enum.auto()
#: Result data output as a JSON string for each element in returned set.
JSON_ELEMENTS = enum.auto()
#: Script mode: query result not returned, cardinality of result set
#: is returned instead.
SCRIPT = enum.auto()
#: Like NATIVE, but objects without an explicit shape are serialized
#: as UUIDs.
NATIVE_INTERNAL = enum.auto()
NO_STMT = pgast.SelectStmt()
class CompilerContextLevel(compiler.ContextLevel):
#: static compilation environment
env: Environment
#: mapping of named args to position
argmap: Dict[str, pgast.Param]
#: next argument number for named arguments
next_argument: Iterator[int]
#: whether compiling in singleton expression mode
singleton_mode: bool
#: the top-level SQL statement
toplevel_stmt: pgast.Query
#: Record of DML CTEs generated for the corresponding IR DML.
#: CTEs generated for DML-containing FOR statements are keyed
#: by their iterator set.
dml_stmts: Dict[Union[irast.MutatingStmt, irast.Set],
pgast.CommonTableExpr]
#: SQL statement corresponding to the IR statement
#: currently being compiled.
stmt: pgast.SelectStmt
#: Current SQL subquery
rel: pgast.SelectStmt
#: SQL query hierarchy
rel_hierarchy: Dict[pgast.Query, pgast.Query]
#: CTEs representing schema types, when rewritten based on access policy
type_ctes: Dict[uuid.UUID, pgast.CommonTableExpr]
#: A set of type CTEs currently being generated
pending_type_ctes: Set[uuid.UUID]
#: The logical parent of the current query in the
#: query hierarchy
parent_rel: Optional[pgast.Query]
#: Query to become current in the next SUBSTMT switch.
pending_query: Optional[pgast.SelectStmt]
#: Whether the expression currently being processed is
#: directly exposed to the output of the statement.
expr_exposed: Optional[bool]
#: Expression to use to force SQL expression volatility in this context
#: (Delayed with a lambda to avoid inserting it when not used.)
volatility_ref: Tuple[Callable[[], pgast.BaseExpr], ...]
# Current path_id we are INSERTing, so that we can avoid creating
# a bogus volatility ref to it...
current_insert_path_id: Optional[irast.PathId]
group_by_rels: Dict[
Tuple[irast.PathId, irast.PathId],
Union[pgast.BaseRelation, pgast.CommonTableExpr]
]
#: Paths, for which semi-join is banned in this context.
disable_semi_join: Set[irast.PathId]
#: Paths, which need to be explicitly wrapped into SQL
#: optionality scaffolding.
force_optional: Set[irast.PathId]
#: Specifies that references to a specific Set must be narrowed
#: by only selecting instances of type specified by the mapping value.
intersection_narrowing: Dict[irast.Set, irast.Set]
#: Which SQL query holds the SQL scope for the given PathId
path_scope: ChainMap[irast.PathId, pgast.SelectStmt]
#: Relevant IR scope for this context.
scope_tree: irast.ScopeTreeNode
#: A stack of dml statements currently being compiled. Used for
#: figuring out what to record in type_rel_overlays.
dml_stmt_stack: List[irast.MutatingStmt]
#: Relations used to "overlay" the main table for
#: the type. Mostly used with DML statements.
type_rel_overlays: DefaultDict[
Optional[irast.MutatingStmt],
DefaultDict[
uuid.UUID,
List[
Tuple[
str,
Union[pgast.BaseRelation, pgast.CommonTableExpr],
irast.PathId,
]
],
],
]
#: Relations used to "overlay" the main table for
#: the pointer. Mostly used with DML statements.
ptr_rel_overlays: DefaultDict[
Optional[irast.MutatingStmt],
DefaultDict[
str,
List[
Tuple[
str,
Union[pgast.BaseRelation, pgast.CommonTableExpr],
irast.PathId,
]
],
],
]
#: The CTE and some metadata of any enclosing iterator-like
#: construct (which includes iterators, insert/update, and INSERT
#: ELSE select clauses) currently being compiled.
enclosing_cte_iterator: Optional[pgast.IteratorCTE]
#: Sets to force shape compilation on, because the values are
#: needed by DML.
shapes_needed_by_dml: Set[irast.Set]
def __init__(
self,
prevlevel: Optional[CompilerContextLevel],
mode: ContextSwitchMode,
*,
env: Optional[Environment] = None,
scope_tree: Optional[irast.ScopeTreeNode] = None,
) -> None:
if prevlevel is None:
assert env is not None
assert scope_tree is not None
self.env = env
self.argmap = collections.OrderedDict()
self.next_argument = itertools.count(1)
self.singleton_mode = False
self.toplevel_stmt = NO_STMT
self.stmt = NO_STMT
self.rel = NO_STMT
self.rel_hierarchy = {}
self.type_ctes = {}
self.pending_type_ctes = set()
self.dml_stmts = {}
self.parent_rel = None
self.pending_query = None
self.expr_exposed = None
self.volatility_ref = ()
self.current_insert_path_id = None
self.group_by_rels = {}
self.disable_semi_join = set()
self.force_optional = set()
self.intersection_narrowing = {}
self.path_scope = collections.ChainMap()
self.scope_tree = scope_tree
self.dml_stmt_stack = []
self.type_rel_overlays = collections.defaultdict(
lambda: collections.defaultdict(list))
self.ptr_rel_overlays = collections.defaultdict(
lambda: collections.defaultdict(list))
self.enclosing_cte_iterator = None
self.shapes_needed_by_dml = set()
else:
self.env = prevlevel.env
self.argmap = prevlevel.argmap
self.next_argument = prevlevel.next_argument
self.singleton_mode = prevlevel.singleton_mode
self.toplevel_stmt = prevlevel.toplevel_stmt
self.stmt = prevlevel.stmt
self.rel = prevlevel.rel
self.rel_hierarchy = prevlevel.rel_hierarchy
self.type_ctes = prevlevel.type_ctes
self.pending_type_ctes = prevlevel.pending_type_ctes
self.dml_stmts = prevlevel.dml_stmts
self.parent_rel = prevlevel.parent_rel
self.pending_query = prevlevel.pending_query
self.expr_exposed = prevlevel.expr_exposed
self.volatility_ref = prevlevel.volatility_ref
self.current_insert_path_id = prevlevel.current_insert_path_id
self.group_by_rels = prevlevel.group_by_rels
self.disable_semi_join = prevlevel.disable_semi_join.copy()
self.force_optional = prevlevel.force_optional.copy()
self.intersection_narrowing = prevlevel.intersection_narrowing
self.path_scope = prevlevel.path_scope
self.scope_tree = prevlevel.scope_tree
self.dml_stmt_stack = prevlevel.dml_stmt_stack
self.type_rel_overlays = prevlevel.type_rel_overlays
self.ptr_rel_overlays = prevlevel.ptr_rel_overlays
self.enclosing_cte_iterator = prevlevel.enclosing_cte_iterator
self.shapes_needed_by_dml = prevlevel.shapes_needed_by_dml
if mode is ContextSwitchMode.SUBSTMT:
if self.pending_query is not None:
self.rel = self.pending_query
else:
self.rel = pgast.SelectStmt()
if prevlevel.parent_rel is not None:
parent_rel = prevlevel.parent_rel
else:
parent_rel = prevlevel.rel
self.rel_hierarchy[self.rel] = parent_rel
self.stmt = self.rel
self.pending_query = None
self.parent_rel = None
elif mode is ContextSwitchMode.SUBREL:
self.rel = pgast.SelectStmt()
if prevlevel.parent_rel is not None:
parent_rel = prevlevel.parent_rel
else:
parent_rel = prevlevel.rel
self.rel_hierarchy[self.rel] = parent_rel
self.pending_query = None
self.parent_rel = None
elif mode is ContextSwitchMode.NEWREL:
self.rel = pgast.SelectStmt()
self.pending_query = None
self.parent_rel = None
self.path_scope = collections.ChainMap()
self.rel_hierarchy = {}
self.scope_tree = prevlevel.scope_tree.root
self.disable_semi_join = set()
self.force_optional = set()
self.intersection_narrowing = {}
self.pending_type_ctes = set(prevlevel.pending_type_ctes)
elif mode == ContextSwitchMode.NEWSCOPE:
self.path_scope = prevlevel.path_scope.new_child()
def subrel(
self,
) -> compiler.CompilerContextManager[CompilerContextLevel]:
return self.new(ContextSwitchMode.SUBREL)
def newrel(
self,
) -> compiler.CompilerContextManager[CompilerContextLevel]:
return self.new(ContextSwitchMode.NEWREL)
def substmt(
self,
) -> compiler.CompilerContextManager[CompilerContextLevel]:
return self.new(ContextSwitchMode.SUBSTMT)
def newscope(
self,
) -> compiler.CompilerContextManager[CompilerContextLevel]:
return self.new(ContextSwitchMode.NEWSCOPE)
def up_hierarchy(
self,
n: int, q: Optional[pgast.Query]=None
) -> Optional[pgast.Query]:
# mostly intended as a debugging helper
q = q or self.rel
for _ in range(n):
if q:
q = self.rel_hierarchy.get(q)
return q
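    # Illustrative use of the context-switch helpers above (a sketch, not code
    # from this module): a compiler pass forks state like
    #
    #     with ctx.subrel() as subctx:
    #         subctx.expr_exposed = False
    #         ...  # compile a subexpression into subctx.rel
    #
    # so mutations stay scoped to the subquery being built.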
class CompilerContext(compiler.CompilerContext[CompilerContextLevel]):
ContextLevelClass = CompilerContextLevel
default_mode = ContextSwitchMode.TRANSPARENT
class Environment:
"""Static compilation environment."""
aliases: aliases.AliasGenerator
output_format: Optional[OutputFormat]
use_named_params: bool
ptrref_source_visibility: Dict[irast.BasePointerRef, bool]
expected_cardinality_one: bool
ignore_object_shapes: bool
explicit_top_cast: Optional[irast.TypeRef]
singleton_mode: bool
query_params: List[irast.Param]
type_rewrites: Dict[uuid.UUID, irast.Set]
external_rvars: Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]
def __init__(
self,
*,
output_format: Optional[OutputFormat],
use_named_params: bool,
expected_cardinality_one: bool,
ignore_object_shapes: bool,
singleton_mode: bool,
explicit_top_cast: Optional[irast.TypeRef],
query_params: List[irast.Param],
type_rewrites: Dict[uuid.UUID, irast.Set],
external_rvars: Optional[
Mapping[Tuple[irast.PathId, str], pgast.PathRangeVar]
] = None,
) -> None:
self.aliases = aliases.AliasGenerator()
self.output_format = output_format
self.use_named_params = use_named_params
self.ptrref_source_visibility = {}
self.expected_cardinality_one = expected_cardinality_one
self.ignore_object_shapes = ignore_object_shapes
self.singleton_mode = singleton_mode
self.explicit_top_cast = explicit_top_cast
self.query_params = query_params
self.type_rewrites = type_rewrites
self.external_rvars = external_rvars or {}
# XXX: this context hack is necessary until pathctx is converted
# to use context levels instead of using env directly.
@contextlib.contextmanager
def output_format(
ctx: CompilerContextLevel,
output_format: OutputFormat,
) -> Generator[None, None, None]:
original_output_format = ctx.env.output_format
ctx.env.output_format = output_format
try:
yield
finally:
ctx.env.output_format = original_output_format
| 33.680387 | 76 | 0.649029 | 12,415 | 0.892523 | 300 | 0.021567 | 327 | 0.023508 | 0 | 0 | 3,245 | 0.233285 |
4828cf4e5987d77ba633b24834a9ab7cbcc2c32c | 1,111 | py | Python | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
]
| null | null | null | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
]
| null | null | null | tests/unit/test_resources_log.py | CarlosAMolina/logs-analyzer | b381d0c7ae4c5a8b6911d2b7019baa74208192c6 | [
"MIT"
]
| null | null | null | import unittest
import mock
from src.api.resources import log
from tests import LOGS_PATH
class TestLogListResource(unittest.TestCase):
def setUp(self):
self.class_ = log.LogListResource()
def test_post_with_file_that_exits(self):
class FakeRequest:
@staticmethod
def get_json():
return {"logs-file": LOGS_PATH}
with mock.patch("src.api.resources.log.flask.request", FakeRequest):
result = self.class_.post()
self.assertEqual(3, len(result.json))
self.assertEqual("200 OK", result.status)
def test_post_with_file_that_does_not_exit(self):
class FakeRequest:
@staticmethod
def get_json():
return {"logs-file": "/foo/bar"}
with self.assertRaises(FileNotFoundError) as cm:
with mock.patch("src.api.resources.log.flask.request", FakeRequest):
self.class_.post()
the_exception = cm.exception
self.assertIsInstance(the_exception, FileNotFoundError)
if __name__ == "__main__":
unittest.main()
| 29.236842 | 80 | 0.636364 | 968 | 0.871287 | 0 | 0 | 179 | 0.161116 | 0 | 0 | 124 | 0.111611 |
482a0469f8aaa784c2bee17a9875456c7d03fc8d | 4,378 | py | Python | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
]
| 4 | 2021-04-19T04:04:06.000Z | 2022-02-08T10:13:37.000Z | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
]
| 7 | 2016-11-25T04:53:29.000Z | 2018-10-07T21:49:10.000Z | src/rqt_py_trees/message_loader_thread.py | alexfneves/rqt_py_trees | 87237c3dcf25db419ad783ec29b9a40fcfa7b75c | [
"BSD-3-Clause"
]
| 3 | 2021-01-08T10:47:21.000Z | 2021-07-26T15:18:39.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import threading
class MessageLoaderThread(threading.Thread):
"""
Waits for a new playhead position on the given topic, then loads the message at that position and notifies the view threads.
One thread per topic. Maintains a cache of recently loaded messages.
"""
def __init__(self, timeline, topic):
threading.Thread.__init__(self)
self.timeline = timeline
self.topic = topic
self.topic_playhead_position = None
self._message_cache_capacity = 50
self._message_cache = {}
self._message_cache_keys = []
self._stop_flag = False
self.setDaemon(True)
self.start()
def reset(self):
self.bag_playhead_position = None
def run(self):
while not self._stop_flag:
# Wait for a new entry
cv = self.timeline._playhead_positions_cvs[self.topic]
with cv:
while (self.topic not in self.timeline._playhead_positions) or (self.topic_playhead_position == self.timeline._playhead_positions[self.topic]):
cv.wait()
if self._stop_flag:
return
playhead_position = self.timeline._playhead_positions[self.topic]
self.topic_playhead_position = playhead_position
# Don't bother loading the message if there are no listeners
if not self.timeline.has_listeners(self.topic):
continue
# Load the message
if playhead_position is None:
msg_data = None
else:
msg_data = self._get_message(playhead_position)
# Inform the views
messages_cv = self.timeline._messages_cvs[self.topic]
with messages_cv:
self.timeline._messages[self.topic] = msg_data
messages_cv.notify_all() # notify all views that a message is loaded
def _get_message(self, position):
key = str(position)
if key in self._message_cache:
return self._message_cache[key]
msg_data = self.timeline.read_message(self.topic, position)
self._message_cache[key] = msg_data
self._message_cache_keys.append(key)
if len(self._message_cache) > self._message_cache_capacity:
oldest_key = self._message_cache_keys[0]
del self._message_cache[oldest_key]
self._message_cache_keys.remove(oldest_key)
return msg_data
def stop(self):
self._stop_flag = True
cv = self.timeline._playhead_positions_cvs[self.topic]
with cv:
print("DJS: self.timeline._playhead_positions_cvs[self.topic].notify_all() [MessageLoader:stop")
cv.notify_all()
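# Usage sketch (assumes the surrounding timeline object provides the
# _playhead_positions / _messages machinery referenced above):
#
#     loader = MessageLoaderThread(timeline, topic)
#     ...                # playhead moves; loader pushes messages to the views
#     loader.stop()      # wakes the worker thread so it can exit cleanly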
| 38.403509 | 159 | 0.680448 | 2,753 | 0.628826 | 0 | 0 | 0 | 0 | 0 | 0 | 2,038 | 0.465509 |
482b6bc1045b43ed5348ce5b37a40561a89cb30b | 2,796 | py | Python | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
]
| 29 | 2021-11-11T23:05:02.000Z | 2022-03-10T06:05:23.000Z | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
]
| 2 | 2021-12-13T16:18:14.000Z | 2022-03-09T14:04:37.000Z | run_ddpg.py | huangwl18/geometry-dex | 0c629316258ef560b360c6103d83d5cb828e3ccd | [
"MIT"
]
| 2 | 2021-11-18T06:00:30.000Z | 2021-12-17T03:04:52.000Z | from rl_modules.utils import *
import torch
import random
from rl_modules.ddpg_agent import ddpg_agent
from arguments_ddpg import get_args
import os
import numpy as np
import dex_envs
import wandb
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
train the agent
"""
def init_callback(args, prefix):
if not args.eval and not args.fresh:
resume_mode = 'allow'
else:
resume_mode = None
run_name = '{}_{:04d}'.format(prefix, args.expID)
wandb.init(name=run_name, id=run_name, resume=resume_mode,
save_code=True, anonymous="allow")
wandb.config.update(args, allow_val_change=True)
def log_callback(log_dict):
wandb.log(log_dict)
def get_env_params(env, args):
obs = env.reset()
# close the environment
params = {'goal': obs['desired_goal'].shape[-1],
'action': env.action_space.shape[-1],
'action_max': env.action_space.high[-1],
'max_timesteps': env._max_episode_steps,
'obs_to_normalize': obs['minimal_obs'].shape[-1]
}
if args.point_cloud:
params['obs'] = obs['pc_obs'].shape[-1]
else:
params['obs'] = params['obs_to_normalize']
return params
def get_policy_params(env, args):
obs = env.reset()
params = dict(state_dim=obs['minimal_obs'].shape[-1] + obs['desired_goal'].shape[-1],
action_dim=env.action_space.shape[-1],
max_action=env.action_space.high[-1],
args=args)
return params
def launch(init_callback=None, log_callback=None):
args = get_args()
# create dummy env for accessing spaces attr
dummy_env = makeEnv((args.train_names + args.test_names)[0], 0, args)()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
    # seed CUDA only when CUDA is actually in use
    if not args.no_cuda:
        torch.cuda.manual_seed(args.seed)
# get the environment parameters
# assume all envs high-level attributes are the same, use arbitrary one
env_params = get_env_params(dummy_env, args)
# assume all envs high-level attributes are the same, use arbitrary one
policy_params = get_policy_params(dummy_env, args)
# create the ddpg agent to interact with the environment
trainer = ddpg_agent(args, dummy_env, env_params, policy_params)
init_callback(args=args, prefix=trainer.agent_type)
if args.eval:
trainer.eval(log_callback=log_callback)
else:
trainer.learn(log_callback=log_callback)
dummy_env.close()
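# Example invocation (flag spellings are assumptions; see arguments_ddpg.get_args
# for the authoritative names):
#
#     python run_ddpg.py --seed 123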
if __name__ == "__main__":
# env setting ========================================================================
# do not enable wandb output
os.environ["WANDB_SILENT"] = "true"
launch(init_callback=init_callback, log_callback=log_callback)
| 31.41573 | 90 | 0.658798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 646 | 0.231044 |
482da58116dfb913fbea2c87dc9df1955becba11 | 3,528 | py | Python | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
]
| 1 | 2016-05-31T07:54:31.000Z | 2016-05-31T07:54:31.000Z | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
]
| null | null | null | code/a_train_generalist.py | seba-1511/specialists | 9888e639707142db80aafe6ae7bf25f572d34505 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is an experiment that will train a specified generalist network.
"""
import random
import numpy as np
from neon.backends import gen_backend
from neon.data import DataIterator, load_cifar10
from neon.transforms.cost import Misclassification
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
from neon.util.persist import save_obj
from keras.datasets import cifar100
from cifar_net import get_custom_vgg
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
DATASET_NAME = 'cifar100'
EXPERIMENT_DIR = 'experiments/' + DATASET_NAME + '/'
VALIDATION = True
def split_train_set(X_train, y_train):
return (X_train[:-5000], y_train[:-5000]), (X_train[-5000:], y_train[-5000:])
def load_data(name):
if name == 'cifar10':
(X_train, y_train), (X_test, y_test), nout = load_cifar10(path=args.data_dir)
nout = 16
elif name == 'cifar100':
(X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode='fine')
X_train = X_train.reshape(50000, 3072)
X_test = X_test.reshape(10000, 3072)
nout = 128
elif name == 'svhn':
from scipy.io import loadmat
train = loadmat('../data/svhm_train.mat')
test = loadmat('../data/svhn_test.mat')
(X_train, y_train), (X_test, y_test) = (train['X'], train['y']), (test['X'], test['y'])
s = X_train.shape
X_train = X_train.reshape(-1, s[-1]).transpose()
s = X_test.shape
X_test = X_test.reshape(-1, s[-1]).transpose()
temp = np.empty(X_train.shape, dtype=np.uint)
np.copyto(temp, X_train)
X_train = temp
temp = np.empty(X_test.shape, dtype=np.uint)
np.copyto(temp, X_test)
X_test = temp
nout = 16
return (X_train, y_train), (X_test, y_test), nout
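# Note: nout is padded above the true class count (10 -> 16 for CIFAR-10,
# 100 -> 128 for CIFAR-100), presumably to keep the output layer at a
# GPU-friendly size.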
if __name__ == '__main__':
# hyperparameters
batch_size = 64
num_epochs = args.epochs
num_epochs = 74 if num_epochs == 10 else num_epochs
rng_seed = 1234
np.random.seed(rng_seed)
random.seed(rng_seed)
# setup backend
be = gen_backend(
backend=args.backend,
batch_size=batch_size,
rng_seed=rng_seed,
device_id=args.device_id,
default_dtype=args.datatype,
)
(X_train, y_train), (X_test, y_test), nout = load_data(DATASET_NAME)
if VALIDATION:
(X_train, y_train), (X_valid, y_valid) = split_train_set(X_train, y_train)
model, opt, cost = get_custom_vgg(nout=nout)
train_set = DataIterator(X_train, y_train, nclass=nout, lshape=(3, 32, 32))
test_set = DataIterator(X_test, y_test, nclass=nout, lshape=(3, 32, 32))
callbacks = Callbacks(model, train_set, args, eval_set=test_set)
if VALIDATION:
valid_set = DataIterator(X_valid, y_valid, nclass=nout, lshape=(3, 32, 32))
callbacks = Callbacks(model, train_set, args, eval_set=valid_set)
model.fit(train_set, optimizer=opt, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print 'Validation: ', VALIDATION
print 'Train misclassification error: ', model.eval(train_set, metric=Misclassification())
if VALIDATION:
print 'Valid misclassification error: ', model.eval(valid_set, metric=Misclassification())
print 'Test misclassification error: ', model.eval(test_set, metric=Misclassification())
if args.save_path is not None:
save_obj(model.serialize(), EXPERIMENT_DIR + args.save_path)
| 33.283019 | 98 | 0.67602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.122166 |
482db97435aafc9eda667eac490cfcdd4c5b28e6 | 1,393 | py | Python | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
]
| null | null | null | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
]
| 51 | 2019-06-18T16:35:56.000Z | 2021-02-23T00:32:23.000Z | lilies/terminal/ansicodes.py | mrz1988/lilies | 9525770fabab7e142ebedc40ab5d0c8027aa90ba | [
"MIT"
]
| null | null | null | from __future__ import unicode_literals
# Leading control character
CSI = "\033["
FULLRESET = 0
BOLD = BRIGHT = 1
DIM = 2
ITALIC = 3
UNDERLINE = 4
BLINK = 5
# Unsupported
################
# RAPIDBLINK = 6
# REVERSE = 7
# CONCEAL = 8
STRIKE = 9
# Unsupported
################
# PRIMARY_FONT = 10
# ALTFONT1 = 11
# ALTFONT2 = 12
# ALTFONT3 = 13
# ALTFONT4 = 14
# ALTFONT5 = 15
# ALTFONT6 = 16
# ALTFONT7 = 17
# ALTFONT8 = 18
# ALTFONT9 = 19
# FRAKTUR = 20
# DOUBLEUNDERLINE = 21
NOBOLDDIM = 22
NOITALIC = 23
NOUNDERLINE = 24
NOBLINK = 25
# Unsupported
################
# 26 is missing?
# NOREVERSE = 27
# NOCONCEAL = 28
NOSTRIKE = 29
# COLORS!
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHTGRAY = 37
NOCOLOR = 39
# 16-color extended,
# Only kind of supported
DARKGRAY = 90
BRIGHTRED = 91
BRIGHTGREEN = 92
BRIGHTYELLOW = 93
BRIGHTBLUE = 94
BRIGHTMAGENTA = 95
BRIGHTCYAN = 96
WHITE = 97
ATTR_ON_CODES = {
"bold": BOLD,
"dim": DIM,
"italic": ITALIC,
"underline": UNDERLINE,
"blink": BLINK,
}
ATTR_OFF_CODES = {
"bold": NOBOLDDIM,
"dim": NOBOLDDIM,
"italic": NOITALIC,
"underline": NOUNDERLINE,
"blink": NOBLINK,
}
def fg_to_bg(ansi):
if ansi is None:
return None
return ansi + 10
def esc(code):
if code is None:
return ""
return "{csi}{code}m".format(csi=CSI, code=code)
| 13.656863 | 52 | 0.61809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.390524 |
482df5bdcc006ad0be823ed8da879646f9d15872 | 2,220 | py | Python | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
]
| 119 | 2020-10-06T08:21:21.000Z | 2022-03-25T12:00:10.000Z | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
]
| 7 | 2020-10-29T09:34:14.000Z | 2021-12-28T14:27:27.000Z | preprocess/step1.py | wenhuchen/KGPT | f898577d8e0ebbf48ea84915777c7b01e616ca3a | [
"MIT"
]
| 16 | 2020-10-07T18:58:48.000Z | 2022-02-23T07:42:29.000Z | import json
import regex
import nltk.data
from nltk.tokenize import word_tokenize
import sys
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def tokenize(string):
return word_tokenize(string)
def split_paragraphs(text):
"""
remove urls, lowercase all words and separate paragraphs
"""
splits = regex.split(r'\n+', text)
paras = []
for split in splits[1:]: # skip the titles
split = split.strip()
if len(split) == 0:
continue
if 'Section::' in split:
continue
paras.append(split)
paras = " ".join(paras)
return sent_detector.tokenize(paras)
def split_sent(sent):
strings = regex.split('<a |</a>', sent)
new_strings = []
count = 0
for s in strings:
s = s.strip()
if s:
if 'href=' in s:
s = s.lstrip('href="')
href, text = s.split('">')
new_strings.append((text, href))
count += 1
else:
ss = tokenize(s)
new_strings.extend([(_, None) for _ in ss])
return new_strings, count / len(new_strings), count
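# Illustrative behaviour of split_sent (hypothetical input):
#   split_sent('Born in <a href="Q90">Paris</a> .')
# returns ([('Born', None), ('in', None), ('Paris', 'Q90'), ('.', None)], 0.25, 1),
# i.e. the tokenised sentence, the hyperlink ratio and the hyperlink count.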
fw = open('out-more.json', 'w')
with open('en.json', 'r') as f:
for i, line in enumerate(f):
data = json.loads(line)
entry = {"id": data['id'], "url": data['url'], 'title': data['title']}
outputs = []
if len(data['text']) > 50:
try:
sents = split_paragraphs(data['text'])
for sent in sents:
if len(sent) < 400:
output, ratio, count = split_sent(sent)
if count > 1 and ratio >= 0.10 and len(output) >= 8 and output[0][0][0].isupper():
text = [_[0] for _ in output]
hyperlink = [_[1] for _ in output]
outputs.append((text, hyperlink))
except Exception:
pass
if len(outputs) > 0:
entry['text'] = outputs
fw.write(json.dumps(entry) + '\n')
sys.stdout.write('finished {}/{} \r'.format(i, 5989879))
fw.close()
| 30.833333 | 106 | 0.498198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.123423 |
482eeb2176d1d54a0f7e399cf1d1c1710f4d9f12 | 454 | py | Python | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
]
| 9 | 2018-01-25T16:32:18.000Z | 2018-10-10T18:47:33.000Z | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
]
| 2 | 2018-03-13T20:50:33.000Z | 2018-03-13T21:01:22.000Z | mintools/zmqmin/client.py | jtimon/elements-explorer | 397089593e860c4bdceb3a1222687a9120db0022 | [
"MIT"
]
| 2 | 2018-02-20T17:50:18.000Z | 2018-02-26T07:29:15.000Z | # Copyright (c) 2012-2018 The Mintools developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import zmq  # the pyzmq package is imported as `zmq`
from .messenger import Messenger
class Client(Messenger):
def _init_socket(self):
        self.socket = self.context.socket(zmq.REQ)
def send_request(self, request):
self.send_message(request)
return self.receive_message()
| 26.705882 | 69 | 0.737885 | 217 | 0.477974 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.405286 |
482f26a433eaf7f306d04205a3bac702463a9adc | 1,879 | py | Python | scripts/filter_genes_matrix.py | fengwanwan/st_analysis | 24ef6326efce7ddb1d7cfe9497a6733e48da8331 | [
"MIT"
]
| 4 | 2017-03-15T15:32:12.000Z | 2020-12-09T08:03:14.000Z | scripts/filter_genes_matrix.py | Coke-Zhang/st_analysis | 9ec446c0f1bff8e485f2016206b43dcdcf543119 | [
"MIT"
]
| 1 | 2021-05-06T16:57:21.000Z | 2021-05-06T16:58:38.000Z | scripts/filter_genes_matrix.py | Coke-Zhang/st_analysis | 9ec446c0f1bff8e485f2016206b43dcdcf543119 | [
"MIT"
]
| 4 | 2018-03-19T12:02:41.000Z | 2019-12-13T08:41:07.000Z | #! /usr/bin/env python
"""
Script that takes ST dataset (matrix of counts)
where the columns are genes and the rows
are spot coordinates
gene gene
XxY
XxY
And removes the columns of genes
matching the regular expression given as input.
@Author Jose Fernandez Navarro <[email protected]>
"""
import argparse
import sys
import os
import pandas as pd
import re
def main(counts_matrix, reg_exps, outfile):
if not os.path.isfile(counts_matrix):
sys.stderr.write("Error, input file not present or invalid format\n")
sys.exit(1)
if not outfile:
outfile = "filtered_{}".format(os.path.basename(counts_matrix).split(".")[0])
# Read the data frame (genes as columns)
counts_table = pd.read_table(counts_matrix, sep="\t", header=0, index_col=0)
genes = counts_table.columns
    # Filter out genes that match any of the reg-exps (no-op if none given)
    reg_exps = reg_exps or []
    genes = [gene for gene in genes if any(re.match(regex, gene) for regex in reg_exps)]
counts_table.drop(genes, axis=1, inplace=True)
# Write filtered table
counts_table.to_csv(outfile, sep='\t')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--counts-matrix", required=True,
help="Matrix with gene counts (genes as columns)")
parser.add_argument("--outfile", help="Name of the output file")
parser.add_argument("--filter-genes", help="Regular expression for \
gene symbols to filter out. Can be given several times.",
default=None,
type=str,
action='append')
args = parser.parse_args()
main(args.counts_matrix, args.filter_genes, args.outfile)
| 34.796296 | 90 | 0.651943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.398616 |
482f35fcca776fd3b82f536d756e301830e31fbf | 83 | py | Python | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
]
| 56 | 2017-12-07T12:29:14.000Z | 2021-05-14T16:45:59.000Z | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
]
| 7 | 2017-12-26T09:00:23.000Z | 2019-01-14T03:55:56.000Z | libs/models/__init__.py | tonyngjichun/pspnet-pytorch | 75297aa4fdb4f7a712ef9185be1ec805044f8328 | [
"MIT"
]
| 16 | 2017-12-20T00:36:51.000Z | 2020-12-31T07:41:06.000Z | from __future__ import absolute_import
from .resnet import *
from .pspnet import *
| 20.75 | 38 | 0.807229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
482fb0fc0f3fd414d792e168bab4aaa39e2474d7 | 1,405 | py | Python | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
]
| 3 | 2020-04-01T14:49:24.000Z | 2020-04-01T14:49:28.000Z | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
]
| 7 | 2021-03-10T18:14:25.000Z | 2022-02-27T04:46:46.000Z | modules/tools/record_analyzer/common/distribution_analyzer.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | [
"Apache-2.0"
]
| 2 | 2020-08-05T12:52:42.000Z | 2021-10-19T13:07:49.000Z | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from statistical_analyzer import PrintColors
class DistributionAnalyzer:
"""statistical analzer class"""
def print_distribution_results(self, data):
"""distribution analyzer"""
if len(data) == 0:
print(PrintColors.FAIL + "No Data Generated!" + PrintColors.ENDC)
return
total = 0
for k, v in data.items():
total += v
for k, v in data.items():
percentage = "{0:.2f}".format((float(v) / total) * 100)
print(PrintColors.OKBLUE + k + " = " + str(v) + \
"(" + percentage + "%)" + PrintColors.ENDC)
| 36.025641 | 79 | 0.582206 | 573 | 0.407829 | 0 | 0 | 0 | 0 | 0 | 0 | 865 | 0.615658 |
483016a462d5e8b33445c247250dee4a2ae74ecd | 2,261 | py | Python | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
]
| 7 | 2021-03-10T23:37:12.000Z | 2022-01-13T01:14:58.000Z | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
]
| 2 | 2021-01-04T15:54:27.000Z | 2021-03-30T22:45:03.000Z | models/batch.py | scaleapi/sail | 1bd857b5db34cbd08c7d4e2476beafdb353a458d | [
"Apache-2.0"
]
| 3 | 2021-07-31T04:03:12.000Z | 2021-10-03T05:51:48.000Z | from helpers.concurrency import execute
from scaleapi import exceptions
def upsert(client, project_name, batches):
print("\n\nCreating Batches...")
print("===================")
def upsert_batch(desired_batch):
batch_name = desired_batch['name']
batch_callback_url = desired_batch['callback_url']
try:
current_batch = client.get_batch(desired_batch['name'])
# Batch already exists - validate is still in "staging" mode
if (not batches.get('batchStatusOverride', False) and current_batch.status != 'staging'):
raise(Exception(
f"❌ Trying to submit to a non-staging batch, '{desired_batch['name']}' is in status '{current_batch.status}' | Exiting now"))
return f"✅ Batch '{desired_batch['name']}' already exists, skipping"
except exceptions.ScaleResourceNotFound as err:
try:
new_batch = client.create_batch(
project_name, batch_name, batch_callback_url)
return f"✅ Successfully created batch `{desired_batch['name']}`"
except exceptions.ScaleException as err:
return f"❌ Batch creation for '{desired_batch['name']}' failed <Status Code {err.code}: {err.message}>"
except exceptions.ScaleException as err:
return f"❌ Batch fetch for '{desired_batch['name']}' failed <Status Code {err.code}: {err.message}>"
execute(upsert_batch, batches['batches'])
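# Shape of the `batches` argument, inferred from the lookups above:
#   {
#       "batchStatusOverride": False,              # optional escape hatch
#       "batches": [{"name": "...", "callback_url": "..."}],
#   }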
def finalize(client, batches):
print("\n\nFinalizing Batches...")
print("=====================")
def finalize_batch(batch):
batch_name = batch["name"]
# See if this batch was already finalized (finalizing again gives bad request)
try:
batch = client.get_batch(batch_name)
if (batch.status == 'in_progress'):
return f"✅ Batch '{batch_name}' was already finalized, skipping"
batch.finalize()
return f"✅ Succesfuly finalized batch '{batch_name}'"
except exceptions.ScaleException as err:
return f"❌ Attempt to finalize batch '{batch_name}' failed <Status Code {err.code}: {err.message}>"
execute(finalize_batch, batches['batches'])
| 38.322034 | 145 | 0.62008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 968 | 0.425121 |
4830669ccefdde03889bd71b019ac1ba14c36c86 | 315 | py | Python | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
]
| 29 | 2020-02-27T13:49:48.000Z | 2021-02-26T15:44:14.000Z | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
]
| 3 | 2021-03-30T11:31:49.000Z | 2021-12-07T12:11:56.000Z | worker/xs.py | hoshimaemi/XZZ | 6d712906fa2f1fcf16155cfd5d89245ef8e0aff8 | [
"MIT"
]
| 14 | 2020-02-29T07:25:12.000Z | 2021-01-03T05:12:25.000Z | from zzcore import StdAns, mysakuya
import requests
class Ans(StdAns):
def GETMSG(self):
        msg = ''
try:
msg += xs()
        except Exception:
            msg += 'The bot probably died laughing!'
return msg
def xs():
url = "http://api-x.aya1.xyz:6/"
text = requests.get(url=url).text
return text
| 18.529412 | 37 | 0.533333 | 179 | 0.534328 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.179104 |
48331ce668f0e91220bd7b7d009bc8b5666778cd | 304 | py | Python | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
]
| null | null | null | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
]
| null | null | null | bot.py | matthewzhaocc/discord-server-status | c9c58271ab3f7142f7e827b88d5c960cc442b355 | [
"MIT"
]
| null | null | null | #a discord bot for playing with CICD
#system dependencies
import os
#3rd party dependencies
import discord
TOKEN = os.environ.get("DISCORD_API_TOKEN")
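# The token is read from the environment, e.g. (shell):
#   export DISCORD_API_TOKEN="<bot token>"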
client = discord.Client()
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
client.run(TOKEN) | 19 | 54 | 0.720395 | 0 | 0 | 0 | 0 | 91 | 0.299342 | 76 | 0.25 | 143 | 0.470395 |
4834a12f8b0a1adc974a9695986c5da1d9c04010 | 603 | py | Python | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
]
| 9 | 2020-02-03T11:17:06.000Z | 2021-06-15T13:20:34.000Z | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
]
| 40 | 2020-02-03T11:23:59.000Z | 2020-05-19T08:05:41.000Z | repost/api/schemas/user.py | pckv/fastapi-backend | 0f561528086ac3fdcabbf9efeac888421eeb66de | [
"MIT"
]
| 1 | 2020-03-11T02:47:40.000Z | 2020-03-11T02:47:40.000Z | """API schemas for users."""
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
class User(BaseModel):
"""Schema for a user account"""
username: str
bio: Optional[str]
avatar_url: Optional[str]
created: datetime
edited: Optional[datetime]
class Config:
orm_mode = True
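    # With orm_mode enabled, an ORM row can be converted directly via the
    # pydantic v1 API, e.g. User.from_orm(db_user) (a usage sketch, not code
    # from this package).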
class CreateUser(BaseModel):
"""Schema for creating a new user account"""
username: str
password: str
class EditUser(BaseModel):
"""Schema for editing a user account"""
bio: Optional[str] = None
avatar_url: Optional[str] = None
| 20.1 | 48 | 0.681592 | 475 | 0.787728 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.235489 |
483592e4049e6951c186723536311a58d0a2c2a3 | 1,459 | py | Python | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
]
| 408 | 2015-01-01T10:31:47.000Z | 2022-03-26T17:41:21.000Z | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
]
| 521 | 2015-01-08T14:45:54.000Z | 2022-03-24T11:15:22.000Z | gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 65ec925ec8399af355e176c4814a749fde5f907d | [
"BSD-3-Clause"
]
| 158 | 2015-01-25T20:02:00.000Z | 2022-03-01T06:29:12.000Z | import re
from .._compat import integer_types, long
from .base import SQLAdapter
from . import adapters
@adapters.register_for("sapdb")
class SAPDB(SQLAdapter):
dbengine = "sapdb"
drivers = ("sapdb",)
REGEX_URI = (
"^(?P<user>[^:@]+)(:(?P<password>[^@]*))?"
r"@(?P<host>[^:/]+|\[[^\]]+\])/(?P<db>[^?]+)$"
)
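    # A connection URI for this adapter therefore looks like (illustrative
    # values):
    #   sapdb://myuser:mypassword@dbhost/mydb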
def _initialize_(self):
super(SAPDB, self)._initialize_()
ruri = self.uri.split("://", 1)[1]
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = self.credential_decoder(m.group("user"))
password = self.credential_decoder(m.group("password"))
if password is None:
password = ""
host = m.group("host")
db = m.group("db")
self.driver_args.update(user=user, password=password, database=db, host=host)
def connector(self):
self.driver.connect(**self.driver_args)
def lastrowid(self, table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
def create_sequence_and_triggers(self, query, table, **args):
self.execute("CREATE SEQUENCE %s;" % table._sequence_name)
self.execute(
"ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');"
% (table._rname, table._id._rname, table._sequence_name)
)
self.execute(query)
| 32.422222 | 85 | 0.592186 | 1,320 | 0.904729 | 0 | 0 | 1,352 | 0.926662 | 0 | 0 | 278 | 0.190541 |
4835ed45283fa22be5264491ed9fa12710bc4c24 | 54,436 | py | Python | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
]
| null | null | null | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
]
| null | null | null | search.py | Hawxo/GoWDiscordTeamBot | ebe062f88b8d39615ba871476471d466e6759e7a | [
"BSD-3-Clause"
]
| null | null | null | import copy
import datetime
import importlib
import logging
import operator
import re
from calendar import different_locale
import translations
from data_source.game_data import GameData
from game_constants import COLORS, EVENT_TYPES, RARITY_COLORS, SOULFORGE_REQUIREMENTS, TROOP_RARITIES, \
UNDERWORLD_SOULFORGE_REQUIREMENTS, WEAPON_RARITIES
from models.bookmark import Bookmark
from models.toplist import Toplist
from util import dig, extract_search_tag, get_next_monday_in_locale, translate_day
LOGLEVEL = logging.DEBUG
formatter = logging.Formatter('%(asctime)-15s [%(levelname)s] %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(LOGLEVEL)
log = logging.getLogger(__name__)
log.setLevel(LOGLEVEL)
log.addHandler(handler)
t = translations.Translations()
_ = t.get
def update_translations():
global _
importlib.reload(translations)
del _
_ = translations.Translations().get
class TeamExpander:
def __init__(self):
world = GameData()
world.populate_world_data()
self.troops = world.troops
self.troop_types = world.troop_types
self.spells = world.spells
self.effects = world.effects
self.positive_effects = world.positive_effects
self.weapons = world.weapons
self.classes = world.classes
self.banners = world.banners
self.traits = world.traits
self.kingdoms = world.kingdoms
self.pet_effects = world.pet_effects
self.pets = world.pets
self.talent_trees = world.talent_trees
self.spoilers = world.spoilers
self.events = world.events
self.campaign_tasks = world.campaign_tasks
self.reroll_tasks = world.campaign_rerolls
self.soulforge = world.soulforge
self.traitstones = world.traitstones
self.levels = world.levels
self.rooms = {}
self.toplists = Toplist()
self.bookmarks = Bookmark()
self.adventure_board = world.adventure_board
self.drop_chances = world.drop_chances
self.event_kingdoms = world.event_kingdoms
self.weekly_event = world.weekly_event
self.active_gems = world.gem_events
@classmethod
def extract_code_from_message(cls, raw_code):
numbers = [int(n.strip()) for n in raw_code.split(',') if n]
return numbers
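    # e.g. extract_code_from_message('6004,6005,6006,') -> [6004, 6005, 6006];
    # empty fragments from stray commas are dropped by the `if n` guard.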
def get_team_from_code(self, code, lang):
result = {
'troops': [],
'banner': {},
'class': None,
'talents': [],
'class_title': _('[CLASS]', lang),
'troops_title': _('[TROOPS]', lang),
}
has_weapon = False
has_class = False
for i, element in enumerate(code):
troop = self.troops.get(element)
weapon = self.weapons.get(element)
if troop:
troop = troop.copy()
self.translate_troop(troop, lang)
result['troops'].append(troop)
continue
elif weapon:
weapon = weapon.copy()
self.translate_weapon(weapon, lang)
result['troops'].append(weapon)
has_weapon = True
continue
_class = self.classes.get(element)
if _class:
result['class'] = _(_class['name'], lang)
result['class_talents'] = _class['talents']
has_class = True
continue
banner = self.banners.get(element)
if banner:
result['banner'] = self.translate_banner(banner, lang)
continue
if 0 <= element <= 3:
result['talents'].append(element)
continue
if i <= 3:
result['troops'].append(self.troops['`?`'])
continue
elif i == 4:
banner = {
'colors': [('questionmark', 1)],
'name': '[REQUIREMENTS_NOT_MET]',
'filename': 'Locked',
'id': '`?`'
}
result['banner'] = self.translate_banner(banner, lang)
continue
elif i == 12:
result['class'] = _('[REQUIREMENTS_NOT_MET]', lang)
result['talents'] = []
has_class = True
continue
if has_weapon and has_class:
new_talents = []
for talent_no, talent_code in enumerate(result['talents']):
talent = '-'
if talent_code > 0:
talent = _(result['class_talents'][talent_code - 1][talent_no]['name'], lang)
new_talents.append(talent)
result['talents'] = new_talents
else:
result['class'] = None
result['talents'] = None
return result
def get_team_from_message(self, user_code, lang):
code = self.extract_code_from_message(user_code)
if not code:
return
return self.get_team_from_code(code, lang)
@staticmethod
def search_item(search_term, lang, items, lookup_keys, translator, sort_by='name'):
if search_term.isdigit() and int(search_term) in items:
item = items.get(int(search_term))
if item:
result = item.copy()
translator(result, lang)
return [result]
return []
possible_matches = []
for base_item in items.values():
if base_item['name'] == '`?`' or base_item['id'] == '`?`':
continue
item = base_item.copy()
translator(item, lang)
lookups = {
k: extract_search_tag(dig(item, k)) for k in lookup_keys
}
real_search = extract_search_tag(search_term)
if real_search == extract_search_tag(item['name']):
return [item]
for key, lookup in lookups.items():
if real_search in lookup:
possible_matches.append(item)
break
return sorted(possible_matches, key=operator.itemgetter(sort_by))
def search_troop(self, search_term, lang):
lookup_keys = [
'name',
'kingdom',
'type',
'roles',
'spell.description',
]
return self.search_item(search_term, lang,
items=self.troops,
lookup_keys=lookup_keys,
translator=self.translate_troop)
def translate_troop(self, troop, lang):
troop['name'] = _(troop['name'], lang)
if self.is_untranslated(troop['name']):
troop['name'] = troop['reference_name']
troop['description'] = _(troop['description'], lang).replace('widerbeleben',
'wiederbeleben')
troop['color_code'] = "".join(troop['colors'])
troop['rarity_title'] = _('[RARITY]', lang)
troop['raw_rarity'] = troop['rarity']
rarity_number = 1
if troop['rarity'] in TROOP_RARITIES:
rarity_number = TROOP_RARITIES.index(troop['rarity'])
troop['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
troop['traits_title'] = _('[TRAITS]', lang)
troop['traits'] = self.enrich_traits(troop['traits'], lang)
troop['roles_title'] = _('[TROOP_ROLE]', lang)
troop['roles'] = [_(f'[TROOP_ROLE_{role.upper()}]', lang) for role in troop['roles']]
troop['type_title'] = _('[FILTER_TROOPTYPE]', lang)
troop['raw_types'] = troop['types']
types = [
_(f'[TROOPTYPE_{_type.upper()}]', lang) for _type in troop['types']
]
troop['type'] = ' / '.join(types)
troop['kingdom_title'] = _('[KINGDOM]', lang)
reference_name = troop['kingdom'].get('reference_name', troop['kingdom']['name'])
troop['kingdom'] = _(troop['kingdom']['name'], lang)
if self.is_untranslated(troop['kingdom']):
troop['kingdom'] = reference_name
troop['spell'] = self.translate_spell(troop['spell_id'], lang)
troop['spell_title'] = _('[TROOPHELP_SPELL0]', lang)
self.translate_traitstones(troop, lang)
troop['bonuses_title'] = _('[BONUSES]', lang)
@staticmethod
def translate_traitstones(item, lang):
item['traitstones_title'] = _('[SOULFORGE_TAB_TRAITSTONES]', lang)
if 'traitstones' not in item:
item['traitstones'] = []
traitstones = []
for rune in item['traitstones']:
traitstones.append(f'{_(rune["name"], lang)} ({rune["amount"]})')
item['traitstones'] = traitstones
@staticmethod
def enrich_traits(traits, lang):
new_traits = []
for trait in traits:
new_trait = trait.copy()
new_trait['name'] = _(trait['name'], lang)
new_trait['description'] = _(trait['description'], lang)
new_traits.append(new_trait)
return new_traits
    def search_kingdom(self, search_term, lang, include_warband=True):
        # NOTE: include_warband is currently unused; warband kingdoms are
        # always part of the searched set.
        lookup_keys = ['name']
        return self.search_item(search_term, lang, items=self.kingdoms, lookup_keys=lookup_keys,
                                translator=self.translate_kingdom)
def kingdom_summary(self, lang):
kingdoms = [k.copy() for k in self.kingdoms.values() if k['location'] == 'krystara' and len(k['colors']) > 0]
for kingdom in kingdoms:
self.translate_kingdom(kingdom, lang)
return sorted(kingdoms, key=operator.itemgetter('name'))
def translate_kingdom(self, kingdom, lang):
kingdom['name'] = _(kingdom['name'], lang)
if self.is_untranslated(kingdom['name']):
kingdom['name'] = kingdom['reference_name']
kingdom['description'] = _(kingdom['description'], lang)
kingdom['punchline'] = _(kingdom['punchline'], lang)
kingdom['troop_title'] = _('[TROOPS]', lang)
kingdom['troops'] = []
for troop_id in kingdom['troop_ids']:
if troop_id not in self.troops:
continue
troop = self.troops[troop_id].copy()
self.translate_troop(troop, lang)
kingdom['troops'].append(troop)
kingdom['troops'] = sorted(kingdom['troops'], key=operator.itemgetter('name'))
kingdom['weapons_title'] = _('[WEAPONS:]', lang)
kingdom['weapons'] = sorted([
{'name': _(self.weapons[_id]['name'], lang),
'id': _id
} for _id in kingdom['weapon_ids']
], key=operator.itemgetter('name'))
kingdom['banner_title'] = _('[BANNERS]', lang)
kingdom['banner'] = self.translate_banner(self.banners[kingdom['id']], lang)
kingdom['linked_kingdom'] = None
if kingdom['linked_kingdom_id']:
kingdom['linked_kingdom'] = _(self.kingdoms[kingdom['linked_kingdom_id']]['name'], lang)
if kingdom['linked_kingdom'] and self.is_untranslated(kingdom['linked_kingdom']):
kingdom['linked_kingdom'] = None
kingdom['map'] = _('[MAPNAME_MAIN]', lang)
kingdom['linked_map'] = _('[MAPNAME_UNDERWORLD]', lang)
if kingdom['underworld']:
kingdom['map'] = _('[MAPNAME_UNDERWORLD]', lang)
kingdom['linked_map'] = _('[MAPNAME_MAIN]', lang)
if 'primary_color' in kingdom:
deed_num = COLORS.index(kingdom['primary_color'])
kingdom['deed'] = _(f'[DEED{deed_num:02d}]', lang)
kingdom['color_title'] = _('[GEM_MASTERY]', lang)
kingdom['stat_title'] = _('[STAT_BONUS]', lang)
if 'class_id' in kingdom:
kingdom['class_title'] = _('[CLASS]', lang)
kingdom['class'] = _(self.classes[kingdom['class_id']]['name'], lang)
if 'primary_stat' in kingdom:
kingdom['primary_stat'] = _(f'[{kingdom["primary_stat"].upper()}]', lang)
if 'pet' in kingdom:
kingdom['pet_title'] = _('[PET_RESCUE_PET]', lang)
kingdom['pet'] = kingdom['pet'].translations[lang]
if 'event_weapon' in kingdom:
kingdom['event_weapon_title'] = _('[FACTION_WEAPON]', lang)
kingdom['event_weapon_id'] = kingdom['event_weapon']['id']
event_weapon = kingdom['event_weapon'].copy()
self.translate_weapon(event_weapon, lang)
kingdom['event_weapon'] = event_weapon
kingdom['max_power_level_title'] = _('[KINGDOM_POWER_LEVELS]', lang)
def search_class(self, search_term, lang):
lookup_keys = ['name']
return self.search_item(search_term, lang,
items=self.classes,
translator=self.translate_class,
lookup_keys=lookup_keys)
def class_summary(self, lang):
classes = [c.copy() for c in self.classes.values()]
for c in classes:
self.translate_class(c, lang)
return sorted(classes, key=operator.itemgetter('name'))
def translate_class(self, _class, lang):
kingdom = self.kingdoms[_class['kingdom_id']]
_class['kingdom'] = _(kingdom['name'], lang, default=kingdom['reference_name'])
weapon = self.weapons[_class['weapon_id']]
_class['weapon'] = _(weapon['name'], lang)
_class['name'] = _(_class['name'], lang)
translated_trees = []
for tree in _class['talents']:
translated_talents = []
for talent in tree:
translated_talents.append({
'name': _(talent['name'], lang),
'description': _(talent['description'], lang)
})
translated_trees.append(translated_talents)
self.translate_traitstones(_class, lang)
_class['talents_title'] = _('[TALENT_TREES]', lang)
_class['kingdom_title'] = _('[KINGDOM]', lang)
_class['traits_title'] = _('[TRAITS]', lang)
_class['traits'] = self.enrich_traits(_class['traits'], lang)
_class['weapon_title'] = _('[WEAPON]', lang)
_class['talents'] = translated_trees
_class['trees'] = [_(f'[TALENT_TREE_{t.upper()}]', lang) for t in _class['trees']]
_class['type_short'] = _(f'[TROOPTYPE_{_class["type"].upper()}]', lang)
_class['type'] = _(f'[PERK_TYPE_{_class["type"].upper()}]', lang)
_class['weapon_bonus'] = _('[MAGIC_BONUS]', lang) + " " + _(
f'[MAGIC_BONUS_{COLORS.index(_class["weapon_color"])}]', lang)
def search_talent(self, search_term, lang):
possible_matches = []
for tree in self.talent_trees.values():
translated_name = extract_search_tag(_(tree['name'], lang))
translated_talents = [_(t['name'], lang) for t in tree['talents']]
talents_search_tags = [extract_search_tag(t) for t in translated_talents]
real_search = extract_search_tag(search_term)
if real_search == translated_name or real_search in talents_search_tags:
result = tree.copy()
self.translate_talent_tree(result, lang)
return [result]
elif real_search in translated_name:
result = tree.copy()
self.translate_talent_tree(result, lang)
possible_matches.append(result)
else:
talent_matches = [t for t in talents_search_tags if real_search in t]
if talent_matches:
result = tree.copy()
result['talent_matches'] = talent_matches
self.translate_talent_tree(result, lang)
possible_matches.append(result)
return sorted(possible_matches, key=operator.itemgetter('name'))
@staticmethod
def translate_talent_tree(tree, lang):
tree['talents_title'] = _('[TALENT_TREES]', lang)
tree['name'] = _(tree['name'], lang)
translated_talents = []
for talent in tree['talents']:
translated_talents.append({
'name': _(talent['name'], lang),
'description': _(talent['description'], lang)
})
tree['talents'] = translated_talents
tree['classes'] = [
{'id': c['id'],
'name': _(c['name'], lang)
}
for c in tree['classes']
]
def get_troops_with_trait(self, trait, lang):
return self.get_objects_by_trait(trait, self.troops, self.translate_troop, lang)
def get_classes_with_trait(self, trait, lang):
return self.get_objects_by_trait(trait, self.classes, self.translate_class, lang)
@staticmethod
def get_objects_by_trait(trait, objects, translator, lang):
result = []
for o in objects.values():
trait_codes = [t['code'] for t in o['traits']] if 'traits' in o else []
if trait['code'] in trait_codes:
translated_object = o.copy()
translator(translated_object, lang)
result.append(translated_object)
return result
def search_trait(self, search_term, lang):
possible_matches = []
for code, trait in self.traits.items():
translated_name = extract_search_tag(_(trait['name'], lang))
translated_description = extract_search_tag(_(trait['description'], lang))
real_search = extract_search_tag(search_term)
if real_search == translated_name:
result = trait.copy()
result['troops'] = self.get_troops_with_trait(trait, lang)
result['troops_title'] = _('[TROOPS]', lang)
result['classes'] = self.get_classes_with_trait(trait, lang)
result['classes_title'] = _('[CLASS]', lang)
if result['troops'] or result['classes']:
return self.enrich_traits([result], lang)
elif real_search in translated_name or real_search in translated_description:
result = trait.copy()
result['troops'] = self.get_troops_with_trait(trait, lang)
result['troops_title'] = _('[TROOPS]', lang)
result['classes'] = self.get_classes_with_trait(trait, lang)
result['classes_title'] = _('[CLASS]', lang)
if result['troops'] or result['classes']:
possible_matches.append(result)
return sorted(self.enrich_traits(possible_matches, lang), key=operator.itemgetter('name'))
def search_pet(self, search_term, lang):
return self.pets.search(search_term, lang)
def search_weapon(self, search_term, lang):
lookup_keys = [
'name',
'type',
'roles',
'spell.description',
]
return self.search_item(search_term, lang,
items=self.weapons,
lookup_keys=lookup_keys,
translator=self.translate_weapon)
def translate_weapon(self, weapon, lang):
weapon['name'] = _(weapon['name'], lang)
weapon['description'] = _(weapon['description'], lang)
weapon['color_code'] = "".join(sorted(weapon['colors']))
weapon['spell_title'] = _('[TROOPHELP_SPELL0]', lang)
weapon['rarity_title'] = _('[RARITY]', lang)
weapon['raw_rarity'] = weapon['rarity']
rarity_number = WEAPON_RARITIES.index(weapon['rarity'])
weapon['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
weapon['spell'] = self.translate_spell(weapon['spell_id'], lang)
weapon['upgrade_title'] = _('[UPGRADE_WEAPON]', lang)
bonus_title = _('[BONUS]', lang)
upgrade_numbers = zip(weapon['armor_increase'], weapon['attack_increase'], weapon['health_increase'],
weapon['magic_increase'])
upgrade_titles = (
_('[ARMOR]', lang),
_('[ATTACK]', lang),
_('[LIFE]', lang),
_('[MAGIC]', lang),
)
upgrades = []
for upgrade in upgrade_numbers:
for i, amount in enumerate(upgrade):
if amount:
upgrades.append(
{'name': f'{upgrade_titles[i]} {bonus_title}',
'description': f'+{amount} {upgrade_titles[i]}'})
weapon['upgrades'] = upgrades + [self.translate_spell(spell['id'], lang) for spell in weapon['affixes']]
weapon['kingdom_title'] = _('[KINGDOM]', lang)
weapon['kingdom_id'] = weapon['kingdom']['id']
weapon['kingdom'] = _(weapon['kingdom']['name'], lang)
weapon['roles_title'] = _('[WEAPON_ROLE]', lang)
weapon['roles'] = [_(f'[TROOP_ROLE_{role.upper()}]', lang) for role in weapon['roles']]
weapon['type_title'] = _('[FILTER_WEAPONTYPE]', lang)
weapon['type'] = _(f'[WEAPONTYPE_{weapon["type"].upper()}]', lang)
weapon['has_mastery_requirement_color'] = False
if weapon['requirement'] < 1000:
weapon['requirement_text'] = _('[WEAPON_MASTERY_REQUIRED]', lang) + \
str(weapon['requirement'])
weapon['has_mastery_requirement_color'] = True
elif weapon['requirement'] == 1000:
weapon['requirement_text'] = _('[WEAPON_AVAILABLE_FROM_CHESTS_AND_EVENTS]', lang)
elif weapon['requirement'] == 1002:
_class = _(weapon.get('class', '[NO_CLASS]'), lang)
weapon['requirement_text'] = _('[CLASS_REWARD_TITLE]', lang) + f' ({_class})'
elif weapon['requirement'] == 1003:
weapon['requirement_text'] = _('[SOULFORGE_WEAPONS_TAB_EMPTY_ERROR]', lang)
if weapon.get('event_faction'):
weapon['requirement_text'] += ' (' + _(f'[{weapon["event_faction"]}_NAME]', lang) + ' ' + _(
'[FACTION_WEAPON]', lang) + ')'
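    # Affixes are the weapon upgrades that carry a crafting cost; collect
    # every weapon sharing a matching affix so results can be grouped by name.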
def search_affix(self, search_term, lang):
real_search = extract_search_tag(search_term)
results = {}
for weapon in self.weapons.values():
my_weapon = weapon.copy()
self.translate_weapon(my_weapon, lang)
affixes = [affix for affix in my_weapon['upgrades'] if 'cost' in affix]
for affix in affixes:
search_name = extract_search_tag(affix['name'])
search_desc = extract_search_tag(affix['description'])
if real_search == search_name \
or real_search == search_desc \
or real_search in search_name \
or real_search in search_desc:
if affix['name'] in results:
results[affix['name']]['weapons'].append(my_weapon)
results[affix['name']]['num_weapons'] += 1
else:
results[affix['name']] = affix.copy()
results[affix['name']]['weapons_title'] = _('[SOULFORGE_TAB_WEAPONS]', lang)
results[affix['name']]['weapons'] = [my_weapon]
results[affix['name']]['num_weapons'] = 1
for name, affix in results.items():
if real_search == extract_search_tag(name):
return [affix]
return sorted(results.values(), key=operator.itemgetter('name'))
def search_traitstone(self, search_term, lang):
return self.search_item(search_term, lang,
items=self.traitstones,
lookup_keys=['name'],
translator=self.translate_traitstone)
def translate_traitstone(self, traitstone, lang):
troops = []
for troop_id in traitstone['troop_ids']:
amount = sum([t['amount'] for t in self.troops[troop_id]['traitstones'] if t['id'] == traitstone['id']])
troops.append([_(self.troops[troop_id]['name'], lang), amount])
traitstone['troops'] = sorted(troops, key=operator.itemgetter(1), reverse=True)
classes = []
for class_id in traitstone['class_ids']:
amount = sum([t['amount'] for t in self.classes[class_id]['traitstones'] if t['id'] == traitstone['id']])
classes.append([_(self.classes[class_id]['name'], lang), amount])
traitstone['classes'] = classes
kingdoms = []
for kingdom_id in traitstone['kingdom_ids']:
kingdoms.append(_(self.kingdoms[int(kingdom_id)]['name'], lang))
if not traitstone['kingdom_ids']:
kingdoms.append(_('[ALL_KINGDOMS]', lang))
traitstone['kingdoms'] = kingdoms
traitstone['name'] = _(traitstone['name'], lang)
traitstone['troops_title'] = _('[TROOPS]', lang)
traitstone['classes_title'] = _('[CLASS]', lang)
traitstone['kingdoms_title'] = _('[KINGDOMS]', lang)
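    # Spell effects come as (multiplier, amount) pairs that replace the {n}
    # placeholders in the description with "[multiplier x Magic / divisor + amount]".
    # When the description has one more placeholder than there are effects,
    # {1} is rendered at half strength and {2} at full (split-damage spells).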
def translate_spell(self, spell_id, lang):
spell = self.spells[spell_id]
magic = _('[MAGIC]', lang)
description = _(spell['description'], lang)
for i, (multiplier, amount) in enumerate(spell['effects'], start=1):
spell_amount = f' + {amount}' if amount else ''
multiplier_text = ''
if multiplier > 1:
if multiplier == int(multiplier):
multiplier_text = f'{multiplier:.0f} ⨯ '
else:
multiplier_text = f'{multiplier} ⨯ '
divisor = ''
if multiplier < 1:
number = int(round(1 / multiplier))
divisor = f' / {number}'
damage = f'[{multiplier_text}{magic}{divisor}{spell_amount}]'
number_of_replacements = len(re.findall(r'\{\d\}', description))
has_half_replacement = len(spell['effects']) == number_of_replacements - 1
if '{2}' in description and has_half_replacement:
multiplier *= 0.5
amount *= 0.5
if amount == int(amount):
amount = int(amount)
half_damage = f'[{multiplier} ⨯ {magic}{divisor} + {amount}]'
description = description.replace('{1}', half_damage)
description = description.replace('{2}', damage)
else:
description = description.replace(f'{{{i}}}', damage)
boost = ''
if spell['boost'] and spell['boost'] > 100:
boost = f' [x{int(round(spell["boost"] / 100))}]'
elif spell['boost'] and spell['boost'] != 1 and spell['boost'] <= 100:
boost = f' [{100 / spell["boost"]:0.0f}:1]'
description = f'{description}{boost}'
return {
'name': _(spell['name'], lang),
'cost': spell['cost'],
'description': description,
}
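    # Banner colors are rendered as initials: uppercase for a positive mana
    # bonus, lowercase for a negative one; zero-amount colors are dropped.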
def translate_banner(self, banner, lang):
result = {
'name': _(banner['name'], lang),
'kingdom': _(self.kingdoms[banner['id']]['name'], lang),
'colors': [(_(c[0], 'en').lower(), c[1]) for c in banner['colors'] if c[1]],
'filename': banner['filename'],
}
colors_shorthand = []
for color, amount in result['colors']:
if amount > 0:
colors_shorthand.append(color[0].upper())
else:
colors_shorthand.append(color[0].lower())
result['colors_shorthand'] = ''.join(colors_shorthand)
if not result['colors']:
result['available'] = _('[AVAILABLE_FROM_KINGDOM]', lang).replace('%1', _(f'[{banner["id"]}_NAME]', lang))
return result
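    # Weekly event kingdoms: start from spoiler-based guesses, then overlay
    # the officially scheduled ids week by week from next Monday onwards.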
def get_event_kingdoms(self, lang):
today = datetime.date.today()
start = today + datetime.timedelta(days=-today.weekday(), weeks=1)
result = self.guess_weekly_kingdom_from_troop_spoilers(lang)
for kingdom_id in self.event_kingdoms:
end = start + datetime.timedelta(days=7)
if kingdom_id != 0:
event_data = {
'start': start,
'end': end,
'kingdom': _(self.kingdoms[kingdom_id]['name'], lang,
default=self.kingdoms[kingdom_id]['reference_name']),
}
result[start] = event_data
start = end
return sorted(result.values(), key=operator.itemgetter('start'))
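    # Heuristic: a non-mythic troop spoiler dated on a Monday usually marks
    # that week's event kingdom; guessed entries are suffixed with " *".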
def guess_weekly_kingdom_from_troop_spoilers(self, lang):
result = {}
latest_date = datetime.datetime.utcnow()
for spoiler in self.spoilers:
if spoiler['type'] == 'troop' \
and spoiler['date'].weekday() == 0 \
and spoiler['date'] > latest_date:
troop = self.troops[spoiler['id']]
if troop['rarity'] == 'Mythic':
continue
kingdom = troop['kingdom']
if not kingdom.get('name') and not kingdom.get('reference_name'):
continue
result[spoiler['date'].date()] = {
'start': spoiler['date'].date(),
'end': spoiler['date'].date() + datetime.timedelta(days=7),
'kingdom': _(kingdom['name'], lang,
default=kingdom['reference_name']) + ' *',
}
latest_date = spoiler['date']
return result
def get_events(self, lang):
today = datetime.date.today()
events = [self.translate_event(e, lang) for e in self.events if today <= e['start']]
return events
def translate_event(self, event, lang):
entry = event.copy()
entry['extra_info'] = ''
if entry['type'] in ('[BOUNTY]', '[HIJACK]') and entry['gacha'] and entry['gacha'] in self.troops:
entry['extra_info'] = _(self.troops[entry['gacha']]['name'], lang)
elif entry['type'] == '[PETRESCUE]' and entry['gacha']:
entry['extra_info'] = self.pets[entry['gacha']][lang].name
elif entry['type'] == '[CLASS_EVENT]' and entry['gacha']:
entry['extra_info'] = _(self.classes[entry['gacha']]['name'], lang)
elif entry['type'] == '[TOWER_OF_DOOM]' and entry['gacha']:
entry['extra_info'] = _(self.troops[entry['gacha']]['name'], lang)
elif entry['type'] == '[DELVE_EVENT]':
entry['extra_info'] = _(self.kingdoms[entry['kingdom_id']]['name'], lang)
elif entry['type'] == '[HIJACK]' and entry['troops']:
entry['extra_info'] = ', '.join(_(self.troops[t]['name'], lang) for t in entry['troops'])
elif entry['type'] == '[INVASION]' and entry['gacha'] and entry['gacha'] in self.troops:
troop = self.troops[entry['gacha']]
troop_name = _(troop['name'], lang)
troop_types = [_(f'[TROOPTYPE_{t.upper()}]', lang) for t in troop['types']]
entry['extra_info'] = f'{troop_name} ({", ".join(troop_types)})'
elif entry['type'] in ('[WEEKLY_EVENT]', '[RARITY_5]') and entry['gacha'] and entry['gacha'] in self.troops:
troop = self.troops[entry['gacha']]
troop_name = _(troop['name'], lang)
kingdom = _(self.kingdoms[entry['kingdom_id']]['name'], lang)
entry['extra_info'] = f'{troop_name} ({kingdom})'
entry['kingdom'] = kingdom
locale = translations.LANGUAGE_CODE_MAPPING.get(lang, lang)
locale = translations.LOCALE_MAPPING.get(locale, 'en_GB') + '.UTF8'
with different_locale(locale):
entry['formatted_start'] = entry['start'].strftime('%b %d')
entry['formatted_end'] = entry['end'].strftime('%b %d')
entry['raw_type'] = entry['type']
entry['type'] = _(entry['type'], lang)
return entry
def get_campaign_tasks(self, lang, _filter=None):
result = {'heading': f'{_("[CAMPAIGN]", lang)}: {_("[TASKS]", lang)}'}
tiers = ['bronze', 'silver', 'gold']
result['campaigns'] = {
f'[MEDAL_LEVEL_{i}]': [self.translate_campaign_task(t, lang) for t in self.campaign_tasks[tier]]
for i, tier in reversed(list(enumerate(tiers))) if _filter is None or tier.lower() == _filter.lower()
}
formatted_start, start_date = get_next_monday_in_locale(date=None, lang=lang)
result['has_content'] = any([len(c) > 0 for c in result['campaigns'].values()])
result['background'] = f'Background/{self.campaign_tasks["kingdom"]["filename"]}_full.png'
result['gow_logo'] = 'Atlas/gow_logo.png'
kingdom_filebase = self.campaign_tasks['kingdom']['filename']
result['kingdom_logo'] = f'Troopcardshields_{kingdom_filebase}_full.png'
result['kingdom'] = _(self.campaign_tasks['kingdom']['name'], lang)
result['raw_date'] = start_date
result['date'] = formatted_start
result['lang'] = lang
result['texts'] = {
'campaign': _('[CAMPAIGN]', lang),
'team': _('[LITE_CHAT_TEAM_START]', lang),
}
return result
def get_reroll_tasks(self, lang, _filter=None):
tiers = ['bronze', 'silver', 'gold']
tasks = {
f'[MEDAL_LEVEL_{i}]': [self.translate_campaign_task(t, lang) for t in self.reroll_tasks[tier]]
for i, tier in reversed(list(enumerate(tiers))) if _filter is None or tier.lower() == _filter.lower()
}
return tasks
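    # Campaign task texts contain game-side placeholders ({Kingdom}, {Color},
    # {Troop}, ...) that are mapped to translation keys below; for kill tasks
    # a "--> <kingdom>" hint is appended telling players where to farm.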
def translate_campaign_task(self, task, lang):
new_task = task.copy()
color_code = int(new_task['value1']) if new_task['value1'].isdigit() else 666
color = COLORS[color_code].upper() if color_code < len(COLORS) else '`?`'
if isinstance(new_task.get('y'), str):
new_task['y'] = _(f'[{new_task["y"].upper()}]', lang)
new_task['plural'] = int(new_task.get('x', 1)) != 1
replacements = {
'{WeaponType}': '[WEAPONTYPE_{c:u}]',
'{Kingdom}': '[{d:u}_NAME]',
'{Banner}': '[{c:u}_BANNERNAME]',
'{Class}': '[HEROCLASS_{c:l}_NAME]',
'{Color}': f'[GEM_{color}]',
'{TroopType}': '[TROOPTYPE_{value1:u}]',
'{Troop}': '{{[{value1}][name]}}',
'{Value0}': task['value0'],
'{Value1}': task['value1'],
'{0}': '{x}',
'{1}': task['c'],
'{2}': '{x} {y}',
}
new_task['title'] = _(new_task['title'], lang, plural=new_task['plural'])
new_task['name'] = _(new_task["name"], lang, plural=new_task['plural'])
if '{0}' not in new_task['name'] and '{2}' not in new_task['name']:
new_task['name'] = f'{task["x"]}x ' + new_task['name']
for before, after in replacements.items():
if before in new_task['title'] or before in new_task['name']:
translated = _(after.format(**new_task).format(self.troops), lang, plural=new_task['plural'])
if '`?`' in translated:
translated = '`?`'
new_task['title'] = new_task['title'].replace(before, translated)
new_task['name'] = new_task['name'].replace(before, translated)
where = ''
if new_task['value1'] == '`?`':
pass
elif task['name'] == '[TASK_KILL_TROOP_COLOR]':
color_kingdoms = self.get_color_kingdoms(lang)
target_kingdom = color_kingdoms[color.lower()]['name']
where = f' --> {target_kingdom}'
elif task['name'] == '[TASK_KILL_TROOP_ID]':
target_kingdom = _(self.troops[int(task['value1'])]['kingdom']['name'], lang)
pvp = _('[PVP]', lang)
weekly_event = _('[WEEKLY_EVENT]', lang)
where = f' --> {target_kingdom} / {pvp} / {weekly_event}'
elif task['name'] == '[TASK_KILL_TROOP_TYPE]':
troop_type_kingdoms = dict(self.get_type_kingdoms(lang))
troop_type = _(f'[TROOPTYPE_{task["value1"].upper()}]', lang)
target_kingdom = troop_type_kingdoms[troop_type]['name']
where = f' --> {target_kingdom}'
new_task['name'] += where
return new_task
def get_spoilers(self, lang):
spoilers = []
now = datetime.datetime.utcnow()
near_term_spoilers = [s for s in self.spoilers if now <= s['date'] <= now + datetime.timedelta(days=180)]
for spoiler in near_term_spoilers:
translated = self.translate_spoiler(spoiler, lang)
if translated:
spoilers.append(translated)
return spoilers
def translate_spoiler(self, spoiler, lang):
# FIXME this is transitional until all new models are in place.
if spoiler['type'] in ['pet']:
item = getattr(self, spoiler['type'] + 's').get(spoiler['id'])
if not item:
return
entry = item[translations.LANGUAGE_CODE_MAPPING.get(lang, lang)].data.copy()
else:
entry = getattr(self, spoiler['type'] + 's').get(spoiler['id'], {}).copy()
if not entry:
return None
entry['name'] = _(entry['name'], lang)
if self.is_untranslated(entry['name']):
entry['name'] = entry.get('reference_name', entry['name'])
entry['type'] = spoiler['type']
entry['date'] = spoiler['date'].date()
entry['event'] = _('[GLOG_EVENT]', lang) + ': ' if entry.get('event') else ''
if 'rarity' in entry:
entry['rarity_title'] = _('[RARITY]', lang)
if entry['rarity'] in TROOP_RARITIES:
rarity_number = TROOP_RARITIES.index(entry['rarity'])
entry['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
kingdom_id = entry.get('kingdom_id')
if kingdom_id:
kingdom = self.kingdoms[kingdom_id]
entry['kingdom'] = _(kingdom['name'], lang)
if self.is_untranslated(entry['kingdom']):
entry['kingdom'] = kingdom['reference_name']
return entry
def get_soulforge(self, lang):
title = _('[SOULFORGE]', lang)
craftable_items = {}
for category, recipes in self.soulforge.items():
recipe_type = _(category, lang)
craftable_items[recipe_type] = [self.translate_recipe(r, lang) for r in recipes]
return title, craftable_items
@staticmethod
def translate_recipe(recipe, lang):
new_recipe = recipe.copy()
new_recipe['name'] = _(recipe['name'], lang)
rarity_number = WEAPON_RARITIES.index(new_recipe['rarity'])
new_recipe['rarity_number'] = rarity_number
new_recipe['rarity'] = _(f'[RARITY_{rarity_number}]', lang)
return new_recipe
@staticmethod
def translate_categories(categories, lang):
def try_different_translated_versions_because_devs_are_stupid(cat):
lookup = f'[{cat.upper()}S]'
result = _(lookup, lang)
if result == lookup:
lookup = f'[{cat.upper()}S:]'
result = _(lookup, lang)[:-1]
if result == lookup[:-1]:
result = _(f'[{cat.upper()}]', lang)
return result
translated = [try_different_translated_versions_because_devs_are_stupid(c) for c in categories]
return dict(zip(categories, translated))
def get_levels(self, lang):
levels = [{
'level': level['level'],
'bonus': _(level['bonus'], lang),
} for level in self.levels]
return levels
def translate_toplist(self, toplist_id, lang):
toplist = self.toplists.get(toplist_id)
if not toplist:
return None
result = toplist.copy()
result['items'] = []
for item_search in toplist['items']:
items = self.search_troop(item_search, lang)
if not items:
items = self.search_weapon(item_search, lang)
if not items:
continue
result['items'].append(items[0])
return result
async def create_toplist(self, message, description, items, lang, update_id):
toplist_id = await self.toplists.add(message.author.id, message.author.display_name, description, items,
update_id)
toplist = self.translate_toplist(toplist_id, lang)
return toplist
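    # For each filter value, rank the (non-hidden) Krystara kingdoms by the
    # share of their already released troops matching the filter and keep the
    # top one; used for the "where to farm color/type kills" hints above.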
def kingdom_percentage(self, filter_name, filter_values, lang):
result = {}
now = datetime.datetime.utcnow()
hidden_kingdoms = [3032, 3033, 3034, 3038]
for filter_ in filter_values:
kingdoms = []
for kingdom in self.kingdoms.values():
if kingdom['location'] != 'krystara':
continue
if kingdom['id'] in hidden_kingdoms:
continue
all_troops = [self.troops.get(t) for t in kingdom['troop_ids']]
available_troops = [t for t in all_troops if t and t.get('release_date', now) <= now]
if not available_troops:
continue
fitting_troops = [t for t in available_troops if filter_ in t[filter_name]]
kingdoms.append({
'name': _(kingdom['name'], lang),
'total': len(available_troops),
'fitting_troops': len(fitting_troops),
'percentage': len(fitting_troops) / len(available_troops),
})
top_kingdom = sorted(kingdoms, key=operator.itemgetter('percentage'), reverse=True)[0]
result[filter_] = top_kingdom
return result
def get_color_kingdoms(self, lang):
colors_without_skulls = COLORS[:6]
return self.kingdom_percentage('colors', colors_without_skulls, lang)
def get_type_kingdoms(self, lang):
forbidden_types = {'None', 'Boss', 'Tower', 'Castle', 'Doom', 'Gnome'}
troop_types = self.troop_types - forbidden_types
result = self.kingdom_percentage('types', troop_types, lang)
translated_result = {
_(f"[TROOPTYPE_{troop_type.upper()}]", lang): kingdom
for troop_type, kingdom in result.items()
}
return sorted(translated_result.items(), key=operator.itemgetter(0))
def get_adventure_board(self, lang):
result = []
for adventure in self.adventure_board:
result.append(self.translate_adventure(adventure, lang))
return result
@staticmethod
def translate_adventure(adventure, lang):
def change_form(key, value):
if value == 1 and key.startswith('[KEYTYPE'):
key = key.replace('_TITLE', '_SINGLE')
return _(key, lang).replace('%1 ', ''), value
result = adventure.copy()
result['name'] = _(result['name'], lang)
result['reward_types'] = set(result['rewards'].keys())
result['rewards'] = dict([change_form(key, value) for key, value in result['rewards'].items()])
result['rarity'] = _(result['rarity'], lang)
return result
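    # Untranslated keys come back from the translator still wrapped in
    # square brackets, e.g. "[KINGDOM]".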
@staticmethod
def is_untranslated(param):
if not param:
return True
return param[0] + param[-1] == '[]'
def get_toplist_troop_ids(self, items, lang):
result = []
for search_term in items.split(','):
items = self.search_troop(search_term, lang)
if not items:
items = self.search_weapon(search_term, lang)
if items:
result.append(str(items[0]['id']))
return result
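    # Assemble the render payload for the weekly Soulforge weapon graphic:
    # jewel costs per mana color (with farmable days and kingdoms), kingdom
    # logos, affixes, stat increases and translated UI texts.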
def get_soulforge_weapon_image_data(self, search_term, date, switch, lang):
search_result = self.search_weapon(search_term, lang)
if len(search_result) != 1:
return
weapon = search_result[0].copy()
requirements = SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
alternate_kingdom_id = weapon.get('event_faction')
if alternate_kingdom_id:
requirements = UNDERWORLD_SOULFORGE_REQUIREMENTS[weapon['raw_rarity']].copy()
jewels = []
for color in weapon['colors']:
color_code = COLORS.index(color)
filename = f'Runes_Jewel{color_code:02n}_full.png'
jewels.append({
'filename': filename,
'amount': requirements['jewels'],
'available_on': translate_day(color_code, lang),
'kingdoms': sorted([_(kingdom['name'], lang) for kingdom in self.kingdoms.values()
if 'primary_color' in kingdom
and color == kingdom['primary_color']
and kingdom['location'] == 'krystara']),
})
requirements['jewels'] = jewels
kingdom = self.kingdoms[weapon['kingdom_id']]
alternate_kingdom = None
alternate_kingdom_name = None
alternate_kingdom_filename = None
if alternate_kingdom_id:
alternate_kingdom = self.kingdoms[alternate_kingdom_id]
alternate_kingdom_name = _(alternate_kingdom['name'], lang)
alternate_kingdom_filename = alternate_kingdom['filename']
affixes = [{
'name': _(affix['name'], lang),
'description': _(affix['description'], lang),
'color': list(RARITY_COLORS.values())[i],
} for i, affix in enumerate(weapon['affixes'], start=1)]
mana_colors = ''.join([c.title() for c in weapon['colors']]).replace('Brown', 'Orange')
kingdom_filebase = self.kingdoms[weapon['kingdom_id']]['filename']
in_soulforge_text = _('[WEAPON_AVAILABLE_FROM_SOULFORGE]', lang)
if alternate_kingdom_id:
in_soulforge_text += ' (' + _(f'[{weapon["event_faction"]}_NAME]', lang) + ' ' + _(
'[FACTION_WEAPON]', lang) + ')'
date = get_next_monday_in_locale(date, lang)[0]
result = {
'switch': switch,
'name': weapon['name'],
'rarity_color': RARITY_COLORS[weapon['raw_rarity']],
'rarity': weapon['rarity'],
'filename': f'Spells/Cards_{weapon["spell_id"]}_full.png',
'description': weapon['spell']['description'],
'kingdom': weapon['kingdom'],
'alternate_kingdom': alternate_kingdom_name,
'kingdom_logo': f'Troopcardshields_{kingdom_filebase}_full.png',
'alternate_kingdom_logo': f'Troopcardshields_{alternate_kingdom_filename}_full.png',
'type': _(weapon['type'], lang),
'background': f'Background/{kingdom["filename"]}_full.png',
'gow_logo': 'Atlas/gow_logo.png',
'requirements': requirements,
'affixes': affixes,
'affix_icon': 'Atlas/affix.png',
'gold_medal': 'Atlas/medal_gold.png',
'mana_color': f'Troopcardall_{mana_colors}_full.png',
'mana_cost': weapon['spell']['cost'],
'stat_increases': {'attack': sum(weapon['attack_increase']),
'health': sum(weapon['health_increase']),
'armor': sum(weapon['armor_increase']),
'magic': sum(weapon['magic_increase'])},
'stat_icon': 'Atlas/{stat}.png',
'texts': {
'from_battles': _('[PET_LOOT_BONUS]', lang).replace('+%1% %2 ', '').replace('+%1 %2 ', ''),
'gem_bounty': _('[DUNGEON_OFFER_NAME]', lang),
'kingdom_challenges': f'{_("[KINGDOM]", lang)} {_("[CHALLENGES]", lang)}',
'soulforge': _('[SOULFORGE]', lang),
'resources': _('[RESOURCES]', lang),
'dungeon': _('[DUNGEON]', lang),
'dungeon_battles': _('[TASK_WIN_DUNGEON_BATTLES]', lang).replace('{0}', '3').replace('\x19', 's'),
'tier_8': _('[CHALLENGE_TIER_8_ROMAN]', lang),
'available': _('[AVAILABLE]', lang),
'in_soulforge': in_soulforge_text,
'n_gems': _('[GEMS_GAINED]', lang).replace('%1', '50'),
},
'date': date,
}
return result
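    # Recursively swap the raw bracketed keys in the drop-chance tree for
    # their translations, in place; "[KEYTYPE_5_TITLE]" gets a trailing
    # asterisk.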
def translate_drop_chances(self, data: dict, lang):
for key, item in data.copy().items():
if not self.is_untranslated(key):
continue
new_key = _(key, lang)
if key == '[KEYTYPE_5_TITLE]':
new_key = f'{new_key}*'
data[new_key] = item.copy()
if key != new_key:
del data[key]
if isinstance(data[new_key], dict):
self.translate_drop_chances(data[new_key], lang)
def get_drop_chances(self, lang):
drop_chances = self.drop_chances.copy()
self.translate_drop_chances(drop_chances, lang)
return drop_chances
def get_current_event(self, lang, emojis):
event = copy.deepcopy(self.weekly_event)
kingdoms = self.search_kingdom(event['kingdom_id'], lang)
if kingdoms:
event['kingdom'] = kingdoms[0]
event['name'] = event['name'].get(lang, _(EVENT_TYPES[event['type']], lang))
event['lore'] = event['lore'].get(lang, '')
event['currencies'] = [{
'name': currency['name'].get(lang, ''),
'value': _('[N_TIMES_POINTS]', lang).replace('%1', str(currency['value']))
} for currency in event['currencies']]
for stage in event['rewards'].keys():
for reward in event['rewards'][stage]['rewards']:
reward_type = reward['type']
reward['type'] = _(reward_type, lang).replace('%1', '').strip()
if reward_type == '[TITLE]':
reward['type'] += ' (' + _(f'[TITLE_{reward["data"]}]', lang) + ')'
if reward_type == '[TROOP]':
reward['type'] = _(self.troops.get(reward['data'])['name'], lang)
for item in ('token', 'badge', 'medal'):
if not event[item]:
continue
event[item] = {
'name': _(f'[WONDER_{event[item]}_NAME]', lang),
'description': _(f'[WONDER_{event[item]}_DESC]', lang),
}
def translate_restriction(r):
if isinstance(r, int):
return emojis.get(COLORS[r])
return _(r, lang)
def translate_battle(b):
result = b.copy()
result['name'] = b['names'].get(lang)
del result['names']
return result
event['restrictions'] = {_(r, lang): ', '.join([translate_restriction(i) for i in v]) for r, v in
event['restrictions'].items() if v}
event['troop'] = _(event['troop'], lang)
if event['weapon_id']:
event['weapon'] = _(self.weapons.get(event['weapon_id'], {'name': ''})['name'], lang)
new_battles = []
for battle in event['battles']:
tb = translate_battle(battle)
if tb not in new_battles:
new_battles.append(tb)
event['battles'] = new_battles
return event
def get_effects(self, lang):
positive = _('[TROOPHELP_ALLPOSITIVESTATUSEFFECTS_1]', lang)
negative = _('[TROOPHELP_ALLNEGATIVESTATUSEFFECTS_1]', lang)
result = {
positive: [],
negative: [],
}
for effect in self.effects:
key = positive if effect in self.positive_effects else negative
result[key].append({
'name': _(f'[TROOPHELP_{effect}_1]', lang),
'description': _(f'[TROOPHELP_{effect}_2]', lang),
})
result[positive] = sorted(result[positive], key=operator.itemgetter('name'))
result[negative] = sorted(result[negative], key=operator.itemgetter('name'))
return result
def get_active_gems(self):
return [g['gem_type'] for g in self.active_gems.values()]
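    # Storm help texts live in translation keys of the form
    # [TROOPHELP_STORM<n>_1] (name) and [TROOPHELP_STORM<n>_2] (description);
    # group the matching entries by their common key prefix.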
@staticmethod
def get_storms(lang):
storms = {}
fields = {
'1': 'name',
'2': 'description',
}
p = re.compile(r'\[TROOPHELP_STORM\d+_\d+')
for key, value in t.translations[lang].items():
if not p.match(key):
continue
field = fields[key[-2]]
storm_key = key[:-2]
storms.setdefault(storm_key, {})[field] = value
return storms
def get_warbands(self, lang):
warbands = [k.copy() for k in self.kingdoms.values() if 'WARBAND' in k['reference_name']]
for warband in warbands:
self.translate_kingdom(warband, lang)
return warbands
def get_map_data(self, lang, location):
if not location:
location = 'krystara'
base_folder = 'Worldmap'
map_data = {
'krystara': {
'title': _('[MAPNAME_MAIN]', lang),
'map': f'{base_folder}/Main/Main_Albedo_full.png',
'water': f'{base_folder}/Main/Water_Main_Albedo_full.png',
'height': f'{base_folder}/Main/Main_Height_full.png',
'blend_mode': 'overlay',
},
'underworld': {
'title': _('[MAPNAME_UNDERWORLD]', lang),
'map': f'{base_folder}/Underworld/Underworld_Albedo_full.png',
'water': f'{base_folder}/Underworld/Water_Underworld_Albedo_full.png',
'height': f'{base_folder}/Underworld/Underworld_Height_full.png',
'blend_mode': 'stereo',
}
}
result = map_data[location]
result['kingdoms'] = []
result['title'] = f"Gary's Gems of War Map: {result['title']}"
def is_pseudo_kingdom(k):
return k['location'] == 'krystara' and k['links'] == {-1}
for kingdom in self.kingdoms.values():
if 'description' not in kingdom:
continue
if kingdom['location'] != location:
continue
if is_pseudo_kingdom(kingdom):
continue
my_kingdom = kingdom.copy()
self.translate_kingdom(my_kingdom, lang)
if self.is_untranslated(my_kingdom['name']):
continue
result['kingdoms'].append(my_kingdom)
return result
| 44.185065 | 118 | 0.557701 | 53,488 | 0.982477 | 0 | 0 | 5,169 | 0.094945 | 329 | 0.006043 | 11,288 | 0.20734 |
4836dce172471538808ff516434e702497a39d34 | 39,841 | py | Python | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | ["BSD-2-Clause"] | null | null | null | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | ["BSD-2-Clause"] | null | null | null | troposphere/sagemaker.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | ["BSD-2-Clause"]
| null | null | null | # Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 51.0.0
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
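# Usage sketch (resource names and ARNs below are placeholders): these classes
# serialise to CloudFormation JSON via troposphere's Template machinery, e.g.
#
#   from troposphere import Template
#   template = Template()
#   template.add_resource(Model(
#       "ExampleModel",
#       ExecutionRoleArn="arn:aws:iam::123456789012:role/ExampleRole",
#       PrimaryContainer=ContainerDefinition(Image="example-image:latest"),
#   ))
#   print(template.to_json())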
class ResourceSpec(AWSProperty):
"""
`ResourceSpec <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-resourcespec.html>`__
"""
props: PropsDictType = {
"InstanceType": (str, False),
"SageMakerImageArn": (str, False),
"SageMakerImageVersionArn": (str, False),
}
class App(AWSObject):
"""
`App <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-app.html>`__
"""
resource_type = "AWS::SageMaker::App"
props: PropsDictType = {
"AppName": (str, True),
"AppType": (str, True),
"DomainId": (str, True),
"ResourceSpec": (ResourceSpec, False),
"Tags": (Tags, False),
"UserProfileName": (str, True),
}
class FileSystemConfig(AWSProperty):
"""
`FileSystemConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-appimageconfig-filesystemconfig.html>`__
"""
props: PropsDictType = {
"DefaultGid": (integer, False),
"DefaultUid": (integer, False),
"MountPath": (str, False),
}
class KernelSpec(AWSProperty):
"""
`KernelSpec <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-appimageconfig-kernelspec.html>`__
"""
props: PropsDictType = {
"DisplayName": (str, False),
"Name": (str, True),
}
class KernelGatewayImageConfig(AWSProperty):
"""
`KernelGatewayImageConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-appimageconfig-kernelgatewayimageconfig.html>`__
"""
props: PropsDictType = {
"FileSystemConfig": (FileSystemConfig, False),
"KernelSpecs": ([KernelSpec], True),
}
class AppImageConfig(AWSObject):
"""
`AppImageConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-appimageconfig.html>`__
"""
resource_type = "AWS::SageMaker::AppImageConfig"
props: PropsDictType = {
"AppImageConfigName": (str, True),
"KernelGatewayImageConfig": (KernelGatewayImageConfig, False),
"Tags": (Tags, False),
}
class GitConfig(AWSProperty):
"""
`GitConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-coderepository-gitconfig.html>`__
"""
props: PropsDictType = {
"Branch": (str, False),
"RepositoryUrl": (str, True),
"SecretArn": (str, False),
}
class CodeRepository(AWSObject):
"""
`CodeRepository <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-coderepository.html>`__
"""
resource_type = "AWS::SageMaker::CodeRepository"
props: PropsDictType = {
"CodeRepositoryName": (str, False),
"GitConfig": (GitConfig, True),
"Tags": (Tags, False),
}
class DataQualityAppSpecification(AWSProperty):
"""
`DataQualityAppSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-dataqualityjobdefinition-dataqualityappspecification.html>`__
"""
props: PropsDictType = {
"ContainerArguments": ([str], False),
"ContainerEntrypoint": ([str], False),
"Environment": (dict, False),
"ImageUri": (str, True),
"PostAnalyticsProcessorSourceUri": (str, False),
"RecordPreprocessorSourceUri": (str, False),
}
class ConstraintsResource(AWSProperty):
"""
`ConstraintsResource <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-constraintsresource.html>`__
"""
props: PropsDictType = {
"S3Uri": (str, False),
}
class StatisticsResource(AWSProperty):
"""
`StatisticsResource <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-statisticsresource.html>`__
"""
props: PropsDictType = {
"S3Uri": (str, False),
}
class DataQualityBaselineConfig(AWSProperty):
"""
`DataQualityBaselineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-dataqualityjobdefinition-dataqualitybaselineconfig.html>`__
"""
props: PropsDictType = {
"BaseliningJobName": (str, False),
"ConstraintsResource": (ConstraintsResource, False),
"StatisticsResource": (StatisticsResource, False),
}
class EndpointInput(AWSProperty):
"""
`EndpointInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-endpointinput.html>`__
"""
props: PropsDictType = {
"EndpointName": (str, True),
"LocalPath": (str, True),
"S3DataDistributionType": (str, False),
"S3InputMode": (str, False),
}
class DataQualityJobInput(AWSProperty):
"""
`DataQualityJobInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-dataqualityjobdefinition-dataqualityjobinput.html>`__
"""
props: PropsDictType = {
"EndpointInput": (EndpointInput, True),
}
class S3Output(AWSProperty):
"""
`S3Output <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-s3output.html>`__
"""
props: PropsDictType = {
"LocalPath": (str, True),
"S3UploadMode": (str, False),
"S3Uri": (str, True),
}
class MonitoringOutput(AWSProperty):
"""
`MonitoringOutput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringoutput.html>`__
"""
props: PropsDictType = {
"S3Output": (S3Output, True),
}
class MonitoringOutputConfig(AWSProperty):
"""
`MonitoringOutputConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringoutputconfig.html>`__
"""
props: PropsDictType = {
"KmsKeyId": (str, False),
"MonitoringOutputs": ([MonitoringOutput], True),
}
class ClusterConfig(AWSProperty):
"""
`ClusterConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-clusterconfig.html>`__
"""
props: PropsDictType = {
"InstanceCount": (integer, True),
"InstanceType": (str, True),
"VolumeKmsKeyId": (str, False),
"VolumeSizeInGB": (integer, True),
}
class MonitoringResources(AWSProperty):
"""
`MonitoringResources <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringresources.html>`__
"""
props: PropsDictType = {
"ClusterConfig": (ClusterConfig, True),
}
class VpcConfig(AWSProperty):
"""
`VpcConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-vpcconfig.html>`__
"""
props: PropsDictType = {
"SecurityGroupIds": ([str], True),
"Subnets": ([str], True),
}
class NetworkConfig(AWSProperty):
"""
`NetworkConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-networkconfig.html>`__
"""
props: PropsDictType = {
"EnableInterContainerTrafficEncryption": (boolean, False),
"EnableNetworkIsolation": (boolean, False),
"VpcConfig": (VpcConfig, False),
}
class StoppingCondition(AWSProperty):
"""
`StoppingCondition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-stoppingcondition.html>`__
"""
props: PropsDictType = {
"MaxRuntimeInSeconds": (integer, True),
}
class DataQualityJobDefinition(AWSObject):
"""
`DataQualityJobDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-dataqualityjobdefinition.html>`__
"""
resource_type = "AWS::SageMaker::DataQualityJobDefinition"
props: PropsDictType = {
"DataQualityAppSpecification": (DataQualityAppSpecification, True),
"DataQualityBaselineConfig": (DataQualityBaselineConfig, False),
"DataQualityJobInput": (DataQualityJobInput, True),
"DataQualityJobOutputConfig": (MonitoringOutputConfig, True),
"JobDefinitionName": (str, False),
"JobResources": (MonitoringResources, True),
"NetworkConfig": (NetworkConfig, False),
"RoleArn": (str, True),
"StoppingCondition": (StoppingCondition, False),
"Tags": (Tags, False),
}
class DeviceProperty(AWSProperty):
"""
`DeviceProperty <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-device-device.html>`__
"""
props: PropsDictType = {
"Description": (str, False),
"DeviceName": (str, True),
"IotThingName": (str, False),
}
class Device(AWSObject):
"""
`Device <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-device.html>`__
"""
resource_type = "AWS::SageMaker::Device"
props: PropsDictType = {
"Device": (DeviceProperty, False),
"DeviceFleetName": (str, True),
"Tags": (Tags, False),
}
class EdgeOutputConfig(AWSProperty):
"""
`EdgeOutputConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-devicefleet-edgeoutputconfig.html>`__
"""
props: PropsDictType = {
"KmsKeyId": (str, False),
"S3OutputLocation": (str, True),
}
class DeviceFleet(AWSObject):
"""
`DeviceFleet <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-devicefleet.html>`__
"""
resource_type = "AWS::SageMaker::DeviceFleet"
props: PropsDictType = {
"Description": (str, False),
"DeviceFleetName": (str, True),
"OutputConfig": (EdgeOutputConfig, True),
"RoleArn": (str, True),
"Tags": (Tags, False),
}
class JupyterServerAppSettings(AWSProperty):
"""
`JupyterServerAppSettings <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-jupyterserverappsettings.html>`__
"""
props: PropsDictType = {
"DefaultResourceSpec": (ResourceSpec, False),
}
class CustomImage(AWSProperty):
"""
`CustomImage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-customimage.html>`__
"""
props: PropsDictType = {
"AppImageConfigName": (str, True),
"ImageName": (str, True),
"ImageVersionNumber": (integer, False),
}
class KernelGatewayAppSettings(AWSProperty):
"""
`KernelGatewayAppSettings <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-kernelgatewayappsettings.html>`__
"""
props: PropsDictType = {
"CustomImages": ([CustomImage], False),
"DefaultResourceSpec": (ResourceSpec, False),
}
class SharingSettings(AWSProperty):
"""
`SharingSettings <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-sharingsettings.html>`__
"""
props: PropsDictType = {
"NotebookOutputOption": (str, False),
"S3KmsKeyId": (str, False),
"S3OutputPath": (str, False),
}
class UserSettings(AWSProperty):
"""
`UserSettings <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-userprofile-usersettings.html>`__
"""
props: PropsDictType = {
"ExecutionRole": (str, False),
"JupyterServerAppSettings": (JupyterServerAppSettings, False),
"KernelGatewayAppSettings": (KernelGatewayAppSettings, False),
"SecurityGroups": ([str], False),
"SharingSettings": (SharingSettings, False),
}
class Domain(AWSObject):
"""
`Domain <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-domain.html>`__
"""
resource_type = "AWS::SageMaker::Domain"
props: PropsDictType = {
"AppNetworkAccessType": (str, False),
"AuthMode": (str, True),
"DefaultUserSettings": (UserSettings, True),
"DomainName": (str, True),
"KmsKeyId": (str, False),
"SubnetIds": ([str], True),
"Tags": (Tags, False),
"VpcId": (str, True),
}
class Alarm(AWSProperty):
"""
`Alarm <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-alarm.html>`__
"""
props: PropsDictType = {
"AlarmName": (str, True),
}
class AutoRollbackConfig(AWSProperty):
"""
`AutoRollbackConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-autorollbackconfig.html>`__
"""
props: PropsDictType = {
"Alarms": ([Alarm], True),
}
class CapacitySize(AWSProperty):
"""
`CapacitySize <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-capacitysize.html>`__
"""
props: PropsDictType = {
"Type": (str, True),
"Value": (integer, True),
}
class TrafficRoutingConfig(AWSProperty):
"""
`TrafficRoutingConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-trafficroutingconfig.html>`__
"""
props: PropsDictType = {
"CanarySize": (CapacitySize, False),
"LinearStepSize": (CapacitySize, False),
"Type": (str, True),
"WaitIntervalInSeconds": (integer, False),
}
class BlueGreenUpdatePolicy(AWSProperty):
"""
`BlueGreenUpdatePolicy <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-bluegreenupdatepolicy.html>`__
"""
props: PropsDictType = {
"MaximumExecutionTimeoutInSeconds": (integer, False),
"TerminationWaitInSeconds": (integer, False),
"TrafficRoutingConfiguration": (TrafficRoutingConfig, True),
}
class DeploymentConfig(AWSProperty):
"""
`DeploymentConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-deploymentconfig.html>`__
"""
props: PropsDictType = {
"AutoRollbackConfiguration": (AutoRollbackConfig, False),
"BlueGreenUpdatePolicy": (BlueGreenUpdatePolicy, True),
}
class VariantProperty(AWSProperty):
"""
`VariantProperty <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpoint-variantproperty.html>`__
"""
props: PropsDictType = {
"VariantPropertyType": (str, False),
}
class Endpoint(AWSObject):
"""
`Endpoint <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpoint.html>`__
"""
resource_type = "AWS::SageMaker::Endpoint"
props: PropsDictType = {
"DeploymentConfig": (DeploymentConfig, False),
"EndpointConfigName": (str, True),
"EndpointName": (str, False),
"ExcludeRetainedVariantProperties": ([VariantProperty], False),
"RetainAllVariantProperties": (boolean, False),
"RetainDeploymentConfig": (boolean, False),
"Tags": (Tags, False),
}
class AsyncInferenceClientConfig(AWSProperty):
"""
`AsyncInferenceClientConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-asyncinferenceclientconfig.html>`__
"""
props: PropsDictType = {
"MaxConcurrentInvocationsPerInstance": (integer, False),
}
class AsyncInferenceNotificationConfig(AWSProperty):
"""
`AsyncInferenceNotificationConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-asyncinferencenotificationconfig.html>`__
"""
props: PropsDictType = {
"ErrorTopic": (str, False),
"SuccessTopic": (str, False),
}
class AsyncInferenceOutputConfig(AWSProperty):
"""
`AsyncInferenceOutputConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-asyncinferenceoutputconfig.html>`__
"""
props: PropsDictType = {
"KmsKeyId": (str, False),
"NotificationConfig": (AsyncInferenceNotificationConfig, False),
"S3OutputPath": (str, True),
}
class AsyncInferenceConfig(AWSProperty):
"""
`AsyncInferenceConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-asyncinferenceconfig.html>`__
"""
props: PropsDictType = {
"ClientConfig": (AsyncInferenceClientConfig, False),
"OutputConfig": (AsyncInferenceOutputConfig, True),
}
class CaptureContentTypeHeader(AWSProperty):
"""
`CaptureContentTypeHeader <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-datacaptureconfig-capturecontenttypeheader.html>`__
"""
props: PropsDictType = {
"CsvContentTypes": ([str], False),
"JsonContentTypes": ([str], False),
}
class CaptureOption(AWSProperty):
"""
`CaptureOption <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-captureoption.html>`__
"""
props: PropsDictType = {
"CaptureMode": (str, True),
}
class DataCaptureConfig(AWSProperty):
"""
`DataCaptureConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-datacaptureconfig.html>`__
"""
props: PropsDictType = {
"CaptureContentTypeHeader": (CaptureContentTypeHeader, False),
"CaptureOptions": ([CaptureOption], True),
"DestinationS3Uri": (str, True),
"EnableCapture": (boolean, False),
"InitialSamplingPercentage": (integer, True),
"KmsKeyId": (str, False),
}
class ServerlessConfig(AWSProperty):
"""
`ServerlessConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-productionvariant-serverlessconfig.html>`__
"""
props: PropsDictType = {
"MaxConcurrency": (integer, True),
"MemorySizeInMB": (integer, True),
}
class ProductionVariant(AWSProperty):
"""
`ProductionVariant <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-endpointconfig-productionvariant.html>`__
"""
props: PropsDictType = {
"AcceleratorType": (str, False),
"InitialInstanceCount": (integer, False),
"InitialVariantWeight": (double, True),
"InstanceType": (str, False),
"ModelName": (str, True),
"ServerlessConfig": (ServerlessConfig, False),
"VariantName": (str, True),
}
class EndpointConfig(AWSObject):
"""
`EndpointConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpointconfig.html>`__
"""
resource_type = "AWS::SageMaker::EndpointConfig"
props: PropsDictType = {
"AsyncInferenceConfig": (AsyncInferenceConfig, False),
"DataCaptureConfig": (DataCaptureConfig, False),
"EndpointConfigName": (str, False),
"KmsKeyId": (str, False),
"ProductionVariants": ([ProductionVariant], True),
"Tags": (Tags, False),
}
class FeatureDefinition(AWSProperty):
"""
`FeatureDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-featuregroup-featuredefinition.html>`__
"""
props: PropsDictType = {
"FeatureName": (str, True),
"FeatureType": (str, True),
}
class FeatureGroup(AWSObject):
"""
`FeatureGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-featuregroup.html>`__
"""
resource_type = "AWS::SageMaker::FeatureGroup"
props: PropsDictType = {
"Description": (str, False),
"EventTimeFeatureName": (str, True),
"FeatureDefinitions": ([FeatureDefinition], True),
"FeatureGroupName": (str, True),
"OfflineStoreConfig": (dict, False),
"OnlineStoreConfig": (dict, False),
"RecordIdentifierFeatureName": (str, True),
"RoleArn": (str, False),
"Tags": (Tags, False),
}
class Image(AWSObject):
"""
`Image <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-image.html>`__
"""
resource_type = "AWS::SageMaker::Image"
props: PropsDictType = {
"ImageDescription": (str, False),
"ImageDisplayName": (str, False),
"ImageName": (str, True),
"ImageRoleArn": (str, True),
"Tags": (Tags, False),
}
class ImageVersion(AWSObject):
"""
`ImageVersion <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-imageversion.html>`__
"""
resource_type = "AWS::SageMaker::ImageVersion"
props: PropsDictType = {
"BaseImage": (str, True),
"ImageName": (str, True),
}
class RepositoryAuthConfig(AWSProperty):
"""
`RepositoryAuthConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition-imageconfig-repositoryauthconfig.html>`__
"""
props: PropsDictType = {
"RepositoryCredentialsProviderArn": (str, True),
}
class ImageConfig(AWSProperty):
"""
`ImageConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition-imageconfig.html>`__
"""
props: PropsDictType = {
"RepositoryAccessMode": (str, True),
"RepositoryAuthConfig": (RepositoryAuthConfig, False),
}
class MultiModelConfig(AWSProperty):
"""
`MultiModelConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition-multimodelconfig.html>`__
"""
props: PropsDictType = {
"ModelCacheSetting": (str, False),
}
class ContainerDefinition(AWSProperty):
"""
`ContainerDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-containerdefinition.html>`__
"""
props: PropsDictType = {
"ContainerHostname": (str, False),
"Environment": (dict, False),
"Image": (str, False),
"ImageConfig": (ImageConfig, False),
"InferenceSpecificationName": (str, False),
"Mode": (str, False),
"ModelDataUrl": (str, False),
"ModelPackageName": (str, False),
"MultiModelConfig": (MultiModelConfig, False),
}
class InferenceExecutionConfig(AWSProperty):
"""
`InferenceExecutionConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-model-inferenceexecutionconfig.html>`__
"""
props: PropsDictType = {
"Mode": (str, True),
}
class Model(AWSObject):
"""
`Model <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-model.html>`__
"""
resource_type = "AWS::SageMaker::Model"
props: PropsDictType = {
"Containers": ([ContainerDefinition], False),
"EnableNetworkIsolation": (boolean, False),
"ExecutionRoleArn": (str, True),
"InferenceExecutionConfig": (InferenceExecutionConfig, False),
"ModelName": (str, False),
"PrimaryContainer": (ContainerDefinition, False),
"Tags": (Tags, False),
"VpcConfig": (VpcConfig, False),
}
class ModelBiasAppSpecification(AWSProperty):
"""
`ModelBiasAppSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelbiasjobdefinition-modelbiasappspecification.html>`__
"""
props: PropsDictType = {
"ConfigUri": (str, True),
"Environment": (dict, False),
"ImageUri": (str, True),
}
class ModelBiasBaselineConfig(AWSProperty):
"""
`ModelBiasBaselineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelbiasjobdefinition-modelbiasbaselineconfig.html>`__
"""
props: PropsDictType = {
"BaseliningJobName": (str, False),
"ConstraintsResource": (ConstraintsResource, False),
}
class MonitoringGroundTruthS3Input(AWSProperty):
"""
`MonitoringGroundTruthS3Input <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelqualityjobdefinition-monitoringgroundtruths3input.html>`__
"""
props: PropsDictType = {
"S3Uri": (str, True),
}
class ModelBiasJobInput(AWSProperty):
"""
`ModelBiasJobInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelbiasjobdefinition-modelbiasjobinput.html>`__
"""
props: PropsDictType = {
"EndpointInput": (EndpointInput, True),
"GroundTruthS3Input": (MonitoringGroundTruthS3Input, True),
}
class ModelBiasJobDefinition(AWSObject):
"""
`ModelBiasJobDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-modelbiasjobdefinition.html>`__
"""
resource_type = "AWS::SageMaker::ModelBiasJobDefinition"
props: PropsDictType = {
"JobDefinitionName": (str, False),
"JobResources": (MonitoringResources, True),
"ModelBiasAppSpecification": (ModelBiasAppSpecification, True),
"ModelBiasBaselineConfig": (ModelBiasBaselineConfig, False),
"ModelBiasJobInput": (ModelBiasJobInput, True),
"ModelBiasJobOutputConfig": (MonitoringOutputConfig, True),
"NetworkConfig": (NetworkConfig, False),
"RoleArn": (str, True),
"StoppingCondition": (StoppingCondition, False),
"Tags": (Tags, False),
}
class ModelExplainabilityAppSpecification(AWSProperty):
"""
`ModelExplainabilityAppSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelexplainabilityjobdefinition-modelexplainabilityappspecification.html>`__
"""
props: PropsDictType = {
"ConfigUri": (str, True),
"Environment": (dict, False),
"ImageUri": (str, True),
}
class ModelExplainabilityBaselineConfig(AWSProperty):
"""
`ModelExplainabilityBaselineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelexplainabilityjobdefinition-modelexplainabilitybaselineconfig.html>`__
"""
props: PropsDictType = {
"BaseliningJobName": (str, False),
"ConstraintsResource": (ConstraintsResource, False),
}
class ModelExplainabilityJobInput(AWSProperty):
"""
`ModelExplainabilityJobInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelexplainabilityjobdefinition-modelexplainabilityjobinput.html>`__
"""
props: PropsDictType = {
"EndpointInput": (EndpointInput, True),
}
class ModelExplainabilityJobDefinition(AWSObject):
"""
`ModelExplainabilityJobDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-modelexplainabilityjobdefinition.html>`__
"""
resource_type = "AWS::SageMaker::ModelExplainabilityJobDefinition"
props: PropsDictType = {
"JobDefinitionName": (str, False),
"JobResources": (MonitoringResources, True),
"ModelExplainabilityAppSpecification": (
ModelExplainabilityAppSpecification,
True,
),
"ModelExplainabilityBaselineConfig": (ModelExplainabilityBaselineConfig, False),
"ModelExplainabilityJobInput": (ModelExplainabilityJobInput, True),
"ModelExplainabilityJobOutputConfig": (MonitoringOutputConfig, True),
"NetworkConfig": (NetworkConfig, False),
"RoleArn": (str, True),
"StoppingCondition": (StoppingCondition, False),
"Tags": (Tags, False),
}
class ModelPackageGroup(AWSObject):
"""
`ModelPackageGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-modelpackagegroup.html>`__
"""
resource_type = "AWS::SageMaker::ModelPackageGroup"
props: PropsDictType = {
"ModelPackageGroupDescription": (str, False),
"ModelPackageGroupName": (str, True),
"ModelPackageGroupPolicy": (dict, False),
"Tags": (Tags, False),
}
class ModelQualityAppSpecification(AWSProperty):
"""
`ModelQualityAppSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelqualityjobdefinition-modelqualityappspecification.html>`__
"""
props: PropsDictType = {
"ContainerArguments": ([str], False),
"ContainerEntrypoint": ([str], False),
"Environment": (dict, False),
"ImageUri": (str, True),
"PostAnalyticsProcessorSourceUri": (str, False),
"ProblemType": (str, True),
"RecordPreprocessorSourceUri": (str, False),
}
class ModelQualityBaselineConfig(AWSProperty):
"""
`ModelQualityBaselineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelqualityjobdefinition-modelqualitybaselineconfig.html>`__
"""
props: PropsDictType = {
"BaseliningJobName": (str, False),
"ConstraintsResource": (ConstraintsResource, False),
}
class ModelQualityJobInput(AWSProperty):
"""
`ModelQualityJobInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-modelqualityjobdefinition-modelqualityjobinput.html>`__
"""
props: PropsDictType = {
"EndpointInput": (EndpointInput, True),
"GroundTruthS3Input": (MonitoringGroundTruthS3Input, True),
}
class ModelQualityJobDefinition(AWSObject):
"""
`ModelQualityJobDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-modelqualityjobdefinition.html>`__
"""
resource_type = "AWS::SageMaker::ModelQualityJobDefinition"
props: PropsDictType = {
"JobDefinitionName": (str, False),
"JobResources": (MonitoringResources, True),
"ModelQualityAppSpecification": (ModelQualityAppSpecification, True),
"ModelQualityBaselineConfig": (ModelQualityBaselineConfig, False),
"ModelQualityJobInput": (ModelQualityJobInput, True),
"ModelQualityJobOutputConfig": (MonitoringOutputConfig, True),
"NetworkConfig": (NetworkConfig, False),
"RoleArn": (str, True),
"StoppingCondition": (StoppingCondition, False),
"Tags": (Tags, False),
}
class MonitoringExecutionSummary(AWSProperty):
"""
`MonitoringExecutionSummary <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringexecutionsummary.html>`__
"""
props: PropsDictType = {
"CreationTime": (str, True),
"EndpointName": (str, False),
"FailureReason": (str, False),
"LastModifiedTime": (str, True),
"MonitoringExecutionStatus": (str, True),
"MonitoringScheduleName": (str, True),
"ProcessingJobArn": (str, False),
"ScheduledTime": (str, True),
}
class BaselineConfig(AWSProperty):
"""
`BaselineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-baselineconfig.html>`__
"""
props: PropsDictType = {
"ConstraintsResource": (ConstraintsResource, False),
"StatisticsResource": (StatisticsResource, False),
}
class MonitoringAppSpecification(AWSProperty):
"""
`MonitoringAppSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringappspecification.html>`__
"""
props: PropsDictType = {
"ContainerArguments": ([str], False),
"ContainerEntrypoint": ([str], False),
"ImageUri": (str, True),
"PostAnalyticsProcessorSourceUri": (str, False),
"RecordPreprocessorSourceUri": (str, False),
}
class MonitoringInput(AWSProperty):
"""
`MonitoringInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringinput.html>`__
"""
props: PropsDictType = {
"EndpointInput": (EndpointInput, True),
}
class MonitoringJobDefinition(AWSProperty):
"""
`MonitoringJobDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringjobdefinition.html>`__
"""
props: PropsDictType = {
"BaselineConfig": (BaselineConfig, False),
"Environment": (dict, False),
"MonitoringAppSpecification": (MonitoringAppSpecification, True),
"MonitoringInputs": ([MonitoringInput], True),
"MonitoringOutputConfig": (MonitoringOutputConfig, True),
"MonitoringResources": (MonitoringResources, True),
"NetworkConfig": (NetworkConfig, False),
"RoleArn": (str, True),
"StoppingCondition": (StoppingCondition, False),
}
class ScheduleConfig(AWSProperty):
"""
`ScheduleConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-scheduleconfig.html>`__
"""
props: PropsDictType = {
"ScheduleExpression": (str, True),
}
class MonitoringScheduleConfig(AWSProperty):
"""
`MonitoringScheduleConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-monitoringschedule-monitoringscheduleconfig.html>`__
"""
props: PropsDictType = {
"MonitoringJobDefinition": (MonitoringJobDefinition, False),
"MonitoringJobDefinitionName": (str, False),
"MonitoringType": (str, False),
"ScheduleConfig": (ScheduleConfig, False),
}
class MonitoringSchedule(AWSObject):
"""
`MonitoringSchedule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-monitoringschedule.html>`__
"""
resource_type = "AWS::SageMaker::MonitoringSchedule"
props: PropsDictType = {
"EndpointName": (str, False),
"FailureReason": (str, False),
"LastMonitoringExecutionSummary": (MonitoringExecutionSummary, False),
"MonitoringScheduleConfig": (MonitoringScheduleConfig, True),
"MonitoringScheduleName": (str, True),
"MonitoringScheduleStatus": (str, False),
"Tags": (Tags, False),
}
class NotebookInstance(AWSObject):
"""
`NotebookInstance <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstance.html>`__
"""
resource_type = "AWS::SageMaker::NotebookInstance"
props: PropsDictType = {
"AcceleratorTypes": ([str], False),
"AdditionalCodeRepositories": ([str], False),
"DefaultCodeRepository": (str, False),
"DirectInternetAccess": (str, False),
"InstanceType": (str, True),
"KmsKeyId": (str, False),
"LifecycleConfigName": (str, False),
"NotebookInstanceName": (str, False),
"PlatformIdentifier": (str, False),
"RoleArn": (str, True),
"RootAccess": (str, False),
"SecurityGroupIds": ([str], False),
"SubnetId": (str, False),
"Tags": (Tags, False),
"VolumeSizeInGB": (integer, False),
}
class NotebookInstanceLifecycleHook(AWSProperty):
"""
`NotebookInstanceLifecycleHook <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-notebookinstancelifecycleconfig-notebookinstancelifecyclehook.html>`__
"""
props: PropsDictType = {
"Content": (str, False),
}
class NotebookInstanceLifecycleConfig(AWSObject):
"""
`NotebookInstanceLifecycleConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstancelifecycleconfig.html>`__
"""
resource_type = "AWS::SageMaker::NotebookInstanceLifecycleConfig"
props: PropsDictType = {
"NotebookInstanceLifecycleConfigName": (str, False),
"OnCreate": ([NotebookInstanceLifecycleHook], False),
"OnStart": ([NotebookInstanceLifecycleHook], False),
}
class Pipeline(AWSObject):
"""
`Pipeline <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-pipeline.html>`__
"""
resource_type = "AWS::SageMaker::Pipeline"
props: PropsDictType = {
"PipelineDefinition": (dict, True),
"PipelineDescription": (str, False),
"PipelineDisplayName": (str, False),
"PipelineName": (str, True),
"RoleArn": (str, True),
"Tags": (Tags, False),
}
class Project(AWSObject):
"""
`Project <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-project.html>`__
"""
resource_type = "AWS::SageMaker::Project"
props: PropsDictType = {
"ProjectDescription": (str, False),
"ProjectName": (str, True),
"ServiceCatalogProvisioningDetails": (dict, True),
"Tags": (Tags, False),
}
class UserProfile(AWSObject):
"""
`UserProfile <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-userprofile.html>`__
"""
resource_type = "AWS::SageMaker::UserProfile"
props: PropsDictType = {
"DomainId": (str, True),
"SingleSignOnUserIdentifier": (str, False),
"SingleSignOnUserValue": (str, False),
"Tags": (Tags, False),
"UserProfileName": (str, True),
"UserSettings": (UserSettings, False),
}
class CognitoMemberDefinition(AWSProperty):
"""
`CognitoMemberDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-workteam-cognitomemberdefinition.html>`__
"""
props: PropsDictType = {
"CognitoClientId": (str, True),
"CognitoUserGroup": (str, True),
"CognitoUserPool": (str, True),
}
class MemberDefinition(AWSProperty):
"""
`MemberDefinition <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-workteam-memberdefinition.html>`__
"""
props: PropsDictType = {
"CognitoMemberDefinition": (CognitoMemberDefinition, True),
}
class NotificationConfiguration(AWSProperty):
"""
`NotificationConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sagemaker-workteam-notificationconfiguration.html>`__
"""
props: PropsDictType = {
"NotificationTopicArn": (str, True),
}
class Workteam(AWSObject):
"""
`Workteam <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-workteam.html>`__
"""
resource_type = "AWS::SageMaker::Workteam"
props: PropsDictType = {
"Description": (str, False),
"MemberDefinitions": ([MemberDefinition], False),
"NotificationConfiguration": (NotificationConfiguration, False),
"Tags": (Tags, False),
"WorkteamName": (str, False),
}
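
# Minimal usage sketch (added illustration, not part of the original module):
# attach a serverless endpoint configuration to a troposphere Template.
if __name__ == "__main__":
    from troposphere import Template

    template = Template()
    template.add_resource(
        EndpointConfig(
            "MyEndpointConfig",
            ProductionVariants=[
                ProductionVariant(
                    ModelName="my-model",  # placeholder name for illustration
                    VariantName="AllTraffic",
                    InitialVariantWeight=1.0,
                    ServerlessConfig=ServerlessConfig(
                        MaxConcurrency=5, MemorySizeInMB=2048
                    ),
                )
            ],
        )
    )
    print(template.to_json())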
| 32.207761 | 206 | 0.680681 | 39,242 | 0.984965 | 0 | 0 | 0 | 0 | 0 | 0 | 21,883 | 0.549258 |
483774235763ac392213eaed3e87eadcdbd2e771 | 1,199 | py | Python | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
]
| null | null | null | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
]
| null | null | null | src/lib/divergence.py | evolutics/sparse-approximation | fda419b2ca0f6563a4668bae23ca0b94936ff8e8 | [
"MIT"
]
| null | null | null | import math
from numpy import linalg
from scipy import stats
from scipy.spatial import distance
import numpy
def euclidean(p, Q):
return numpy.apply_along_axis(lambda q: linalg.norm(p - q), 0, Q)
def hellinger(p, Q):
factor = 1 / math.sqrt(2)
sqrt_p = numpy.sqrt(p)
return factor * numpy.apply_along_axis(
lambda q: linalg.norm(sqrt_p - numpy.sqrt(q)), 0, Q
)
def jensen_shannon_distance(p, Q):
"""Square root of Jensen-Shannon divergence."""
return numpy.apply_along_axis(lambda q: distance.jensenshannon(p, q), 0, Q)
def k_directed(p, Q):
"""See: Jianhua Lin. "Divergence Measures Based on the Shannon Entropy". 1991."""
return numpy.apply_along_axis(lambda q: stats.entropy(p, (p + q) / 2), 0, Q)
def kullback_leibler(p, Q):
return numpy.apply_along_axis(lambda q: stats.entropy(p, q), 0, Q)
def neyman_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / q), 0, Q)
def pearson_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / p), 0, Q)
def total_variation(p, Q):
return 0.5 * numpy.apply_along_axis(lambda q: linalg.norm(p - q, 1), 0, Q)
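
# Minimal usage sketch (added illustration, not part of the original module):
# `p` is a probability vector; each *column* of `Q` is a candidate distribution.
if __name__ == "__main__":
    p = numpy.array([0.2, 0.3, 0.5])
    Q = numpy.array([[0.2, 0.1],
                     [0.3, 0.4],
                     [0.5, 0.5]])
    print(total_variation(p, Q))  # -> [0.  0.1]
    print(kullback_leibler(p, Q))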
| 25.510638 | 85 | 0.683069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.106756 |
4838335f5aaabe1145c8cd0b7af080ad9ce72fb6 | 10,319 | py | Python | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
]
| 74 | 2021-02-22T17:35:52.000Z | 2022-03-29T03:08:12.000Z | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
]
| 1 | 2021-08-15T07:56:39.000Z | 2021-08-15T07:56:39.000Z | vad/data_models/voice_activity.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | [
"MIT"
]
| 9 | 2021-07-22T16:46:11.000Z | 2022-03-27T13:19:24.000Z | import json
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from pathlib import Path
from typing import List, Optional
import numpy as np
from vad.util.time_utils import (
format_timedelta_to_milliseconds,
format_timedelta_to_timecode,
parse_timecode_to_timedelta,
)
class VoiceActivityVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
class VoiceActivityMillisecondsVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
@dataclass
class Activity:
start: timedelta
end: timedelta
@dataclass
class VoiceActivity:
duration: timedelta
activities: List[Activity]
probs_sample_rate: Optional[int]
probs: Optional[List[float]]
@classmethod
def load(cls, path: Path):
with path.open() as file:
voice_activity_data = json.load(file)
return VoiceActivity.from_json(voice_activity_data)
@classmethod
def from_json(cls, voice_activity_data: dict):
version = voice_activity_data["version"]
if version == VoiceActivityVersion.v01.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityVersion.v02.value:
if voice_activity_data["time_format"] == "timecode":
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif voice_activity_data["time_format"] == "millisecond":
voice_activity = cls(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
elif version == VoiceActivityVersion.v03.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(activity["start"]),
end=parse_timecode_to_timedelta(activity["end"]),
)
for activity in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def save(self, path: Path, version: VoiceActivityVersion = VoiceActivityVersion.v03):
voice_activity_data = self.to_json(version)
with path.open("w") as file:
json.dump(voice_activity_data, file, ensure_ascii=False, indent=4)
def to_json(self, version: VoiceActivityVersion = VoiceActivityVersion.v03):
if version == VoiceActivityVersion.v01:
voice_activity_formatted = {
"version": VoiceActivityVersion.v01.value,
"duration": format_timedelta_to_timecode(self.duration),
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v02:
voice_activity_formatted = {
"version": VoiceActivityVersion.v02.value,
"duration": format_timedelta_to_timecode(self.duration),
"time_format": "timecode",
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v03:
voice_activity_formatted = {
"version": VoiceActivityVersion.v03.value,
"duration": format_timedelta_to_timecode(self.duration),
"activities": [
{
"start": format_timedelta_to_timecode(activity.start),
"end": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_formatted
def to_milliseconds(
self, version: VoiceActivityMillisecondsVersion = VoiceActivityMillisecondsVersion.v03
):
if version == VoiceActivityMillisecondsVersion.v02:
voice_activity_milliseconds = {
"version": version.value,
"duration": format_timedelta_to_milliseconds(self.duration),
"time_format": "millisecond",
"voice_activity": [
{
"start_time": format_timedelta_to_milliseconds(activity.start),
"end_time": format_timedelta_to_milliseconds(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityMillisecondsVersion.v03:
voice_activity_milliseconds = {
"version": version.value,
"duration": {"total_milliseconds": format_timedelta_to_milliseconds(self.duration)},
"activities": [
{
"start": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.start)
},
"end": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.end)
},
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_milliseconds
@classmethod
def from_milliseconds(cls, voice_activity_data: dict):
version = voice_activity_data["version"] # version of milliseconds format
if version == VoiceActivityMillisecondsVersion.v02.value:
voice_activity = VoiceActivity(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityMillisecondsVersion.v03.value:
voice_activity = VoiceActivity(
duration=timedelta(
milliseconds=voice_activity_data["duration"]["total_milliseconds"]
),
activities=[
Activity(
start=timedelta(milliseconds=segment["start"]["total_milliseconds"]),
end=timedelta(milliseconds=segment["end"]["total_milliseconds"]),
)
for segment in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
    def to_labels(self, sample_rate: int) -> np.ndarray:
        total_samples = int(self.duration.total_seconds() * sample_rate)
        # np.long was removed in NumPy 1.24; int64 keeps the same semantics
        labels = np.zeros(total_samples, dtype=np.int64)
        for activity in self.activities:
            start_sample = int(activity.start.total_seconds() * sample_rate)
            end_sample = int(activity.end.total_seconds() * sample_rate)
            labels[start_sample:end_sample] = 1
        return labels
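
# Minimal usage sketch (added illustration, not part of the original module):
if __name__ == "__main__":
    va = VoiceActivity(
        duration=timedelta(seconds=2),
        activities=[Activity(start=timedelta(milliseconds=500),
                             end=timedelta(milliseconds=1500))],
        probs_sample_rate=None,
        probs=None,
    )
    print(va.to_labels(sample_rate=10).tolist())  # 20 labels; 1 between 0.5 s and 1.5 s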
| 41.777328 | 100 | 0.560907 | 9,953 | 0.964531 | 0 | 0 | 9,795 | 0.94922 | 0 | 0 | 1,110 | 0.107569 |
4839b9f176bff6bb0c25323ed01d0f68d5ef1760 | 1,807 | py | Python | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
]
| null | null | null | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
]
| null | null | null | face_recognition/project/schema.py | dgr113/face-recognition | edda6ca8fef567d24ae740afd2399b66166f3431 | [
"MIT"
]
| null | null | null | # coding: utf-8
SCHEMA_MAPPING = {
"persons": {
"type": "object",
"patternProperties": {
r"\d+": {
"type": "object",
"properties": {
"first_name": {"type": "string"},
"last_name": {"type": "string"},
},
"patternProperties": {
r".+": {"type": ["integer", "string"]}
},
"required": ["first_name", "last_name"]
}
}
},
"camera": {
"type": "object",
"properties": {
"camera_id": {"type": "integer"},
"camera_close_key": {"type": "string"},
"camera_frame_shape": {"type": "array", "items": {"type": "integer"}, "minItems": 3, "maxItems": 3}
},
"required": ["camera_id", "camera_close_key", "camera_frame_shape"]
},
"model_config": {
"type": "object",
"properties": {
"class_name": {"type": "string"},
"config": {
"type": "object",
"properties": {
"name": {"type": "string"},
"layers": {
"type": "array",
"items": {
"type": "object",
"properties": {
"class_name": {"type": "string"},
"config": {
"type": "object"
}
}
}
}
}
},
"keras_version": {"type": "string"},
"backend": {"type": "string", "enum": ["theano", "tensorflow"]}
}
}
}
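
# Minimal validation sketch (added illustration; assumes the third-party
# `jsonschema` package, which the original module does not import):
if __name__ == "__main__":
    import jsonschema
    camera_conf = {
        "camera_id": 0,
        "camera_close_key": "q",
        "camera_frame_shape": [480, 640, 3],
    }
    jsonschema.validate(camera_conf, SCHEMA_MAPPING["camera"])  # raises on invalid input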
| 30.116667 | 111 | 0.328168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 746 | 0.412839 |
483a75dbbf41e0a2382e74253427bd94ad78ce29 | 303 | py | Python | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
]
| null | null | null | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
]
| null | null | null | resources/www/scripts/recibido.py | miguelarman/Redes-de-comunicaciones-ii-practica1 | 8f90dddcf9025f7d9c08dfb6ca1aa8dc24e9fa13 | [
"MIT"
]
| null | null | null | import sys
import urllib.parse as urlparse
print("Argumentos recibidos por STDIN: ")
try:
for line in sys.stdin:
url = 'foo.com/?' + line
parsed = urlparse.urlparse(url)
        print('Received: {}'.format(urlparse.parse_qs(parsed.query)))
except Exception:
    ignorar = True  # ignore malformed input
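# Example invocation (added illustration):
#   echo "a=1&b=2" | python3 recibido.py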
| 21.642857 | 70 | 0.623762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.194719 |
483a82b33807937515011fa3de571cf7d20b8db3 | 8,620 | py | Python | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
]
| null | null | null | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
]
| 1 | 2020-05-14T13:20:45.000Z | 2020-05-14T19:08:06.000Z | gui/main.py | aman-v1729/CommonAudioVideoCLI | c2245a02bbafd1ff9899dba2b02f246f98538746 | [
"MIT"
]
| 5 | 2020-05-17T17:00:43.000Z | 2020-07-25T06:19:57.000Z | import tkinter
import subprocess
from tkinter import filedialog, messagebox
import os
import pyqrcode
def clip_filename_with_extension(filename):
""" clips long file names """
clipped = filename[filename.rfind("/") + 1 :]
if len(clipped) > 15:
clipped = clipped[:6] + "..." + clipped[clipped.rfind(".") - 4 :]
return clipped
def select_vid_file():
""" Presents file dialog box to select .mp4/.mkv files """
global curr_dir
# print(curr_dir)
global vid_filename
filename = filedialog.askopenfilename(
initialdir=curr_dir,
title="Select Video File",
filetypes=[("Video", ".mp4 .mkv")],
)
if filename.endswith(".mp4") or filename.endswith(".mkv"):
vid_filename = filename
video_btn["text"] = clip_filename_with_extension(vid_filename)
else:
if len(vid_filename) > 0:
pass
else:
video_btn["text"] = "Choose File"
def select_sub_file():
""" Presents file dialog box to select .srt files """
global curr_dir
global sub_filename
filename = filedialog.askopenfilename(
initialdir=curr_dir,
title="Select Subtitle File",
filetypes=[("Subtitle", ".srt")],
)
if filename.endswith(".srt"):
sub_filename = filename
sub_btn["text"] = clip_filename_with_extension(sub_filename)
else:
if len(sub_filename) > 0:
pass
else:
sub_btn["text"] = "Choose File"
def change_sub_state():
""" Enable/Disable subtitle file """
state = allow_sub.get()
if state:
sub_btn["state"] = tkinter.NORMAL
else:
sub_btn["state"] = tkinter.DISABLED
def run_checks_before_play():
""" File selection checks before calling CLI """
global vid_filename, sub_filename
if not (vid_filename.endswith(".mp4")) and not (vid_filename.endswith(".mkv")):
return 1
if allow_sub.get() and not (sub_filename.endswith(".srt")):
return 2
return 0
def generate_qr():
""" Generates QR code for room link """
global link
global photo, qrImage, myQr
print(link)
top = tkinter.Toplevel()
top.title("QR Code")
qr_lbl = tkinter.Label(top)
myQr = pyqrcode.create(link)
qrImage = myQr.xbm(scale=6)
photo = tkinter.BitmapImage(data=qrImage)
qr_lbl.config(image=photo, state=tkinter.NORMAL)
qr_lbl.pack()
def copy_link():
""" Copies room link to clipboard """
global link
root.clipboard_append(link)
copy_link_btn["text"] = "Link Copied!"
def retrieve_link(bash_command):
""" Gets room link retrieved from the CLI """
global link
global curr_dir
print(curr_dir)
    subprocess.Popen(bash_command)
    link_path = os.path.join(curr_dir, "invite_link.txt")
    while not os.path.exists(link_path):
        root.after(2000)  # poll every 2 s until the CLI writes the link file
    with open(link_path, "r") as f:
        link = f.readline()
    print(link)
    os.remove(link_path)
tkinter.messagebox.showinfo(
"Success", "Room Creation Successful! Share the link or scan the QR to join!"
)
success_lbl.config(text="Share this link and enjoy: " + link, state=tkinter.NORMAL)
success_lbl.config(font=("Courier", 14))
success_lbl.grid(row=7, column=0, columnspan=6)
copy_link_btn.config(state=tkinter.NORMAL)
copy_link_btn.grid(row=8, column=0, columnspan=3, sticky=tkinter.E)
qr_gen_btn["state"] = tkinter.NORMAL
qr_gen_btn.grid(row=8, column=3, columnspan=3, sticky=tkinter.W)
def play():
""" Gathers widget configurations to create CLI command """
global curr_dir, vid_filename, sub_filename
err_status = run_checks_before_play()
if err_status == 0:
bash_command = []
bash_command.append("python3")
bash_command.append(curr_dir + "cli/main.py")
bash_command.append("-f")
bash_command.append(vid_filename)
if allow_sub.get():
bash_command.append("-s")
bash_command.append(sub_filename)
if not server.get():
bash_command.append("--web")
quality = audio_quality.get()
if quality == 0:
bash_command.append("--audio-quality")
bash_command.append("low")
elif quality == 2:
bash_command.append("--audio-quality")
bash_command.append("high")
"""
if(show_qr.get()):
bash_command.append('--qr')
"""
if host_control.get():
bash_command.append("--control")
print(bash_command)
for widget in root.winfo_children():
widget["state"] = tkinter.DISABLED
retrieve_link(bash_command)
elif err_status == 1:
tkinter.messagebox.showerror("ERROR", "No video file chosen")
elif err_status == 2:
tkinter.messagebox.showerror("ERROR", "No subtitle file chosen")
def on_closing():
""" Confirms session closing """
if messagebox.askokcancel(
"Quit",
"Closing this window will stop this session."
+ "Are you sure you want to quit?",
):
root.destroy()
if __name__ == "__main__":
global curr_dir
curr_dir = __file__
curr_dir = curr_dir[: curr_dir.rfind("gui/main.py")]
curr_dir = (
subprocess.run("pwd", capture_output=True, text=True).stdout.strip()
+ "/"
+ curr_dir
)
# Create root window
root = tkinter.Tk()
root.title("Common Audio Video GUI")
# Remove previously created links
if os.path.exists("invite_link.txt"):
os.remove("invite_link.txt")
# Place welcome label
wlcm_lbl = tkinter.Label(root, text="Welcome to Common Audio Video Host GUI!")
wlcm_lbl.grid(row=0, column=0, columnspan=5)
# Video File Selection
global vid_filename
vid_filename = ""
video_btn = tkinter.Button(root, text="Select Video File", command=select_vid_file)
video_btn.grid(row=1, column=0, columnspan=5)
# Subtitle File Check
allow_sub = tkinter.IntVar()
check_sub = tkinter.Checkbutton(
root,
text="Add subtitles:",
command=change_sub_state,
variable=allow_sub,
onvalue=1,
offvalue=0,
)
check_sub.deselect()
check_sub.grid(row=2, column=0, columnspan=2, sticky=tkinter.E)
# Subtitle File Selection
global sub_filename
sub_filename = ""
sub_btn = tkinter.Button(
root, text="Choose File", command=select_sub_file, state=tkinter.DISABLED
)
sub_btn.grid(row=2, column=2, columnspan=3, sticky=tkinter.W)
# Server Selection
server = tkinter.IntVar()
server.set(0)
radio_server_web = tkinter.Radiobutton(root, text="Web", variable=server, value=0)
radio_server_local = tkinter.Radiobutton(
root, text="Local", variable=server, value=1
)
tkinter.Label(root, text="Server: ").grid(row=3, column=0, columnspan=2)
radio_server_web.grid(row=3, column=2)
radio_server_local.grid(row=3, column=3)
# Audio Quality Selection
audio_quality = tkinter.IntVar()
audio_quality.set(1)
radio_quality_low = tkinter.Radiobutton(
root, text="Low", variable=audio_quality, value=0
)
radio_quality_medium = tkinter.Radiobutton(
root, text="Medium", variable=audio_quality, value=1
)
radio_quality_high = tkinter.Radiobutton(
root, text="High", variable=audio_quality, value=2
)
quality_lbl = tkinter.Label(root, text="Audio Quality: ")
quality_lbl.grid(row=4, column=0, columnspan=2)
radio_quality_low.grid(row=4, column=2)
radio_quality_medium.grid(row=4, column=3)
radio_quality_high.grid(row=4, column=4)
# Control
host_control = tkinter.IntVar()
check_control = tkinter.Checkbutton(
root, text="Only host can control", variable=host_control, onvalue=1, offvalue=0
)
check_control.deselect()
check_control.grid(row=5, column=0, columnspan=5)
"""
# Show QR
show_qr = tkinter.IntVar()
check_qr = tkinter.Checkbutton(
root, text="Show QR", variable=show_qr, onvalue=1, offvalue=0
)
check_qr.select()
check_qr.grid(row=5, column=3, columnspan=2)
"""
# Play Button
play_btn = tkinter.Button(root, text="PLAY!", command=play)
play_btn.grid(row=6, column=0, columnspan=5)
# Post room creation options
success_lbl = tkinter.Label(root)
copy_link_btn = tkinter.Button(root, text="Copy Link", command=copy_link)
qr_gen_btn = tkinter.Button(root, text="Generate QR", command=generate_qr)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
| 28.448845 | 88 | 0.641763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,956 | 0.226914 |
483af36db857e32be1df5b6afda8e6fc42b22d40 | 2,170 | py | Python | virtex/core/profile.py | chrislarson1/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
]
| 5 | 2020-06-17T06:22:32.000Z | 2022-03-04T09:25:31.000Z | virtex/core/profile.py | virtexlabs/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
]
| null | null | null | virtex/core/profile.py | virtexlabs/virtex | 36eb47d1ace297951cae36edc8a00544b85fed79 | [
"Apache-2.0"
]
| null | null | null | # -------------------------------------------------------------------
# Copyright 2021 Virtex authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# -------------------------------------------------------------------
import asyncio
from functools import wraps
from typing import Callable, Any
from virtex.core.timing import now, async_now
def profile(profile_fn,
*fn_args,
tstamp_fn: Callable[[float, float], Any],
loop: asyncio.BaseEventLoop = None):
"""
Parameters
----------
profile_fn: ``Callable[Any, Any]``
Wrapped function
fn_args: ``Tuple[Any]``
Wrapped function arguments
tstamp_fn: ``Callable[[float, float], Any]``
A function that accepts a start_time,end_time
argument pair and returns the profile value
loop: ``Optional[asyncio.BaseEventLoop]``
Event loop to be used for async functions
"""
def _execute(func):
@wraps(func)
async def timeit_async(*args, **kwargs):
start_time = async_now(loop)
result = await func(*args, **kwargs)
end_time = async_now(loop)
profile_fn(*fn_args, tstamp_fn(start_time, end_time))
return result
@wraps(func)
def timeit(*args, **kwargs):
start_time = now()
result = func(*args, **kwargs)
end_time = now()
profile_fn(*fn_args, tstamp_fn(start_time, end_time))
return result
if asyncio.iscoroutinefunction(func):
assert loop is not None
return timeit_async
return timeit
return _execute
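
# Minimal usage sketch (added illustration, not part of the original module):
# collect elapsed wall-clock times for a synchronous function into a list.
if __name__ == "__main__":
    timings = []

    @profile(timings.append, tstamp_fn=lambda start, end: end - start)
    def work():
        return sum(range(10000))

    work()
    print(timings)  # e.g. [0.0001...]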
| 31.911765 | 69 | 0.602765 | 0 | 0 | 0 | 0 | 526 | 0.242396 | 261 | 0.120276 | 1,144 | 0.527189 |
483bb446decbf48fa9ae87d928153944790671cf | 4,855 | py | Python | apps/molecular_generation/JT_VAE/src/mol_tree.py | agave233/PaddleHelix | e5578f72c2a203a27d9df7da111f1ced826c1429 | [
"Apache-2.0"
]
| 454 | 2020-11-21T01:02:45.000Z | 2022-03-29T12:53:40.000Z | apps/molecular_generation/JT_VAE/src/mol_tree.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
]
| 161 | 2020-12-12T06:35:54.000Z | 2022-03-27T11:31:13.000Z | apps/molecular_generation/JT_VAE/src/mol_tree.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
]
| 108 | 2020-12-07T09:01:10.000Z | 2022-03-31T14:42:29.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MolTree"""
import rdkit
import rdkit.Chem as Chem
from src.chemutils import get_clique_mol, tree_decomp, get_mol, get_smiles, set_atommap, enum_assemble, decode_stereo
from src.vocab import Vocab
class MolTreeNode(object):
"""MolTreeNode"""
def __init__(self, smiles, clique=[]):
self.smiles = smiles
self.mol = get_mol(self.smiles)
self.clique = [x for x in clique]
self.neighbors = []
def add_neighbor(self, nei_node):
"""add a neighbor node """
self.neighbors.append(nei_node)
def recover(self, original_mol):
"""tbd"""
clique = []
clique.extend(self.clique)
if not self.is_leaf:
for cidx in self.clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(self.nid)
for nei_node in self.neighbors:
clique.extend(nei_node.clique)
if nei_node.is_leaf:
continue
for cidx in nei_node.clique:
if cidx not in self.clique or len(nei_node.clique) == 1:
atom = original_mol.GetAtomWithIdx(cidx)
atom.SetAtomMapNum(nei_node.nid)
clique = list(set(clique))
label_mol = get_clique_mol(original_mol, clique)
self.label = Chem.MolToSmiles(Chem.MolFromSmiles(get_smiles(label_mol)))
for cidx in clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(0)
return self.label
def assemble(self):
"""get candidate subgraph info"""
neighbors = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cands, aroma = enum_assemble(self, neighbors, [], [])
new_cands = [cand for i, cand in enumerate(cands) if aroma[i] >= 0]
if len(new_cands) > 0:
cands = new_cands
if len(cands) > 0:
self.cands, _ = zip(*cands)
self.cands = list(self.cands)
else:
self.cands = []
class MolTree(object):
"""MolTree"""
def __init__(self, smiles):
self.smiles = smiles
self.mol = get_mol(smiles)
cliques, edges = tree_decomp(self.mol)
self.nodes = []
root = 0
for i, c in enumerate(cliques):
cmol = get_clique_mol(self.mol, c)
node = MolTreeNode(get_smiles(cmol), c)
self.nodes.append(node)
if min(c) == 0: root = i
for x, y in edges:
self.nodes[x].add_neighbor(self.nodes[y])
self.nodes[y].add_neighbor(self.nodes[x])
if root > 0:
self.nodes[0], self.nodes[root] = self.nodes[root], self.nodes[0]
for i, node in enumerate(self.nodes):
node.nid = i + 1
if len(node.neighbors) > 1:
set_atommap(node.mol, node.nid)
node.is_leaf = (len(node.neighbors) == 1)
def size(self):
"""return nodes nums"""
return len(self.nodes)
def recover(self):
"""recover nodes"""
for node in self.nodes:
node.recover(self.mol)
def assemble(self):
"""assemble nodes"""
for node in self.nodes:
node.assemble()
def dfs(node, fa_idx):
"""dfs"""
max_depth = 0
for child in node.neighbors:
if child.idx == fa_idx: continue
max_depth = max(max_depth, dfs(child, node.idx))
return max_depth + 1
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', required=True)
parser.add_argument('--vocab_path', required=True)
args = parser.parse_args()
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
with open(args.train_path, 'r') as f:
data = f.read().splitlines()
cset = set()
for item in data:
smiles = item.split()[0]
mol = MolTree(smiles)
for c in mol.nodes:
cset.add(c.smiles)
with open(args.vocab_path, 'w') as f:
for c in cset:
f.write(c + '\n')
| 30.923567 | 117 | 0.603502 | 3,181 | 0.655201 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.170752 |
484313fcfa513337e375cc555180add4dbd721a7 | 1,663 | py | Python | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
]
| 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
]
| null | null | null | torch/indicator/vision/object_detection/iou.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
]
| null | null | null | # coding = utf-8
from abc import ABCMeta, abstractmethod
import torch
from Putil.torch.indicator.vision.object_detection import box
##@brief compute the IoU of two boxes from their corner coordinates
# @note
# @return
def _iou(x11, y11, x12, y12, x21, y21, x22, y22):
cap, cup = box._cap_cup(x11, y11, x12, y12, x21, y21, x22, y22)
return cap / cup
def _cap_cup_iou(cap, cup):
return cap / cup
##@brief Compute IoU on tensors of shape [batch, box, ...], where a box is
# [top_left_x, top_left_y, width, height]. The result has shape [batch, 1, ...]
# with the IoU value in the second dimension. Cells with no ground-truth box
# are represented by [0, 0, 0, 0], so different IoU variants yield different
# values for the no-gt case, which must be noted explicitly. In general,
# computing the MeanIoU of a batch requires ... (comment truncated in source)
# @note
class iou(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
    ##@brief Return the index of the exact IoU value among this object's return
    # values; some variants return multiple items (intermediate data, the base
    # IoU, etc.), so this interface exposes where the corresponding IoU sits.
    # @return int index
@abstractmethod
def iou_index(self):
pass
@abstractmethod
def iou_mean(self, iou):
pass
class MeanIoU(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
pass
def forward(self, iou, obj_gt):
iou_filtered = iou * obj_gt
iou = torch.nansum(iou_filtered) / ((torch.isnan(iou_filtered).eq(False) * obj_gt).sum() + 1e-32)
return iou
##@brief standard IoU for boxes given as [top_left_x, top_left_y, width, height]
# @note
class IoU(iou):
def iou_index(self):
return 0
def __init__(self):
iou.__init__(self)
pass
def forward(self, box1, box2):
box1 = box._tlwh_to_tlbr(box1)
box2 = box._tlwh_to_tlbr(box2)
x11, y11, x12, y12 = box._to_xyxy(box1)
x21, y21, x22, y22 = box._to_xyxy(box2)
iou = _iou(x11, y11, x12, y12, x21, y21, x22, y22)
        # returned as a 1-tuple so that iou_index() == 0 addresses the IoU value
        return iou,
| 26.396825 | 105 | 0.6362 | 1,154 | 0.589678 | 0 | 0 | 110 | 0.056208 | 0 | 0 | 675 | 0.344916 |
4843692979b67bbb7eade27d08ade8ca10f18066 | 2,012 | py | Python | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
]
| null | null | null | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
]
| null | null | null | magPi_05_mountains.py | oniMoNaku/thePit | f82d2dc70346e6188fca493a4b9373aa99ccfa32 | [
"Unlicense"
]
| null | null | null | # today is 389f
# the python pit
# magPi - 05
# MOUNTAINS
import os, pygame; from pygame.locals import *
pygame.init(); clock = pygame.time.Clock()
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Mountains")
screen=pygame.display.set_mode([600,382],0,32)
sky = pygame.Surface((600,255))
r=0; g=64; b=128
for l in range (0,255):
pygame.draw.rect(sky,(r,g,b),(0,l-1,600,l))
r=r+1;g=g+1;b=b+1
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
ground = pygame.Surface((600,128))
r=192; g=255; b=192
for l in range (0,128):
pygame.draw.rect(ground,(r,g,b),(0,l-2,600,l))
r=r-2;g=g-2;b=b-2
if r<=0: r=0
if g<=0: g=0
if b<=0: b=0
# Add in an extra surface for the mountains
mountain = pygame.Surface((600,128))
mountain.set_colorkey([0,0,0]) # Black is transparent
r=96; g=64; b=255
for l in range (0,128):
pygame.draw.rect(mountain,(r,g,b),(0,l-2,600,l))
r=r+2;g=g+2;b=b+2
if r>=255: r=255
if g>=255: g=255
if b>=255: b=255
# Draw some black (Transparent) polygons to create mountain peaks
# The screen is 600 wide so I've drawn 10 polygons at 60 pixels wide each
pygame.draw.polygon(mountain,[0,0,0],[(0,0),(60,0),(60,10),(0,40)])
pygame.draw.polygon(mountain,[0,0,0],[(60,0),(120,0),(120,30),(60,10)])
pygame.draw.polygon(mountain,[0,0,0],[(120,0),(180,0),(180,20),(120,30)])
pygame.draw.polygon(mountain,[0,0,0],[(180,0),(240,0),(240,50),(180,20)])
pygame.draw.polygon(mountain,[0,0,0],[(240,0),(300,0),(300,40),(240,50)])
pygame.draw.polygon(mountain,[0,0,0],[(300,0),(360,0),(360,10),(300,40)])
pygame.draw.polygon(mountain,[0,0,0],[(360,0),(420,0),(420,35),(360,10)])
pygame.draw.polygon(mountain,[0,0,0],[(420,0),(480,0),(480,45),(420,35)])
pygame.draw.polygon(mountain,[0,0,0],[(480,0),(540,0),(540,42),(480,45)])
pygame.draw.polygon(mountain,[0,0,0],[(540,0),(600,0),(600,15),(540,42)])
screen.blit(sky,(0,0))
screen.blit(ground,(0,255))
screen.blit(mountain,(0,128))
pygame.display.update()
pygame.time.wait(30000)
| 34.101695 | 73 | 0.638171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.148111 |
48461f7075c6cb1cc7aff2cd4d853dffd50a16bd | 6,041 | py | Python | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
]
| null | null | null | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
]
| null | null | null | bots/parkour/reports.py | Marcin1396/parkour | 25d7d888b178eb7860a897e6df7578f2de0a729a | [
"MIT"
]
| null | null | null | """
Handles player reports: the .report whisper command, moderator handling via
the .handle command in mod chat, and .norep to toggle a player's reporting right.
"""
from parkour.env import env
from parkour.utils import normalize_name
import asyncio
import aiotfm
import time
class Reports(aiotfm.Client):
def __init__(self, *args, **kwargs):
self.rep_id = 0
self.reports = {}
self.reported = []
self.reporters = []
super().__init__(*args, **kwargs)
self.loop.create_task(self.check_reports())
async def check_reports(self):
while not self.main.open:
await asyncio.sleep(3.0)
while self.main.open:
now = time.time()
to_remove = []
for report, data in self.reports.items():
# reporter, reported, sent to discord,
# discord date, expiration date
if not data[2] and now >= data[3]:
data[2] = True
await self.report_discord(report)
elif now >= data[4]: # expired
self.reported.remove(data[1])
to_remove.append(report)
await self.mod_chat.channel.send(
"Report id {} has expired.".format(report)
)
for report in to_remove:
del self.reports[report]
await asyncio.sleep(30.0)
async def report_discord(self, report):
reporter, reported = self.reports[report][:2]
file = await self.load_player_file(reported)
if file is None:
room = "unknown"
else:
room = file["room"]
await self.send_channel(
env.report_channel,
"@everyone `{}` reported `{}` (room: `{}`, report id: `{}`). "
"Connect to the game and use the handle command in modchat."
.format(reporter, reported, room, report)
)
def report_cooldown(self, name):
reports = 0
remove_until = -1
now = time.time()
for index, (expire, reporter) in enumerate(self.reporters):
if now >= expire:
remove_until = index
elif reporter == name:
reports += 1
if remove_until >= 0:
del self.reporters[:remove_until + 1]
if reports >= 2:
return True
return False
async def on_channel_command(self, channel, name, author, ranks, cmd, args):
if name == "mod":
if cmd == "handle":
if (not ranks["admin"]
and not ranks["mod"]
and not ranks["trainee"]):
return True
if not args or not args[0].isdigit():
await channel.send("Usage: .handle [id] (silent?)")
return True
rep_id = int(args[0])
if len(args) > 1:
silent = args[1].lower() in ("silent", "silence", "s")
else:
silent = False
if rep_id not in self.reports:
return await channel.send("Report id {} not found".format(rep_id))
report = self.reports[rep_id]
del self.reports[rep_id]
file = await self.load_player_file(report[1])
if file is None:
extra = "Could not get reported player information."
else:
extra = "Sent you the player's room in whispers."
await self.whisper(
author,
"{}'s room: {}".format(report[1], file["room"])
)
await channel.send(
"{} will be handling the report {}. {}"
.format(author, rep_id, extra)
)
if not silent:
await self.whisper(
report[0],
"A parkour moderator is now handling your report."
)
else:
return False
else:
return False
return True
async def on_whisper_command(self, whisper, author, ranks, cmd, args):
if await super().on_whisper_command(
whisper, author, ranks, cmd, args
):
return True
if cmd == "norep":
if not ranks["admin"] and not ranks["mod"]:
return True
if not args:
await whisper.reply("Usage: .norep Username#0000")
return True
target = normalize_name(args[0])
pid, name, online = await self.get_player_info(target)
if name is None or not online:
await whisper.reply("That player ({}) is not online.".format(target))
return True
file = await self.load_player_file(name, online_check=False)
if file is None:
await whisper.reply("Could not load {}'s file.".format(name))
return True
file["report"] = not file["report"]
if not await self.save_player_file(
name, file, "report", online_check=False
):
await whisper.reply("Could not modify {}'s file.".format(name))
return True
action = "enabled" if file["report"] else "disabled"
await self.send_webhook(
env.webhooks.sanctions,
"**`[NOREP]:`** `{}` has {} reports from `{}` (ID: `{}`)"
.format(author, action, name, pid)
)
await whisper.reply(
"Reports from {} (ID: {}) have been {}."
.format(name, pid, action)
)
elif cmd == "report":
# Argument check
if not args:
await whisper.reply("Usage: .report Username#0000")
return True
reported = normalize_name(args[0])
if reported == author:
await whisper.reply("Why are you trying to report yourself?")
return True
pid, name, online = await self.get_player_info(reported)
if name is None or not online:
await whisper.reply("That player ({}) is not online.".format(reported))
return True
await whisper.reply("Your report of the player {} will be handled shortly.".format(reported))
# Player information check
if self.report_cooldown(author):
return True
if reported in self.reported:
return True
file = await self.load_player_file(author, online_check=False)
if file is None or not file["report"]:
return True
file = await self.load_player_file(reported, online_check=False)
if file is None:
return True
now = self.tfm_time()
if now < file.get("killed", 0):
                return True  # killed players cannot report
ban = file.get("banned", 0)
if ban == 2 or now < ban:
return True
# Create report
report = self.rep_id
self.rep_id += 1
online = len(self.mod_chat.players) - 1
now = time.time()
self.reports[report] = [
author, reported, online == 0,
now + 60 * 5, now + 60 * 30
]
self.reported.append(reported)
self.reporters.append((now + 60 * 5, author))
if online == 0:
await self.report_discord(report)
else:
await self.mod_chat.channel.send(
"{} reported {} (report id: {}) (room: {}) "
"(use the handle command here before handling it)"
.format(author, reported, report, file["room"])
)
else:
return False
        return True
| 24.556911 | 96 | 0.640953 | 5,904 | 0.977322 | 0 | 0 | 0 | 0 | 5,302 | 0.877669 | 1,201 | 0.198808 |
48473f9998c2721254601aaa70efd1a6c575862d | 3,053 | py | Python | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
]
| null | null | null | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
]
| null | null | null | data_analysis_scripts/mouse_et_ephys_viz.py | idc9/mvmm_sim | 5819d9ff95e36310536fd436bba50baba4f0ca71 | [
"MIT"
]
| null | null | null | from joblib import load
from os.path import join
import argparse
import numpy as np
import matplotlib.pyplot as plt
from mvmm_sim.simulation.sim_viz import save_fig
from mvmm_sim.data_analysis.utils import load_data
from mvmm_sim.simulation.utils import make_and_get_dir
from mvmm_sim.mouse_et.MouseETPaths import MouseETPaths
from mvmm_sim.mouse_et.raw_ephys_loading import load_raw_ephys
from mvmm_sim.mouse_et.ephys_viz import get_ephys_super_data,\
plot_top_clust_ephys_curves, plot_cluster_ephys_curve
parser = argparse.\
ArgumentParser(description='Cluster interpretation.')
parser.add_argument('--results_dir', default=None,
help='Directory to save results.')
parser.add_argument('--fpaths', nargs='+',
help='Paths to data sets.')
args = parser.parse_args()
inches = 8
n_top_clust = 10
results_dir = args.results_dir
fpaths = args.fpaths
fitting_dir = join(results_dir, 'model_fitting')
ephys_viz_dir = join(results_dir, 'interpret', 'bd_mvmm', 'ephys_pca_feats')
# load models and data
models = load(join(fitting_dir, 'selected_models'))
view_data, dataset_names, sample_names, view_feat_names = load_data(*fpaths)
# load raw ephys data
orig_data_dir = join(MouseETPaths().raw_data_dir, 'inh_patchseq_spca_files',
'orig_data_csv')
ephys_raw = load_raw_ephys(orig_data_dir, concat=False)
for k in ephys_raw.keys():
ephys_raw[k] = ephys_raw[k].loc[sample_names]
print(k, ephys_raw[k].shape)
n_datasets = len(ephys_raw)
# get data for plotting
v = 1
cluster_super_means, super_data_means, super_data_stds, y_cnts = \
get_ephys_super_data(model=models['bd_mvmm'].final_.view_models_[v],
fit_data=view_data[v],
ephys_raw=ephys_raw)
clust_labels = ['cluster_{}'.format(cl_idx + 1)
for cl_idx in range(len(y_cnts))]
# plot top several clusters
plot_top_clust_ephys_curves(cluster_super_means,
y_cnts=y_cnts,
overall_means=super_data_means,
overall_stds=super_data_stds,
clust_labels=clust_labels,
n_to_show=n_top_clust,
inches=inches)
save_fig(join(ephys_viz_dir, 'ephys_curves_top_clust.png'))
# plot each (non-trival) cluster
# non_trivial_clusters = y_cnts[y_cnts >= 5].index.values
non_trivial_clusters = y_cnts[y_cnts >= 0].index.values
save_dir = make_and_get_dir(ephys_viz_dir, 'cluster_curves')
for cl_idx in non_trivial_clusters:
label = clust_labels[cl_idx]
values = {}
for name in cluster_super_means.keys():
values[name] = cluster_super_means[name][cl_idx]
plt.figure(figsize=(2 * n_datasets * inches, inches))
plot_cluster_ephys_curve(values,
overall_means=super_data_means,
overall_stds=super_data_stds,
y_label=label)
save_fig(join(save_dir, '{}_ephys_curve.png'.format(label)))
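# Example invocation (added illustration; paths are placeholders):
#   python mouse_et_ephys_viz.py --results_dir <results_dir> --fpaths <view1> <view2>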
| 32.827957 | 76 | 0.689158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.156567 |
48474668430bd56c9be0fa4e96a14ac44c7e0f55 | 1,831 | py | Python | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
]
| null | null | null | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
]
| null | null | null | gimmemotifs/commands/match.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from __future__ import print_function
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.motif import pwmfile_to_motifs, Motif
from gimmemotifs.plot import match_plot
def match(args):
sample = dict([(m.id, m) for m in pwmfile_to_motifs(args.pwmfile)])
db = dict([(m.id, m) for m in pwmfile_to_motifs(args.dbpwmfile)])
mc = MotifComparer()
result = mc.get_closest_match(sample.values(), db.values(), "partial", "wic", "mean")
print("Motif\tMatch\tScore\tP-value")
for motif, match in result.items():
pval, pos, orient = mc.compare_motifs(sample[motif], db[match[0]], "partial", "wic", "mean", pval=True)
print("%s\t%s\t%0.2f\t%0.3e" % (motif, match[0], match[1][0], pval))
if args.img:
plotdata = []
for query, match in result.items():
motif = sample[query]
dbmotif = db[match[0]]
pval, pos, orient = mc.compare_motifs(motif, dbmotif, "partial", "wic", "mean", pval=True)
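            # Align the query motif with its best database match: take the
            # reverse complement when the reported orientation is -1, and pad
            # the shorter motif with uniform-background columns (0.25 each)
            # so the two logos line up at the reported offset `pos`.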
if orient == -1:
tmp = dbmotif.id
dbmotif = dbmotif.rc()
dbmotif.id = tmp
if pos < 0:
tmp = motif.id
motif = Motif([[0.25,0.25,0.25,0.25]] * -pos + motif.pwm)
motif.id = tmp
elif pos > 0:
tmp = dbmotif.id
dbmotif = Motif([[0.25,0.25,0.25,0.25]] * pos + dbmotif.pwm)
dbmotif.id = tmp
plotdata.append((motif, dbmotif, pval))
match_plot(plotdata, args.img)
| 38.145833 | 111 | 0.588749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.205352 |
4847f5739e2a2a4fe3f2279bc69fc734031f35e3 | 5,610 | py | Python | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
]
| null | null | null | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
]
| null | null | null | rest-service/manager_rest/rest/resources_v3/users.py | TS-at-WS/cloudify-manager | 3e062e8dec16c89d2ab180d0b761cbf76d3f7ddc | [
"Apache-2.0"
]
| null | null | null | #########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from flask_security import current_user
from manager_rest import constants
from manager_rest.storage import models, user_datastore
from manager_rest.security.authorization import authorize
from manager_rest.security import (SecuredResource,
MissingPremiumFeatureResource)
from manager_rest.manager_exceptions import BadParametersError
from .. import rest_decorators, rest_utils
from ..responses_v3 import UserResponse
try:
from cloudify_premium.multi_tenancy.secured_tenant_resource \
import SecuredMultiTenancyResource
except ImportError:
SecuredMultiTenancyResource = MissingPremiumFeatureResource
class User(SecuredResource):
@authorize('user_get_self')
@rest_decorators.marshal_with(UserResponse)
def get(self):
"""
Get details for the current user
"""
return user_datastore.get_user(current_user.username)
class Users(SecuredMultiTenancyResource):
@authorize('user_list')
@rest_decorators.marshal_with(UserResponse)
@rest_decorators.create_filters(models.User)
@rest_decorators.paginate
@rest_decorators.sortable(models.User)
@rest_decorators.search('username')
def get(self, multi_tenancy, _include=None, filters=None, pagination=None,
sort=None, search=None, **kwargs):
"""
List users
"""
return multi_tenancy.list_users(
_include,
filters,
pagination,
sort,
search
)
@authorize('user_create')
@rest_decorators.marshal_with(UserResponse)
@rest_decorators.no_external_authenticator('create user')
def put(self, multi_tenancy):
"""
Create a user
"""
request_dict = rest_utils.get_json_and_verify_params(
{
'username': {
'type': unicode,
},
'password': {
'type': unicode,
},
'role': {
'type': unicode,
'optional': True,
},
}
)
# The password shouldn't be validated here
password = request_dict.pop('password')
password = rest_utils.validate_and_decode_password(password)
rest_utils.validate_inputs(request_dict)
role = request_dict.get('role', constants.DEFAULT_SYSTEM_ROLE)
rest_utils.verify_role(role, is_system_role=True)
return multi_tenancy.create_user(
request_dict['username'],
password,
role,
)
class UsersId(SecuredMultiTenancyResource):
@authorize('user_update')
@rest_decorators.marshal_with(UserResponse)
def post(self, username, multi_tenancy):
"""
Set password/role for a certain user
"""
request_dict = rest_utils.get_json_and_verify_params()
password = request_dict.get('password')
role_name = request_dict.get('role')
if password:
if role_name:
raise BadParametersError('Both `password` and `role` provided')
password = rest_utils.validate_and_decode_password(password)
return multi_tenancy.set_user_password(username, password)
elif role_name:
rest_utils.verify_role(role_name, is_system_role=True)
return multi_tenancy.set_user_role(username, role_name)
else:
raise BadParametersError('Neither `password` nor `role` provided')
@authorize('user_get')
@rest_decorators.marshal_with(UserResponse)
def get(self, username, multi_tenancy):
"""
Get details for a single user
"""
rest_utils.validate_inputs({'username': username})
return multi_tenancy.get_user(username)
@authorize('user_delete')
@rest_decorators.marshal_with(UserResponse)
@rest_decorators.no_external_authenticator('delete user')
def delete(self, username, multi_tenancy):
"""
Delete a user
"""
rest_utils.validate_inputs({'username': username})
return multi_tenancy.delete_user(username)
class UsersActive(SecuredMultiTenancyResource):
@authorize('user_set_activated')
@rest_decorators.marshal_with(UserResponse)
def post(self, username, multi_tenancy):
"""
Activate a user
"""
request_dict = rest_utils.get_json_and_verify_params({'action'})
if request_dict['action'] == 'activate':
return multi_tenancy.activate_user(username)
else:
return multi_tenancy.deactivate_user(username)
class UsersUnlock(SecuredMultiTenancyResource):
@authorize('user_unlock')
@rest_decorators.marshal_with(UserResponse)
def post(self, username, multi_tenancy):
"""
Unlock user account
"""
rest_utils.validate_inputs({'username': username})
return multi_tenancy.unlock_user(username)
| 34.207317 | 79 | 0.659893 | 4,303 | 0.767023 | 0 | 0 | 4,054 | 0.722638 | 0 | 0 | 1,397 | 0.24902 |
484898b58a6b8f0e2cf8c6f249de3cb0f85b7504 | 6,659 | py | Python | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
]
| null | null | null | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
]
| null | null | null | src/m2_run_this_on_laptop.py | Petersl13/99-CapstoneProject-201920 | 4ec25ebf064e93745b9280a09c9212f8b16f76a1 | [
"MIT"
]
| null | null | null | """
Capstone Project. Code to run on a LAPTOP (NOT the robot).
Displays the Graphical User Interface (GUI) and communicates with the robot.
Authors: Your professors (for the framework)
and Nathalie Grier.
Winter term, 2018-2019.
"""
import mqtt_remote_method_calls as com
import tkinter
from tkinter import ttk
import shared_gui
import m2_sprint_3
import rosebot
def main():
"""
This code, which must run on a LAPTOP:
1. Constructs a GUI for my part of the Capstone Project.
2. Communicates via MQTT with the code that runs on the EV3 robot.
"""
# -------------------------------------------------------------------------
# Construct and connect the MQTT Client:
# -------------------------------------------------------------------------
mqtt_sender = com.MqttClient()
mqtt_sender.connect_to_ev3()
# -------------------------------------------------------------------------
# The root TK object for the GUI:
# -------------------------------------------------------------------------
root = tkinter.Tk()
root.title('CSSE 120, Nathalie Grier, Winter 2018-19')
# -------------------------------------------------------------------------
# The main frame, upon which the other frames are placed.
# -------------------------------------------------------------------------
main_frame = ttk.Frame(root, padding=10, borderwidth=5, relief='groove')
main_frame.grid()
# -------------------------------------------------------------------------
# Sub-frames for the shared GUI that the team developed:
# -------------------------------------------------------------------------
    #teleop_frame, arm_frame, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3 = get_shared_frames(main_frame, mqtt_sender)
sprint_3, control_frame = new_shared_frames(main_frame, mqtt_sender)
# -------------------------------------------------------------------------
# Frames that are particular to my individual contributions to the project.
# -------------------------------------------------------------------------
# DONE: Implement and call get_my_frames(...)
# -------------------------------------------------------------------------
# Grid the frames.
# -------------------------------------------------------------------------
    #grid_frames(teleop_frame, arm_frame, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3)
new_grid_frames(sprint_3, control_frame)
# -------------------------------------------------------------------------
# The event loop:
# -------------------------------------------------------------------------
root.mainloop()
def sprint_3_nathalie(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's Arm
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief='ridge')
frame.grid()
frame_label = ttk.Label(frame, text='Sprint 3 Nathalie')
frame_label.grid(row=0, column=1)
sprint_3_button = ttk.Button(frame, text='Sprint 3')
sprint_3_button.grid(row=2, column=0)
sprint_3_button["command"] = lambda: handle_sprint_3(mqtt_sender, speed_entry)
speed_entry = ttk.Entry(frame, width=8)
speed_label = ttk.Label(frame, text='Speed:')
speed_entry.grid(row=2, column=1)
speed_label.grid(row=1, column=1)
bark_button = ttk.Button(frame, text='Bark!')
bark_button.grid(row=2, column=2)
bark_button["command"] = lambda: handle_bark(mqtt_sender)
trick_1_button = ttk.Button(frame, text='Trick 1')
trick_2_button = ttk.Button(frame, text='Trick 2')
trick_1_button.grid(row=3, column=0)
trick_2_button.grid(row=3, column=2)
trick_1_button["command"] = lambda: handle_trick_1(mqtt_sender, speed_entry)
trick_2_button["command"] = lambda: handle_trick_2(mqtt_sender, speed_entry)
return frame
def get_shared_frames(main_frame, mqtt_sender):
teleop_frame = shared_gui.get_teleoperation_frame(main_frame, mqtt_sender)
arm_frame = shared_gui.get_arm_frame(main_frame, mqtt_sender)
control_frame = shared_gui.get_control_frame(main_frame, mqtt_sender)
go_straight_frame = shared_gui.get_go_straight_frame(main_frame, mqtt_sender)
beep_frame = shared_gui.beep_frame(main_frame, mqtt_sender)
color_frame = shared_gui.colors(main_frame, mqtt_sender)
go_straight = shared_gui.go_straight_until_frame(main_frame, mqtt_sender)
camera_frame = shared_gui.camera_sensor_window(main_frame, mqtt_sender)
sprint_3 = sprint_3_nathalie(main_frame, mqtt_sender)
return teleop_frame, arm_frame, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3
def new_shared_frames(main_frame, mqtt_sender):
sprint_3 = sprint_3_nathalie(main_frame, mqtt_sender)
control_frame = shared_gui.get_control_frame(main_frame, mqtt_sender)
return sprint_3, control_frame
def grid_frames(teleop_frame, arm_frame, control_frame, go_straight_frame, beep_frame, color_frame, go_straight, camera_frame, sprint_3):
teleop_frame.grid(row=0, column=0)
arm_frame.grid(row=1, column=0)
control_frame.grid(row=4, column=0)
go_straight_frame.grid(row=0, column=1)
beep_frame.grid(row=2, column=0)
color_frame.grid(row=3, column=0)
go_straight.grid(row=1, column=1)
camera_frame.grid(row=2, column=1)
sprint_3.grid(row=0, column=0)
def new_grid_frames(sprint_3, control_frame):
sprint_3.grid(row=0, column=0)
control_frame.grid(row=1, column=0)
def handle_sprint_3(mqtt_sender, speed):
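    # NOTE (assumption): messages follow this project's MQTT convention of a
    # message name plus a list of string arguments; speed.get() returns the
    # raw Entry text, which the robot-side handler presumably parses itself.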
print('Got sprint 3, speed:', speed.get())
mqtt_sender.send_message('sprint_3', [speed.get()])
def handle_bark(mqtt_sender):
print('Got bark')
mqtt_sender.send_message('bark_m2')
def handle_trick_1(mqtt_sender, speed):
print('Got Trick 1 at speed:', speed.get())
mqtt_sender.send_message('trick_1_m2', [speed.get()])
def handle_trick_2(mqtt_sender, speed):
print('Got Trick 2 at speed:', speed.get())
mqtt_sender.send_message('trick_2_m2', [speed.get()])
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main() | 40.357576 | 168 | 0.59138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,951 | 0.44316 |
4848f32ae381a42ff50e4c60f9915efe85ae845d | 853 | py | Python | python/tree/0704_binary_search.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
]
| 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/tree/0704_binary_search.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
]
| null | null | null | python/tree/0704_binary_search.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
]
| 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
return self.binary_search(nums, target, 0, len(nums) - 1)
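    # Recursive binary search over nums[low..high], inclusive; returns the
    # index of target or -1. The midpoint form low + (high - low) // 2 mirrors
    # the overflow-safe C idiom (Python ints cannot overflow, so here it is
    # purely stylistic).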
    def binary_search(self, nums, target, low, high):
        mid = low + (high - low) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            if mid + 1 > high:
                return -1
            return self.binary_search(nums, target, mid + 1, high)
        if low > mid - 1:
            return -1
        return self.binary_search(nums, target, low, mid - 1)
def test_search():
s = Solution()
assert 4 == s.search([-1, 0, 3, 5, 9, 12], 9)
assert -1 == s.search([-1, 0, 3, 5, 9, 12], 2)
assert 0 == s.search([5], 5)
assert 1 == s.search([2, 5], 5)
| 29.413793 | 67 | 0.501758 | 642 | 0.752638 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.106682 |
4848f46a84c4346593a1d98c3f7f6dead3b394ab | 2,905 | py | Python | alphamind/benchmarks/data/neutralize.py | rongliang-tech/alpha-mind | 39f720974c637d17e185e445dc05c9fc4863a241 | [
"MIT"
]
| 186 | 2017-11-27T01:26:44.000Z | 2022-03-28T16:11:33.000Z | alphamind/benchmarks/data/neutralize.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
]
| 2 | 2017-12-19T02:47:36.000Z | 2021-01-09T05:25:18.000Z | alphamind/benchmarks/data/neutralize.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
]
| 65 | 2017-11-27T01:26:47.000Z | 2022-03-17T10:50:52.000Z | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
def benchmark_neutralize(n_samples: int, n_features: int, n_loops: int) -> None:
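    # Times alphamind's neutralize() against the residuals of an
    # intercept-free sklearn LinearRegression fit and asserts they agree.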
print("-" * 60)
print("Starting least square fitting benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
calc_res = neutralize(x, y)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
benchmark_model = LinearRegression(fit_intercept=False)
benchmark_model.fit(x, y)
exp_res = y - x @ benchmark_model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
np.testing.assert_array_almost_equal(calc_res, exp_res)
def benchmark_neutralize_with_groups(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting least square fitting with group benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = neutralize(x, y, groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
model = LinearRegression(fit_intercept=False)
for _ in range(n_loops):
for i in range(n_groups):
curr_x = x[groups == i]
curr_y = y[groups == i]
model.fit(curr_x, curr_y)
_ = curr_y - curr_x @ model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
| 35.864198 | 102 | 0.561102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.15525 |
484bd025db2a6036894e42cd251bae437f17440a | 329 | py | Python | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
]
| null | null | null | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
]
| null | null | null | python/__init__.py | seangal/xAODAnaHelpers | 49f15c8525bf4aed9beceec2c58e58964d57e034 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-,
from __future__ import absolute_import
from __future__ import print_function
from . import logging as xAH_logging
try:
from .config import Config
except ImportError:
xAH_logging.logger.warning("xAH::Config could not be imported.")
__version__ = "1.0.0"
__all__ = ["utils", "config"]
| 21.933333 | 68 | 0.723404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.31307 |
484e5a10424681e3b5a649a30275352d6bd27762 | 8,118 | py | Python | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
]
| null | null | null | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
]
| 2 | 2021-03-31T19:40:45.000Z | 2021-12-13T20:34:12.000Z | process/extract.py | kogakenji/kasatomaru | 6e3cd36ea54a5e7c8042d17beed4675f210f1a36 | [
"MIT"
]
| null | null | null | from bs4 import BeautifulSoup
import lxml.html
import pathlib
import db
import datetime
from concurrent.futures import ThreadPoolExecutor
import threading
# Define the lock globally
lock = threading.Lock()
def files_list(start, end):
"""Generate url list with given start and end of indexes"""
resultlist = []
for i in range(start, end):
resultlist.append(f"page_{i}.html")
return resultlist
def extract_main_pages():
"""Extracts content from main pages """
pages = files_list(1, 49278)
# There was a problematic file: 44663.html removed #&...
print(len(pages))
for page in pages:
path = pathlib.Path.cwd().parent / "main_files" / page
print(path)
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), 'html.parser')
# soup = BeautifulSoup(p.read(), 'lxml')
# table = soup.find_all("table", bgcolor="#FFFFFF")
# print(table)
data = soup.find_all("tr", {'class': 'texto'})
for i, d in enumerate(data):
tds = data[i].find_all('td')
ship = tds[0].a.contents[0].strip()
link_family = "http://www.museubunkyo.org.br/ashiato/web2/" + tds[1].a.get("href")
family_id_register = link_family[link_family.find("=") + 1:link_family.index("&")]
leave_date = tds[1].a.contents[0].strip()
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
arrive_date = tds[1].a.contents[2].strip()
arrive_date = datetime.datetime.strptime(arrive_date, '%m/%d/%Y').strftime('%d/%m/%y')
province = tds[2].a.contents[0].strip()
destination = tds[3].a.contents[0].strip()
surname = tds[4].a.contents[0][0:4].strip()
name = tds[5].a.contents[0].strip()
print(
f"Ship: {ship} - leave_date: {leave_date} - arrive_date: {arrive_date} - province: {province} - destination: {destination} - surname: {surname} - name: {name}")
# print(f"link_family: {link_family} - idRegistro: {id_register}")
db.insert_person(name, surname, province, ship, destination, leave_date, arrive_date, link_family,
family_id_register)
def extract_jp_pages():
"""Extracts content from main pages """
pages = files_list(1, 49277)
# There was a problematic file: 44663.html removed #&...
print(len(pages))
for page in pages:
path = pathlib.Path.cwd().parent / "jp_files" / "jp" /page
print(path)
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), 'html.parser')
# soup = BeautifulSoup(p.read(), 'lxml')
# table = soup.find_all("table", bgcolor="#FFFFFF")
# print(table)
data = soup.find_all("tr", {'class': 'texto'})
for i, d in enumerate(data):
tds = data[i].find_all('td')
ship = tds[0].a.contents[0].strip()
link_family = "http://www.museubunkyo.org.br/ashiato/web2/" + tds[1].a.get("href")
family_id_register = link_family[link_family.find("=") + 1:link_family.index("&")]
leave_date = tds[1].a.contents[0].strip()
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
arrive_date = tds[1].a.contents[2].strip()
arrive_date = datetime.datetime.strptime(arrive_date, '%m/%d/%Y').strftime('%d/%m/%y')
province = tds[2].a.contents[0].strip()
destination = tds[3].a.contents[0].strip()
surname = tds[4].a.contents[0][0:4].strip()
name = tds[5].a.contents[0].strip()
try:
print(
f"Ship: {ship} - leave_date: {leave_date} - arrive_date: {arrive_date} - province: {province} - destination: {destination} - surname: {surname} - name: {name}")
# print(f"link_family: {link_family} - idRegistro: {id_register}")
db.insert_person(name, surname, province, ship, destination, leave_date, arrive_date, link_family,
family_id_register)
except Exception as exp:
print(exp)
pass
def has_class_but_no_id(tag):
    # BeautifulSoup filter helper: match tags that declare a class but no id.
    return tag.has_attr('class') and not tag.has_attr('id')
class Ship():
def __init__(self, name, leave_date, arrival_date, destination, farm, station):
self.name = name
self.leave_date = leave_date
self.arrival_date = arrival_date
self.destination = destination
self.farm = farm
self.station = station
def __str__(self):
return f"[SHIP_INFO]: Name:{self.name} LeaveDate:{self.leave_date} Arrival_date:{self.arrival_date} Destination:{self.destination} Farm:{self.farm} Station:{self.station}"
class Person():
def __init__(self, name, surname, name_kanji, surname_kanji, ship):
self.name = name
self.surname = surname
self.name_kanji = name_kanji
self.surname_kanji = surname_kanji
self.ship = ship
def __str__(self):
return f"[PERSON_INFO] Name:{self.name} Surname:{self.surname} NameKanji:{self.name_kanji} SurnameKanji:{self.surname_kanji} \n \t {self.ship}"
def get_family_content(id_family_register):
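    # `id_family_register` is a row from db.person_info(); index 0 holds the
    # family register id, which names the cached HTML page on disk.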
# id, name, surname, id_family_register, link_family = family
path = pathlib.Path.cwd().parent / "families_files" / "families" / f"page_{id_family_register[0]}.html"
print(f"caminho do arquivo: {path}")
with open(str(path), encoding="ISO-8859-1") as p:
soup = BeautifulSoup(p.read(), "html.parser")
# print(soup)
# print("=================fazenda================")
td = soup.find_all("span", {'class': 'titulo'})
for titulo in td:
if titulo.get_text() == "Navio:":
ship = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Destino:":
destination = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Partida:":
leave_date = titulo.parent.get_text().split(': ')[1]
leave_date = datetime.datetime.strptime(leave_date, '%m/%d/%Y').strftime('%d/%m/%y')
if titulo.get_text() == "Chegada:":
arrival_date = titulo.parent.get_text().split(': ')[1]
arrival_date = datetime.datetime.strptime(arrival_date, '%m/%d/%Y').strftime('%d/%m/%y')
if titulo.get_text() == "Fazenda:":
farm = titulo.parent.get_text().split(': ')[1]
if titulo.get_text() == "Estação:":
station = titulo.parent.get_text().split(': ')[1]
ship_info = Ship(ship, leave_date, arrival_date, destination, farm, station)
# print("===================pessoas=====================")
data = soup.find_all("tr", {'class': 'texto'})
for d in data:
record = d.find_all("td")
list = [name.get_text() for name in record]
p = Person(list[1], list[0], list[3], list[2], ship_info)
lock.acquire(True)
try:
db.update_jp_family(p.name, p.surname, p.name_kanji, p.surname_kanji, p.ship.name, p.ship.destination,
p.ship.leave_date, p.ship.arrival_date, p.ship.farm, p.ship.station)
except Exception as err:
print(err)
pass
lock.release()
def extract_family_content():
"""Extracts content from family pages"""
families = db.person_info()
print(f"TOTAL SIZE: {len(families)}")
with ThreadPoolExecutor(max_workers=3) as executor:
# executor.map(get_family_content, path)
executor.map(get_family_content, families)
if __name__ == "__main__":
# extract_main_pages()
extract_family_content()
# extract_jp_pages()
| 44.604396 | 184 | 0.572555 | 921 | 0.113424 | 0 | 0 | 0 | 0 | 0 | 0 | 2,200 | 0.270936 |
484f03e9c4b7ff5aefbc6845368e72fc3dfe1209 | 114 | py | Python | tests/shunit/data/bad_i18n_newline_5.py | nicole331/TWLight | fab9002e76868f8a2ef36f9279c777de34243b2c | [
"MIT"
]
| 67 | 2017-12-14T22:27:48.000Z | 2022-03-13T18:21:31.000Z | tests/shunit/data/bad_i18n_newline_5.py | nicole331/TWLight | fab9002e76868f8a2ef36f9279c777de34243b2c | [
"MIT"
]
| 433 | 2017-03-24T22:51:23.000Z | 2022-03-31T19:36:22.000Z | tests/shunit/data/bad_i18n_newline_5.py | Mahuton/TWLight | 90b299d07b0479f21dc90e17b8d05f5a221b0de1 | [
"MIT"
]
| 105 | 2017-06-23T03:53:41.000Z | 2022-03-30T17:24:29.000Z | # Single-quoted string is preceded and succeeded by newlines.
# Translators: This is a helpful comment.
_(
'5'
)
| 16.285714 | 61 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.921053 |
48511c78e308c11777c5277149036d4e3f1a72d0 | 9,090 | py | Python | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
]
| null | null | null | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
]
| 1 | 2021-07-02T10:24:19.000Z | 2021-07-02T10:24:19.000Z | deciphon/protein_profile.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
]
| null | null | null | from __future__ import annotations
from math import log
from typing import List, Type, Union
from imm import MuteState, Sequence, lprob_add, lprob_zero
from nmm import (
AminoAlphabet,
AminoLprob,
BaseLprob,
CodonLprob,
CodonMarg,
DNAAlphabet,
FrameState,
RNAAlphabet,
codon_iter,
)
from .codon_table import CodonTable
from .hmmer_model import HMMERModel
from .model import AltModel, EntryDistr, Node, NullModel, SpecialNode, Transitions
from .profile import Profile, ProfileID
__all__ = ["ProteinProfile", "create_profile"]
class ProteinProfile(Profile):
@classmethod
def create(
cls: Type[ProteinProfile],
profid: ProfileID,
factory: ProteinStateFactory,
null_aminot: AminoLprob,
core_nodes: List[Node],
core_trans: List[Transitions],
entry_distr: EntryDistr,
) -> ProteinProfile:
base_alphabet = factory.genetic_code.base_alphabet
R = factory.create(b"R", null_aminot)
null_model = NullModel.create(R)
special_node = SpecialNode(
S=MuteState.create(b"S", base_alphabet),
N=factory.create(b"N", null_aminot),
B=MuteState.create(b"B", base_alphabet),
E=MuteState.create(b"E", base_alphabet),
J=factory.create(b"J", null_aminot),
C=factory.create(b"C", null_aminot),
T=MuteState.create(b"T", base_alphabet),
)
alt_model = AltModel.create(
special_node,
core_nodes,
core_trans,
entry_distr,
)
# alt_model.set_fragment_length(self._special_transitions)
return cls(profid, base_alphabet, null_model, alt_model, False)
# @property
# def epsilon(self) -> float:
# nodes = self._alt_model.core_nodes()
# return nodes[0].M.epsilon
# @classmethod
# def create_from_binary(
# cls: Type[ProteinProfile],
# profid: ProfileID,
# null_model: nmm.Model,
# alt_model: nmm.Model,
# ):
# special_node = wrap.special_node(alt_model.hmm)
# core_nodes = wrap.core_nodes(alt_model.hmm)
# alt = AltModel.create_from_hmm(
# special_node, core_nodes, alt_model.hmm, alt_model.dp
# )
# null = NullModel.create_from_hmm(null_model.hmm)
# return cls(profid, alt_model.hmm.alphabet, null, alt, False)
# @property
# def window_length(self) -> int:
# return super().window_length
# @window_length.setter
# def window_length(self, length: int) -> None:
# if length < -1:
# raise ValueError("Length must be greater than or equal to -1.")
# if length == -1:
# length = 2 * 3 * self._alt_model.core_length
# self._window_length = length
def create_sequence(self, sequence: bytes) -> Sequence:
return Sequence.create(sequence, self.alphabet)
@property
def null_model(self) -> NullModel:
return self._null_model
@property
def alt_model(self) -> AltModel:
return self._alt_model
# def search(self, sequence: SequenceABC) -> SearchResults:
# self._set_target_length_model(len(sequence))
# alt_results = self._alt_model.viterbi(sequence, self.window_length)
# def create_fragment(
# seq: SequenceABC, path: Path, homologous: bool
# ):
# return ProteinFragment(seq, path, homologous)
# search_results = SearchResults(sequence, create_fragment)
# for alt_result in alt_results:
# subseq = alt_result.sequence
# # TODO: temporary fix for reading from binary file
# # and consequently alt and null model having different alphabets
# s = Sequence.create(bytes(subseq), self._null_model.hmm.alphabet)
# viterbi_score0 = self._null_model.loglikelihood(s)
# if len(alt_result.path) == 0:
# viterbi_score1 = lprob_invalid()
# else:
# viterbi_score1 = self._alt_model.loglikelihood(alt_result.sequence,
# alt_result.path)
# score = viterbi_score1 - viterbi_score0
# window = Interval(subseq.start, subseq.start + len(subseq))
# search_results.append(
# score, window, alt_result.path, viterbi_score1, viterbi_score0
# )
# return search_results
# def create_profile(
# hmm: HMMERModel,
# base_abc: Union[RNAAlphabet, DNAAlphabet],
# window_length: int = 0,
# epsilon: float = 0.1,
# ) -> ProteinProfile:
# amino_abc = hmm.alphabet
# assert isinstance(amino_abc, AminoAlphabet)
# lprobs = lprob_normalize(hmm.insert_lprobs(0))
# null_aminot = AminoLprob.create(amino_abc, lprobs)
# factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
# nodes: List[Node] = []
# for m in range(1, hmm.model_length + 1):
# lprobs = lprob_normalize(hmm.match_lprobs(m))
# M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# lprobs = lprob_normalize(hmm.insert_lprobs(m))
# I = factory.create(f"I{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# D = MuteState.create(f"D{m}".encode(), base_abc)
# nodes.append(Node(M, I, D))
# trans: List[Transitions] = []
# for t in hmm.transitions:
# t.normalize()
# trans.append(t)
# profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
# prof = ProteinProfile.create(
# profid, factory, null_aminot, nodes, trans, EntryDistr.UNIFORM
# )
# prof.window_length = window_length
# return prof
def create_profile(
hmm: HMMERModel,
base_abc: Union[RNAAlphabet, DNAAlphabet],
window_length: int = 0,
epsilon: float = 0.1,
) -> ProteinProfile:
amino_abc = hmm.alphabet
assert isinstance(amino_abc, AminoAlphabet)
null_lprobs = hmm.null_lprobs
null_log_odds = [0.0] * len(null_lprobs)
null_aminot = AminoLprob.create(amino_abc, null_lprobs)
factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
nodes: List[Node] = []
for m in range(1, hmm.model_length + 1):
lodds = [v0 - v1 for v0, v1 in zip(hmm.match_lprobs(m), null_lprobs)]
M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lodds))
I = factory.create(
f"I{m}".encode(), AminoLprob.create(amino_abc, null_log_odds)
)
D = MuteState.create(f"D{m}".encode(), base_abc)
nodes.append(Node(M, I, D))
trans = hmm.transitions
profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
entry_distr = EntryDistr.OCCUPANCY
prof = ProteinProfile.create(
profid, factory, null_aminot, nodes, trans, entry_distr
)
prof.window_length = window_length
return prof
class ProteinStateFactory:
def __init__(
self,
gcode: CodonTable,
epsilon: float,
):
self._gcode = gcode
self._epsilon = epsilon
def create(self, name: bytes, aminot: AminoLprob) -> FrameState:
codonp = _create_codon_prob(aminot, self._gcode)
baset = _create_base_table(codonp)
codonm = CodonMarg.create(codonp)
return FrameState.create(name, baset, codonm, self._epsilon)
@property
def genetic_code(self) -> CodonTable:
return self._gcode
@property
def epsilon(self) -> float:
return self._epsilon
def _create_base_table(codonp: CodonLprob):
base_abc = codonp.alphabet
base_lprob = {base: lprob_zero() for base in base_abc.symbols}
norm = log(3)
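    # Marginalize codon probabilities into per-base probabilities: each codon
    # spreads its probability evenly over its three positions, hence the
    # log(3) normalizer (all arithmetic stays in log space via lprob_add).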
for codon in codon_iter(base_abc):
lprob = codonp.get_lprob(codon)
triplet = codon.symbols
base_lprob[triplet[0]] = lprob_add(base_lprob[triplet[0]], lprob - norm)
base_lprob[triplet[1]] = lprob_add(base_lprob[triplet[1]], lprob - norm)
base_lprob[triplet[2]] = lprob_add(base_lprob[triplet[2]], lprob - norm)
assert len(base_lprob) == 4
bases = base_abc.symbols
assert len(bases) == 4
return BaseLprob.create(
base_abc,
(
base_lprob[bases[0]],
base_lprob[bases[1]],
base_lprob[bases[2]],
base_lprob[bases[3]],
),
)
def _create_codon_prob(aminot: AminoLprob, gencode: CodonTable) -> CodonLprob:
codonp = CodonLprob.create(gencode.base_alphabet)
codon_lprobs = []
lprob_norm = lprob_zero()
for i in range(len(aminot.alphabet.symbols)):
aa = aminot.alphabet.symbols[i : i + 1]
lprob = aminot.lprob(aa)
codons = gencode.codons(aa)
if len(codons) == 0:
continue
norm = log(len(codons))
for codon in codons:
codon_lprobs.append((codon, lprob - norm))
lprob_norm = lprob_add(lprob_norm, codon_lprobs[-1][1])
for codon, lprob in codon_lprobs:
codonp.set_lprob(codon, lprob - lprob_norm)
return codonp
| 31.344828 | 85 | 0.627063 | 4,547 | 0.50022 | 0 | 0 | 1,435 | 0.157866 | 0 | 0 | 3,573 | 0.393069 |
48514c4855c82f6511561bc091163063091c1e9c | 664 | py | Python | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
]
| 1 | 2020-09-24T10:38:53.000Z | 2020-09-24T10:38:53.000Z | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
]
| null | null | null | ptranking/ltr_adhoc/util/one_hot_utils.py | junj2ejj/ptranking.github.io | 06fa9751dd2eca89749ba4bb9641e4272cfc30a1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
from ptranking.ltr_global import global_gpu as gpu
def get_one_hot_reprs(batch_stds):
""" Get one-hot representation of batch ground-truth labels """
batch_size = batch_stds.size(0)
hist_size = batch_stds.size(1)
int_batch_stds = batch_stds.type(torch.cuda.LongTensor) if gpu else batch_stds.type(torch.LongTensor)
hot_batch_stds = torch.cuda.FloatTensor(batch_size, hist_size, 3) if gpu else torch.FloatTensor(batch_size, hist_size, 3)
hot_batch_stds.zero_()
hot_batch_stds.scatter_(2, torch.unsqueeze(int_batch_stds, 2), 1)
return hot_batch_stds
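# Illustrative usage (assumes labels in {0, 1, 2}, matching the hard-coded
# one-hot depth of 3 above): get_one_hot_reprs(torch.tensor([[0, 2, 1]]))
# yields a tensor of shape (1, 3, 3) whose last axis one-hot encodes each label.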
| 30.181818 | 125 | 0.74247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.189759 |
485441df6c93c795b69160386a1e913eee4699da | 5,108 | py | Python | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
]
| 2 | 2020-10-21T01:02:22.000Z | 2021-07-29T01:56:53.000Z | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
]
| null | null | null | src/data_module.py | enningxie/lightning-semantic-matching | 156ce3d40c53436b8166679c718b80f45782fe37 | [
"MIT"
]
| null | null | null | # Created by xieenning at 2020/10/19
from argparse import ArgumentParser, Namespace
from typing import Optional, Union, List
from pytorch_lightning import LightningDataModule
from transformers import ElectraTokenizer
from transformers.utils import logging
import torch
from torch.utils.data import DataLoader, TensorDataset
from src.data_processor import SemanticMatchingProcessor, convert_examples_to_features
logger = logging.get_logger(__name__)
class SemanticMatchingDataModule(LightningDataModule):
def __init__(self, hparams: Namespace):
super().__init__()
self.data_path = hparams.data_path
self.model_name_or_path = hparams.model_name_or_path
self.max_length = hparams.max_length
self.train_batch_size = hparams.train_batch_size
self.val_batch_size = hparams.val_batch_size
self.loader_workers = hparams.loader_workers
self.tokenizer = ElectraTokenizer.from_pretrained(hparams.model_name_or_path)
self.processor = SemanticMatchingProcessor()
self.train_features = None
self.val_features = None
self.train_dataset = None
self.val_dataset = None
def prepare_data(self, *args, **kwargs):
train_examples = self.processor.get_train_examples(self.data_path)
self.train_features = convert_examples_to_features(train_examples,
self.tokenizer,
label_list=self.processor.get_labels(),
max_length=self.max_length)
val_examples = self.processor.get_dev_examples(self.data_path)
self.val_features = convert_examples_to_features(val_examples,
self.tokenizer,
label_list=self.processor.get_labels(),
max_length=self.max_length)
logger.info("`prepare_data` finished!")
@staticmethod
def generate_dataset(features):
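        # Column order here fixes how training batches unpack downstream:
        # (input_ids, attention_mask, token_type_ids, labels).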
return TensorDataset(
torch.tensor([f.input_ids for f in features], dtype=torch.long),
torch.tensor([f.attention_mask for f in features], dtype=torch.long),
torch.tensor([f.token_type_ids for f in features], dtype=torch.long),
torch.tensor([f.label for f in features], dtype=torch.long)
)
def setup(self, stage: Optional[str] = None):
self.train_dataset = self.generate_dataset(self.train_features)
self.val_dataset = self.generate_dataset(self.val_features)
logger.info("`setup` finished!")
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(self.train_dataset, shuffle=True, batch_size=self.train_batch_size,
num_workers=self.loader_workers)
def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(self.val_dataset, batch_size=self.val_batch_size, num_workers=self.loader_workers)
def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(self.val_dataset, batch_size=self.val_batch_size, num_workers=self.loader_workers)
@classmethod
def add_data_specific_args(
cls, parser: ArgumentParser
) -> ArgumentParser:
""" Parser for Estimator specific arguments/hyperparameters.
:param parser: argparse.ArgumentParser
Returns:
- updated parser
"""
parser.add_argument(
"--data_path",
default="/Data/enningxie/Codes/lightning-semantic-matching/data",
type=str
)
parser.add_argument(
"--max_length",
default=64,
type=int
)
parser.add_argument(
"--train_batch_size",
default=64,
type=int
)
parser.add_argument(
"--val_batch_size",
default=64,
type=int
)
parser.add_argument(
"--loader_workers",
default=64,
type=int,
help="How many subprocesses to use for data loading. 0 means that \
the data will be loaded in the main process.",
)
return parser
if __name__ == '__main__':
tmp_parser = ArgumentParser()
tmp_parser.add_argument(
"--model_name_or_path",
type=str,
default="/Data/public/pretrained_models/pytorch/chinese-bert-wwm-ext"
)
tmp_parser = SemanticMatchingDataModule.add_data_specific_args(tmp_parser)
hparams = tmp_parser.parse_args()
tmp_data_module = SemanticMatchingDataModule(hparams)
tmp_data_module.prepare_data()
tmp_data_module.setup()
train_dataloader = tmp_data_module.val_dataloader()
for batch in train_dataloader:
print(type(batch))
print(batch)
print('break point.')
print('break point.')
| 39.292308 | 108 | 0.634495 | 3,955 | 0.774276 | 0 | 0 | 1,486 | 0.290916 | 0 | 0 | 638 | 0.124902 |
48544b3690b1859057fd2e593fbf385719c5db3e | 14,076 | py | Python | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
]
| 1 | 2015-08-27T13:22:42.000Z | 2015-08-27T13:22:42.000Z | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
]
| null | null | null | mainwin.py | hatmann1944/pyqt-http-file-svr | 3e95a222dc7d662921da44654aadb1721cba0382 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/mnt/hgfs/tmpcode/pyqt-http/untitled.ui'
#
# Created: Fri Jun 5 10:59:33 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import socket
import signal
import errno
import sys
import os
import platform
import time
#from sendfile import sendfile
class Worker(QtCore.QThread):
trigger = QtCore.pyqtSignal(int, int, str)
def __init__(self,parent=None):
super(Worker,self).__init__(parent)
def __del__(self):
self.wait()
def set(self, strHost, port, httpheader, fullFileName, totalLen):
self.ip = strHost
self.p = port
self.hdr = httpheader
self.fn = fullFileName
self.fileLen = totalLen
def run(self):
#signal.signal(signal.SIGUSR1,sigHander)
global lisfd
lisfd = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
lisfd.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
lisfd.bind((self.ip, self.p))
lisfd.listen(10)
self.runflag = True
lisfd.setblocking(0)
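        # Non-blocking accept loop: EAGAIN/EWOULDBLOCK just means "no client
        # yet", so we sleep and re-check runflag, which lets the GUI stop the
        # server without blocking inside accept().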
while self.runflag:
if self.runflag == False:
break
try:
confd,addr = lisfd.accept()
except socket.error, msg:
if msg.errno == errno.EINTR or msg.errno == errno.EAGAIN or msg.errno == errno.EWOULDBLOCK:
print msg
else:
raise
time.sleep(1)
continue
print "connect by ",addr
ip = addr[0]
port = addr[1]
addrStr = "%s:%d"%(ip, port)
confd.settimeout(10)
try:
#print "recving"
#data = confd.recv(1024, socket.MSG_DONTWAIT)
data = confd.recv(1024)
except socket.error, msg:
#print msg
confd.close()
continue
#print "recv end"
if not data:
break
print(data)
confd.send(self.hdr)
print addrStr
self.trigger.emit(0, self.fileLen, addrStr)
file = open(self.fn, "rb")
#offset = 0
#totalSent = long(0);
while True:
if self.runflag == False:
return
chunk = file.read(65536)
if not chunk:
break # EOF
try:
confd.sendall(chunk)
except socket.error, msg:
print msg
lisfd.close()
return
#totalSent += 65536
self.trigger.emit(65536, self.fileLen, addrStr)
#confd.send('\n\n')
confd.close()
self.trigger.emit(self.fileLen, self.fileLen, addrStr)
print "send fin"
else:
lisfd.close()
print "stop"
def GetFileSize(filename):
len = os.path.getsize(filename)
return len
def HttpResponse(header,filename):
    f = open(filename, "rb")
    context_list = f.readlines()
    size = os.path.getsize(filename)
    context = ''.join(context_list)
    f.close()
    response = "%s %d\n\n%s\n\n" % (header, size, context)
return response
def TestPlatform():
print ("----------Operation System--------------------------")
#Windows will be : (32bit, WindowsPE)
#Linux will be : (32bit, ELF)
print(platform.architecture())
#Windows will be : Windows-XP-5.1.2600-SP3 or Windows-post2008Server-6.1.7600
#Linux will be : Linux-2.6.18-128.el5-i686-with-redhat-5.3-Final
print(platform.platform())
#Windows will be : Windows
#Linux will be : Linux
print(platform.system())
print ("--------------Python Version-------------------------")
#Windows and Linux will be : 3.1.1 or 3.1.3
print(platform.python_version())
def WhichPlatform():
sysstr = platform.system()
if(sysstr =="Windows"):
print ("Call Windows tasks")
return "windows"
elif(sysstr == "Linux"):
print ("Call Linux tasks")
return "linux"
else:
print ("Other System tasks")
return "others"
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
from PyQt4 import QtGui, QtCore
from PIL import ImageQt
import qrcode
class Image(qrcode.image.base.BaseImage):
def __init__(self, border, width, box_size):
self.border = border
self.width = width
self.box_size = box_size
size = (width + border * 2) * box_size
self._image = QtGui.QImage(
size, size, QtGui.QImage.Format_RGB16)
self._image.fill(QtCore.Qt.white)
def pixmap(self):
return QtGui.QPixmap.fromImage(self._image)
def drawrect(self, row, col):
painter = QtGui.QPainter(self._image)
painter.fillRect(
(col + self.border) * self.box_size,
(row + self.border) * self.box_size,
self.box_size, self.box_size,
QtCore.Qt.black)
def save(self, stream, kind=None):
pass
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(650, 220, 100, 50))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.linkLabel = QtGui.QLabel(self.centralwidget)
self.linkLabel.setGeometry(QtCore.QRect(450, 0, 300, 160))
self.linkLabel.setObjectName(_fromUtf8("label"))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(455, 160, 200, 60))
self.label.setObjectName(_fromUtf8("label"))
self.label.setWordWrap(True)
self.addr = QtGui.QLabel(self.centralwidget)
self.addr.setGeometry(QtCore.QRect(450, 105, 150, 30))
self.addr.setObjectName(_fromUtf8("addr"))
self.addr.setWordWrap(True)
self.addr.setText("remoteaddr");
self.ratio = QtGui.QLabel(self.centralwidget)
self.ratio.setGeometry(QtCore.QRect(610, 105, 250, 30))
self.ratio.setObjectName(_fromUtf8("ratio"))
self.ratio.setWordWrap(True)
self.ratio.setText("ratio");
self.textEdit = QtGui.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(680, 180, 50, 30))
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.pushButton_2 = QtGui.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(450, 220, 100, 50))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pb = QtGui.QProgressBar(self.centralwidget)
self.pb.setGeometry(QtCore.QRect(450, 130, 300, 20))
self.pb.setObjectName(_fromUtf8("pb"))
self.pb.setRange(0, 0)
self.pb.setRange(0, 100)
self.pb.setValue(0)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.qrLabel = QtGui.QLabel(self.centralwidget)
self.qrLabel.setGeometry(QtCore.QRect(20, 2, 300, 300))
self.qrLabel.setObjectName(_fromUtf8("label"))
#self.refreshQRCode()
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def refreshQRCode(self, port):
global localIP
text = unicode("http://%s:%d"%(localIP, port))
self.linkLabel.setText("Please visit: %s"%text);
print text
self.qrLabel.setPixmap(
qrcode.make(text, image_factory=Image).pixmap())
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pushButton.setText(_translate("MainWindow", "run", None))
self.label.setText(_translate("MainWindow", "choose a file", None))
self.textEdit.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'SimSun\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">1234</p></body></html>", None))
self.pushButton_2.setText(_translate("MainWindow", "choose file", None))
class Window( QtGui.QMainWindow ):
def __init__( self ):
super( Window, self ).__init__()
self.setWindowTitle( "hello" )
self.resize( 200, 300 )
self.uiWin = Ui_MainWindow()
self.uiWin.setupUi(self)
self.fullFileName = ""
self.fileName = ""
self.thread=Worker()
self.connect(self.uiWin.pushButton,
QtCore.SIGNAL('clicked()'),
self.runHttpSvr)
self.connect(self.uiWin.pushButton_2,
QtCore.SIGNAL('clicked()'),
self.chooseFile)
self.running = False
self.thread.trigger.connect(self.updatePb)
def updatePb(self, sent2, total2, addr):
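        # sent2 == 0 is the worker's signal that a new client connected, so
        # the cumulative byte counter resets for the new transfer.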
if sent2 == 0:
self.sentLen = 0
#print sent
#print total2
self.sentLen += sent2
total = self.fileLen
#print total
val = self.sentLen/float(total)*100
if val <= 100:
self.uiWin.pb.setValue(val)
self.uiWin.addr.setText(addr)
self.uiWin.ratio.setText("%d/%d"%(self.sentLen, total))
else:
self.uiWin.pb.setValue(100)
self.uiWin.addr.setText(addr)
self.uiWin.ratio.setText("%d/%d"%(total, total))
def runHttpSvr(self):
if self.running :
#global lisfd
#lisfd.close()
self.thread.runflag = False
self.running = False
#self.uiWin.label.setText("svr is not running")
self.uiWin.pushButton.setText("run")
#os.kill(1234, signal.SIGUSR1)
return
if len(self.fullFileName) == 0 or len(self.fileName) == 0:
msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
"error", "not choose file",
QtGui.QMessageBox.NoButton, self)
msgBox.show()
return
if self.fileName and os.path.exists(self.fullFileName):
print 'OK, the "%s" file exists.'%self.fullFileName
else:
msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
"error", "Sorry, I cannot find the '%s' file."%self.fullFileName,
QtGui.QMessageBox.NoButton, self)
msgBox.show()
return
port = int(self.uiWin.textEdit.toPlainText())
if port < 1 or port > 65535:
msgBox = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
"error", "port[%s] error"%self.uiWin.textEdit.toPlainText(),
QtGui.QMessageBox.NoButton, self)
msgBox.show()
return
strHost = "0.0.0.0"
self.fileLen = GetFileSize(self.fullFileName)
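        # Hand-build a minimal HTTP/1.1 response header; Content-Length and
        # Content-Disposition let the client show progress and save the file
        # under its original name while the worker thread streams the body.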
httpheader = '''\
HTTP/1.1 200 OK
Content-Type: application/octet-stream
Server: Python-slp version 1.0
'''
httpheader += "Content-Disposition: attachment;filename=%s\n" % self.fileName
        httpheader += 'Content-Length: %d\n\n' % self.fileLen
print httpheader
self.sentLen = 0
self.thread.set(strHost, port, httpheader, self.fullFileName, self.fileLen)
self.thread.start()
self.running = True
self.uiWin.refreshQRCode(port)
#self.uiWin.label.setText("svr is running")
self.uiWin.pushButton.setText("stop")
def chooseFile(self):
#self.uiWin.label.setText("choosefile")
name = QtGui.QFileDialog.getOpenFileName(self)
if name:
            self.fullFileName = unicode(name, "utf8")
            separator = '/'
            self.fileName = self.fullFileName.split(separator)[-1]
#print self.fullFileName
#print self.fileName
self.uiWin.label.setText(self.fullFileName)
import socket
if WhichPlatform() == "linux":
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
if __name__ == '__main__':
global localIP
    if WhichPlatform() == "linux":
        localIP = get_ip_address("eth0")
    else:
        # get_ip_address() is defined only on Linux; fall back to a hostname
        # lookup so localIP is always set before the GUI starts.
        localIP = socket.gethostbyname(socket.gethostname())
print "local ip:%s "%localIP
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| 32.210526 | 157 | 0.586957 | 11,146 | 0.791844 | 0 | 0 | 0 | 0 | 0 | 0 | 2,565 | 0.182225 |
4854e27a28f0ec8896c437afdc84226fabdac5c2 | 905 | py | Python | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
]
| 1 | 2022-02-08T23:13:56.000Z | 2022-02-08T23:13:56.000Z | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
]
| 12 | 2021-09-21T20:09:50.000Z | 2022-03-13T14:39:02.000Z | monzo/handlers/echo.py | petermcd/monzo-api | e7b09d7564d07d00c0d0031b300f72e4479d8690 | [
"MIT"
]
| 1 | 2021-12-05T17:47:33.000Z | 2021-12-05T17:47:33.000Z | """Class to echo credentials."""
from monzo.handlers.storage import Storage
class Echo(Storage):
"""Class that will echo out credentials."""
def store(
self,
access_token: str,
client_id: str,
client_secret: str,
expiry: int,
refresh_token: str = ''
) -> None:
"""
Echo the Monzo credentials.
Args:
access_token: New access token
client_id: Monzo client ID
client_secret: Monzo client secret
expiry: Access token expiry as a unix timestamp
refresh_token: Refresh token that can be used to renew an access token
"""
print(f"client_id = '{client_id}'")
print(f"client_secret = '{client_secret}'")
print(f"access_token = '{access_token}'")
print(f'expiry = {expiry}')
print(f"refresh_token = '{refresh_token}'")
| 29.193548 | 82 | 0.583425 | 826 | 0.912707 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.628729 |
4854e666dca1f05f5b35de7678011b69bdfaadb9 | 359 | py | Python | grappelli/settings.py | theatlantic/django-grappelli-old | f4a5f10a2e68024873556d4cc249cf0351eb1335 | [
"BSD-3-Clause"
]
| 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/grappelli/settings.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
]
| null | null | null | base/site-packages/grappelli/settings.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
]
| 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # coding: utf-8
# DJANGO IMPORTS
from django.conf import settings
# Admin Site Title
ADMIN_HEADLINE = getattr(settings, "GRAPPELLI_ADMIN_HEADLINE", 'Grappelli')
ADMIN_TITLE = getattr(settings, "GRAPPELLI_ADMIN_TITLE", 'Grappelli')
# Link to your Main Admin Site (no slashes at start and end)
ADMIN_URL = getattr(settings, "GRAPPELLI_ADMIN_URL", '/admin/') | 29.916667 | 75 | 0.768802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.584958 |
4855ae459d96ebb92658af5b5f4e917cfa5c95f9 | 638 | py | Python | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
]
| 3 | 2019-05-19T13:44:39.000Z | 2019-07-03T11:15:20.000Z | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
]
| 7 | 2019-05-06T02:37:26.000Z | 2019-06-29T07:28:02.000Z | epikjjh/baekjoon/2178.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | [
"MIT"
]
| 1 | 2019-07-28T06:24:54.000Z | 2019-07-28T06:24:54.000Z | import sys
def conv(stream):
return [int(e) for e in stream]
input = sys.stdin.readline
n,m = map(int,input().split())
arr = [conv(input().split()[0]) for i in range(n)]
visit = [[0]*m for i in range(n)]
visit[0][0] = 1
direction = [(0,1),(0,-1),(1,0),(-1,0)]
queue = [[0,0]]
while queue:
y,x = queue.pop(0)
if y==n-1 and x==m-1:
print(visit[y][x])
break
for i in range(4):
n_y = y+direction[i][0]
n_x = x+direction[i][1]
if 0<=n_y<n and 0<=n_x<m and arr[n_y][n_x] and not visit[n_y][n_x]:
visit[n_y][n_x] = visit[y][x] + 1
queue.append([n_y,n_x]) | 26.583333 | 75 | 0.525078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
485611bfa6d80f65f56625abee3ae8772d391fbe | 2,877 | py | Python | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
]
| 283 | 2018-01-27T21:51:21.000Z | 2022-03-07T11:23:44.000Z | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
]
| 100 | 2018-01-28T18:02:41.000Z | 2021-11-10T11:00:38.000Z | DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | f91cec403b09d6ca060c41bf0147fb3a15fac1fc | [
"Apache-2.0"
]
| 79 | 2018-01-28T17:57:38.000Z | 2022-03-21T11:44:16.000Z | from ..utils import run
import logging
logger = logging.getLogger(__name__)
def process_one_package(path, package, python_version="3"):
"""Get details about one precise python package in the given image.
:param path: path were the docker image filesystem is expanded.
:type path: string
:param package: name of the python package to get info from.
:type package: string
:param python_version: version of python to use. can be "2" or "3". default to "3".
:type python_version: string
:return: list containing package name, version and size
:rtype: list[string, string, int]
"""
command = f"sudo chroot {path} pip{python_version} show {package}"
info = get_ipython().getoutput(command)
for line in info:
if "Name" in line:
name = line.split(" ").pop()
if "Version" in line:
version = line.split(" ").pop()
if "Location" in line:
location = line.split(" ").pop()
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name}").pop()
# If the folder does not exist, try lowercase
if "cannot access" in result:
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name.lower()}").pop()
# If the lowercase folder do not exist either
if "cannot access" not in result:
size = int(result.split('\t').pop(0))
# List the files by hand
else:
command = f"sudo chroot {path} pip{python_version} show {package} -f"
info = get_ipython().getoutput(command)
flag = False
size = 0
for line in info:
if flag:
command = f"du {path}{location}/{line.strip()}"
size += int(get_ipython().getoutput(command).pop().split('\t').pop(0))
if 'Files' in line:
flag = True
return [name, version, size]
def get_python_packages_info(path, python_version="3"):
"""Get details about all python packages in an image filesystem.
    :param path: path where the docker image filesystem is expanded.
:type path: string
    :param python_version: version of python to use. Can be "2" or "3"; defaults to "3".
:type python_version: string
:return: list containing lists of each package's name, version and size
:rtype: list[list[string, string, int]]
"""
command = f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"
packages = [package.split('==')
for package in get_ipython().getoutput(command)]
package_list = []
for package in packages:
try:
package_list.append(process_one_package(path, package[0]))
except Exception as e:
logger.error("Error processing python packages", package[0], e)
pass
return package_list
| 37.855263 | 103 | 0.620438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,476 | 0.513034 |
4856168e71f578517034764f4b9110679f5820fe | 24 | py | Python | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
]
| null | null | null | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
]
| null | null | null | src/maho/modules/__init__.py | evangelos-ch/maho-bot | 458c3ed0e4cb4d8edd300441b2defbc481aaf3f3 | [
"MIT"
]
| 1 | 2021-02-16T13:06:56.000Z | 2021-02-16T13:06:56.000Z | """Maho bot modules."""
| 12 | 23 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.958333 |
485cca825ed78a1668753f45f923d308e840da2c | 6,785 | py | Python | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
]
| 2 | 2021-11-29T15:34:24.000Z | 2021-12-02T14:47:20.000Z | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
]
| null | null | null | backend/dc_tests/api_views.py | gitter-badger/djangochannel | f9e33254739457c461e84b66879172007512f9b0 | [
"BSD-3-Clause"
]
| null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, JsonResponse
from django.views.generic.base import View
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from .models import (
TestCategory,
Test,
Question,
PossibleAnswer,
AnswersCounter
)
from .serializers import (
TestCategorySerializer,
TestSerializer,
QuestionSerializer,
PossibleAnswerSerializer
)
from backend.courses.models import Task, RealizationTask, Course
from backend.courses.api_views import CompletedTasks
from backend.utils.api import BlankGetAPIView
class AllCategories(BlankGetAPIView):
"""
    Lists all categories;
    no parameters
"""
permission_classes = [permissions.IsAuthenticated]
model = TestCategory
serializer = TestCategorySerializer
class TestsInCategory(BlankGetAPIView):
"""
    Lists the tests in a given category;
    parameter: pk, value: id of the category whose tests are requested
"""
permission_classes = [permissions.IsAuthenticated]
model = Test
serializer = TestSerializer
filter_name = 'category_id'
class QuestionsInTest(LoginRequiredMixin, View):
"""Вывод вопросов в отдельном тесте,
параметр: pk, значение: id теста, вопросы которого нужны
"""
def get(self, request):
"""Get"""
quest = Question.objects.filter(test_id=request.GET.get("pk", None)).order_by("-id")
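        # called for its side effect: get_counter() creates the user's
        # AnswersCounter for this test if it does not exist yet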
counter = CompleteQuestion().get_counter(request.user, request.GET.get("pk", None))
serializer = QuestionSerializer(quest, many=True)
return JsonResponse(serializer.data, safe=False)
# class QuestionsInTest(BlankGetAPIView):
# """
# Lists the questions in a given test,
# parameter: pk, value: id of the test whose questions are requested
# """
# permission_classes = [permissions.IsAuthenticated]
# model = Question
# serializer = QuestionSerializer
# filter_name = 'test_id'
# order_params = 'id'
class AnswersInQuestion(BlankGetAPIView):
"""
    Lists the answer options for a question;
    parameter: pk, value: id of the question whose answers are requested
"""
permission_classes = [permissions.IsAuthenticated]
model = PossibleAnswer
serializer = PossibleAnswerSerializer
filter_name = 'question_id'
order_params = '-id'
class CompleteQuestion(LoginRequiredMixin, View):
"""Вывод результатов теста и его прохождение"""
def post(self, request):
"""Post"""
pks = request.POST.getlist('pks[]', None)
if not pks:
return JsonResponse({'task': {
'exists': None,
'success': None,
'next': None
},
"message": 'Нет ответов!'})
variants = PossibleAnswer.objects.filter(id__in=pks)
        # whether the test has a linked task
task_exists = variants.first().question.test.tasks.exists()
        # number of correct options selected
        right_count = variants.filter(is_right=True).count()
        # total number of questions in the test
        total_questions = variants.first().question.test.questions.count()
        # total_variants = variants.filter(is_right=True)
        # check that the number of correct answers
        # matches the total number of questions
if not variants.filter(is_right=False).exists() and right_count >= total_questions:
success = True
mess = ""
elif variants.filter(is_right=False).exists() and right_count >= total_questions:
success = False
mess = "Тест не пройден"
else:
success = False
mess = ""
course_pk = request.POST.get('course_pk', None)
link = None
if success:
            # get the current user's RealizationTask and mark it as completed
realization = self.get_realization(request.user, variants.first())
if realization is not None:
realization.success = True
realization.save()
else:
link = Course.objects.get(id=course_pk).buy_link #, test_in_course=variants.first().question.test
next_task = None
if course_pk:
next_task = CompletedTasks().get_next_task(request, course_pk=course_pk)
return JsonResponse({
'task': {
'exists': task_exists,
'success': success if task_exists else None,
'next': next_task.id if next_task else None
},
'success': success,
'total': total_questions,
'right': right_count,
'link': link,
'message': mess,
})
# def post(self, request):
# """Прохождение теста"""
# pk = request.data.get('pk') # id варианта ответа
#
# try:
# variant = PossibleAnswer.objects.get(id=pk)
# except ObjectDoesNotExist:
    #         return Response('No such option', status=404)
#
# counter = self.get_counter(request.user, variant.question.test.id)
#
# if variant.is_right:
# counter.counter += 1
# counter.save()
#
# if counter.counter >= counter.questions_count:
# realization = self.get_realization(request.user, variant)
#
# if realization is None:
# counter.delete()
    #             return Response('No cheating', status=400)
#
# realization.success = True
# realization.save()
#
# return Response(status=200)
def get(self, request):
"""Вывод результатов"""
pk = request.GET.get('pk') # id теста
counter = self.get_counter(request.user, pk)
return JsonResponse({'total': counter.questions_count,
'right': counter.counter})
@staticmethod
def get_counter(user, pk):
"""Получение счетчика правильных ответов"""
test = Test.objects.get(id=pk)
try:
counter = AnswersCounter.objects.get(user=user, test=test)
except ObjectDoesNotExist:
counter = AnswersCounter.objects.create(user=user, test=test)
# counter = AnswersCounter.objects.get_or_create(user=user, test=test)
return counter
@staticmethod
def get_realization(user, variant):
"""Получение модели выполнения задания"""
try:
realization = RealizationTask.objects.get(
student=user,
task__test__questions__answers__id=variant.id
)
return realization
except ObjectDoesNotExist:
return None
| 32.464115 | 113 | 0.627708 | 6,334 | 0.845322 | 0 | 0 | 863 | 0.115174 | 0 | 0 | 2,995 | 0.399706 |
485d74659bc61cba2ba9b5ae45bb87b9fe1df6b3 | 2,066 | py | Python | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
]
| null | null | null | sdk/python/pulumi_kubernetes/apps/v1/ControllerRevision.py | rosskevin/pulumi-kubernetes | e4fa04b13a20929c879aca1bbe58fb5a95d16f7c | [
"Apache-2.0"
]
| null | null | null | import pulumi
import pulumi.runtime
from ... import tables
class ControllerRevision(pulumi.CustomResource):
"""
ControllerRevision implements an immutable snapshot of state data. Clients are responsible for
serializing and deserializing the objects that contain their internal state. Once a
ControllerRevision has been successfully created, it can not be updated. The API Server will
fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may,
however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers
for update and rollback, this object is beta. However, it may be subject to name and
representation changes in future releases, and clients should not depend on its stability. It is
primarily for internal use by controllers.
"""
def __init__(self, __name__, __opts__=None, data=None, metadata=None, revision=None):
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'apps/v1'
__props__['kind'] = 'ControllerRevision'
if revision is None:
raise TypeError('Missing required property revision')
__props__['revision'] = revision
__props__['data'] = data
__props__['metadata'] = metadata
super(ControllerRevision, self).__init__(
"kubernetes:apps/v1:ControllerRevision",
__name__,
__props__,
__opts__)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
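# A minimal usage sketch (the resource name and payload below are assumptions
# for illustration, not part of this SDK):
# rev = ControllerRevision(
#     "example-revision",
#     revision=1,
#     data={"snapshot": "opaque controller state"},
#     metadata={"namespace": "default"},
# )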
| 44.913043 | 100 | 0.703291 | 2,004 | 0.96999 | 0 | 0 | 0 | 0 | 0 | 0 | 1,031 | 0.499032 |
485ec5eb7e878a442433e3d945a0ad573fe3057e | 1,479 | py | Python | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
]
| null | null | null | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
]
| null | null | null | backend/python_scripts/feedback_frequency.py | bartaliskrisztian/sapifeedback | a63e38c0b767458509e47c1d5ccad0f6ce21a285 | [
"MIT"
]
| null | null | null | import sys
import ast
import json
import pandas as pd
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import os
import base64
def main():
    # parse the array of feedback timestamps passed as the first CLI argument;
    # ast.literal_eval safely parses the literal (eval would execute arbitrary code)
    argv_tmp = ast.literal_eval(sys.argv[1])
if type(argv_tmp) == int:
dates_ts = [argv_tmp]
else:
dates_ts = list(argv_tmp)
dates_temp = pd.Series([datetime.fromtimestamp(float(date)).date()
for date in dates_ts])
    occurrences = dates_temp.value_counts().to_dict()
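    # e.g. {date(2020, 3, 1): 4, date(2020, 3, 3): 1}; days with no feedback
    # get an explicit 0 in the loop below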
dates = []
frequency = []
    # creating arrays of dates and their occurrences
end = dates_temp.max()
date = dates_temp.min()
while date <= end:
dates.append(date)
        if date in occurrences:
            frequency.append(occurrences[date])
else:
frequency.append(0)
date += timedelta(days=1)
# plotting the feedback frequency
figure = plt.figure(figsize=(9, 5))
plt.plot(dates, frequency, 'b-o')
plt.xlabel("Date")
plt.ylabel("Number of feedbacks")
# make the y ticks integers, not floats
yint = []
locs, _ = plt.yticks()
for each in locs:
yint.append(int(each))
plt.yticks(yint)
plt.title('Feedback frequency')
file_name = "freq.jpg"
figure.savefig(file_name)
with open(file_name, 'rb') as img:
data = base64.b64encode(img.read())
os.remove(file_name)
print(data)
# Start process
if __name__ == '__main__':
main()
| 23.47619 | 70 | 0.619337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.168357 |
485f584dda3b7ed9cbcd49b969e57d33ae96c239 | 6,720 | py | Python | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
]
| null | null | null | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
]
| null | null | null | tempest/tests/lib/services/placement/test_resource_providers_client.py | AurelienLourot/tempest | 4d14a22a1a0eb7aaa4aafb917273baa0739f55c3 | [
"Apache-2.0"
]
| null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.placement import resource_providers_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestResourceProvidersClient(base.BaseServiceTest):
FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
FAKE_RESOURCE_PROVIDER = {
'generation': 0,
'name': 'Ceph Storage Pool',
'uuid': FAKE_RESOURCE_PROVIDER_UUID,
'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
}
FAKE_RESOURCE_PROVIDERS = {
'resource_providers': [FAKE_RESOURCE_PROVIDER]
}
FAKE_RESOURCE_PROVIDER_INVENTORIES = {
'inventories': {
'DISK_GB': {
'allocation_ratio': 1.0,
'max_unit': 35,
'min_unit': 1,
'reserved': 0,
'step_size': 1,
'total': 35
}
},
'resource_provider_generation': 7
}
FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
FAKE_RESOURCE_PROVIDER_AGGREGATES = {
'aggregates': [FAKE_AGGREGATE_UUID]
}
FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE = {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"min_unit": 1,
"reserved": 0,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"max_unit": 2147483647,
"min_unit": 1,
"reserved": 2,
"step_size": 1,
"total": 64
}
},
"resource_provider_generation": 2
}
FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST = {
"inventories": {
"MEMORY_MB": {
"allocation_ratio": 2.0,
"max_unit": 16,
"step_size": 4,
"total": 128
},
"VCPU": {
"allocation_ratio": 10.0,
"reserved": 2,
"total": 64
}
},
"resource_provider_generation": 1
}
def setUp(self):
super(TestResourceProvidersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = resource_providers_client.ResourceProvidersClient(
fake_auth, 'placement', 'regionOne')
def _test_list_resource_providers(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_providers,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDERS,
to_utf=bytes_body,
status=200
)
    def test_list_resource_providers_with_str_body(self):
        self._test_list_resource_providers()
    def test_list_resource_providers_with_bytes_body(self):
        self._test_list_resource_providers(bytes_body=True)
def _test_show_resource_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.show_resource_provider,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_show_resource_provider_with_str_body(self):
self._test_show_resource_provider()
def test_show_resource_provider_with_bytes_body(self):
self._test_show_resource_provider(bytes_body=True)
def _test_list_resource_provider_inventories(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_provider_inventories,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_list_resource_provider_inventories_with_str_body(self):
self._test_list_resource_provider_inventories()
def test_list_resource_provider_inventories_with_bytes_body(self):
self._test_list_resource_provider_inventories(bytes_body=True)
def _test_update_resource_providers_inventories(self, bytes_body=False):
self.check_service_client_function(
self.client.update_resource_providers_inventories,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
**self.FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST
)
def test_update_resource_providers_inventories_with_str_body(self):
self._test_update_resource_providers_inventories()
def test_update_resource_providers_inventories_with_bytes_body(self):
self._test_update_resource_providers_inventories(bytes_body=True)
def test_delete_resource_providers_inventories(self):
self.check_service_client_function(
self.client.delete_resource_providers_inventories,
'tempest.lib.common.rest_client.RestClient.delete',
{},
status=204,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
)
def _test_list_resource_provider_aggregates(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_provider_aggregates,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
to_utf=bytes_body,
status=200,
rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
)
def test_list_resource_provider_aggregates_with_str_body(self):
self._test_list_resource_provider_aggregates()
def test_list_resource_provider_aggregates_with_bytes_body(self):
self._test_list_resource_provider_aggregates(bytes_body=True)
| 36.721311 | 78 | 0.654315 | 5,982 | 0.890179 | 0 | 0 | 0 | 0 | 0 | 0 | 1,550 | 0.230655 |
485f7ffc14de09acdf65c094b7c9e15395d4ca1b | 1,001 | py | Python | problems/095.py | JoshKarpel/Euler | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
]
| 1 | 2017-09-20T22:26:24.000Z | 2017-09-20T22:26:24.000Z | problems/095.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
]
| null | null | null | problems/095.py | JoshKarpel/euler-python | 9c4a89cfe4b0114d84a82e2b2894c7b8af815e93 | [
"MIT"
]
| null | null | null | from problems import utils, mymath
@utils.memoize
def sum_proper_factors(n):
return sum(mymath.proper_factorization(n))
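# Project Euler 95: find the smallest member of the longest "amicable chain"
# (repeatedly taking the sum of proper divisors) with no element above one million.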
def solve():
upper_bound = 1000000
chains = dict()
for start_number in range(1, upper_bound):
chain = [start_number]
current_number = sum_proper_factors(start_number)
while current_number != start_number:
if current_number > upper_bound or current_number == 0 or len(chain) > 100:
break
elif current_number in chains:
chain += chains[current_number]
break
else:
chain.append(current_number)
current_number = sum_proper_factors(current_number)
if current_number == start_number:
chains[start_number] = chain
chain_lengths = {i: len(chains[i]) for i in chains}
max_key = mymath.key_of_max_value(chain_lengths)
return min(chains[max_key])
if __name__ == '__main__':
print(solve())
| 26.342105 | 87 | 0.632368 | 0 | 0 | 0 | 0 | 88 | 0.087912 | 0 | 0 | 10 | 0.00999 |
4860d4c2fef20a3559333f3c07fba155be5e079a | 12,166 | py | Python | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
]
| null | null | null | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
]
| null | null | null | core/utils.py | jojo23333/mcan-vqa | 294cf672155a3c01d148450afc6542412a8837e6 | [
"Apache-2.0"
]
| null | null | null | import copy
import logging
import re
import torch
import json
import numpy as np
from fvcore.common.checkpoint import (
get_missing_parameters_message,
get_unexpected_parameters_message,
)
from core.data.data_utils import ans_stat
class HierarchicClassification(object):
def __init__(self, __C):
self.__C = __C
self.loss_type = __C.LOSS_TYPE
self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
self.init_abs_tree()
self.init_tree_matrix()
self.layers = [x.cuda() for x in self.layers]
self.tree_matrix = self.tree_matrix.cuda()
def get_loss(self, pred, pred_abs, gt_ans, gt_abs, mask_ans, mask_abs, loss_fn):
'''
abs_group batch_size * N list
loss_fn should use mean reduction
'''
if self.__C.USE_ABS_MASKED_PRED:
pred, _ = self.get_abs_masked_pred(pred, pred_abs)
if self.loss_type == "mcan":
loss_ans = loss_fn(pred, gt_ans)
return loss_ans, torch.tensor(0.)
elif self.loss_type == "abs_bce":
s_pred_ans = torch.masked_select(pred, mask_ans)
s_gt_ans = torch.masked_select(gt_ans, mask_ans)
loss_ans = loss_fn(s_pred_ans, s_gt_ans)
s_pred_abs = torch.masked_select(pred_abs, mask_abs)
s_gt_abs = torch.masked_select(gt_abs, mask_abs)
loss_abs = loss_fn(s_pred_abs, s_gt_abs)
return loss_ans, loss_abs
elif self.loss_type == "all_bce":
loss_ans = loss_fn(pred, gt_ans)
loss_abs = loss_fn(pred_abs, gt_abs)
return loss_ans, loss_abs
def inference_abs(self, pred_abs, gt_abs):
prediction = pred_abs > 0.5
p_all = prediction.sum()
gt_all = gt_abs.sum()
tp = torch.masked_select(prediction, gt_abs).sum()
precision = tp / p_all
recall = tp / gt_all
return precision, recall
def get_abs_masked_pred(self, pred, pred_abs):
'''
tree: num_abs, num_pred
layers: list of list like [[1,2],[3,4,5,6]]
'''
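        # Intuition: per layer, each leaf's mask is the average predicted
        # probability of its ancestor abstraction nodes in that layer; the final
        # mask is the product over layers, so a leaf keeps probability mass only
        # if every layer of ancestors agrees.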
# abs_masks: (batch, num_abs, num_pred)
abs_masks = pred_abs.unsqueeze(-1) * self.tree_matrix.unsqueeze(0)
#print(abs_masks.shape)
abs_masks_by_layer = []
for layer in self.layers:
layer_cnt = self.tree_matrix[layer, :].sum(dim=0, keepdim=True)
            assert (layer_cnt > 0).all(), "layer not covering all leaves"
abs_masks_by_layer.append(
abs_masks[:, layer, :].sum(dim=1) / layer_cnt
)
# for multi-layer tree structure
# do production along the depth direction
abs_masks_by_layer = torch.stack(abs_masks_by_layer, dim=1)
assert (abs_masks_by_layer <= 1.0).all(), "mask exceed 1.0!"
# abs_maks: (batch, num_pred)
abs_mask = torch.prod(abs_masks_by_layer, dim=1)
masked_pred = pred * abs_mask
return masked_pred, abs_mask
def init_tree_matrix(self):
'''
return (number_of_abs_node, number_of_leaf)
'''
tree_matrix = np.zeros((self.abs_to_ix.__len__(), self.ans_to_ix.__len__()), dtype=np.float32)
for ans_ in self.ans_to_ix.keys():
ans_id = self.ans_to_ix[ans_]
abspath = self.ans_to_abspath[ans_]
for abs_ in abspath[1:]:
abs_id = self.abs_to_ix[abs_]
tree_matrix[abs_id, ans_id] = 1.0
self.tree_matrix = torch.from_numpy(tree_matrix)
return tree_matrix
def init_abs_tree(self):
with open('core/data/answer_dict_hierarchical.json', 'r') as f:
data = json.load(f)
# edge link of the abs tree
self.abs_tree = data['tree_dict']
# list of id from abs to ix
self.abs_to_ix = data['abs_dict']
        # given an ans, gives all possible nodes on the path to it; the first component is always '_rt'
self.ans_to_abspath = {x:[] for x in self.ans_to_ix.keys()}
layers = []
def dfs_search(current_node, path, tree, d):
            # if not a leaf node yet
if current_node in tree:
print(f"Processing node: {current_node}:{path}")
if d > 0:
if len(layers) < d:
layers.append([current_node])
else:
layers[d-1].append(current_node)
for child in tree[current_node]:
dfs_search(child, path+[current_node], tree, d+1)
else:
for x in path:
if x not in self.ans_to_abspath[current_node]:
self.ans_to_abspath[current_node].append(x)
dfs_search('_rt', [], self.abs_tree, 0)
self.layers = [
torch.tensor([self.abs_to_ix[abs_] for abs_ in abs_nodes])
for abs_nodes in layers
]
print("Processing of tree finished")
# losses_ans = []
# losses_abs = []
# batch_size, num_class = pred.shape
# print(pred.shape)
# print(loss_groups)
# assert batch_size == len(loss_groups)
# for i in range(batch_size):
# loss_groups = []
# # loss for abstraction nodes
# for g in loss_groups[i][:-1]:
# loss_groups.append(loss_fn(pred_abs[i, g], gt_abs[i, g]))
# loss_abs = torch.mean(torch.stack(loss_groups))
# losses_abs.append(loss_abs)
# # loss for leaf nodes
# ans_group = loss_groups[i][-1]
# loss_ans = loss_fn(pred[i, ans_group], gt_ans[i, ans_group])
# losses_ans.append(loss_ans)
# loss_ans = torch.mean(torch.stack(losses_ans))
# loss_abs = torch.mean(torch.stack(losses_abs))
# return loss_ans, loss_abs
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict):
"""
Match names between the two state-dict, and update the values of model_state_dict in-place with
copies of the matched tensor in ckpt_state_dict.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
If multiple matches exist, take the one with longest size
of the corresponding name. For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(model_state_dict.keys())
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(ckpt_state_dict.keys())
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_len_model = max(len(key) for key in model_keys) if model_keys else 1
max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
# logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
print(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
print(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
model_state_dict[key_model] = value_ckpt.clone()
if key_ckpt in matched_keys: # already added to matched_keys
print(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
print(
log_str_template.format(
key_model,
max_len_model,
original_keys[key_ckpt],
max_len_ckpt,
tuple(shape_in_model),
)
)
matched_model_keys = matched_keys.values()
matched_ckpt_keys = matched_keys.keys()
# print warnings about unmatched keys on both side
unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys]
if len(unmatched_model_keys):
print(get_missing_parameters_message(unmatched_model_keys))
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys]
if len(unmatched_ckpt_keys):
print(
get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys)
)
class TrainLossMeter(object):
def __init__(self):
self.total_steps = 0
self.iter_steps = 0
def init_meter(self, loss_keys):
self.loss_iters = {x:0 for x in loss_keys}
self.loss_sum = {x:0 for x in loss_keys}
def update_iter(self, d):
losses = d#d["losses"]
if self.total_steps == 0:
self.init_meter(losses.keys())
for x in losses:
self.loss_iters[x] += losses[x]
self.loss_sum[x] += losses[x]
self.total_steps += 1
self.iter_steps += 1
def log_iter(self):
loss_str = ""
for x in self.loss_iters:
loss_str = loss_str + f"{x}: {self.loss_iters[x]/self.iter_steps} "
self.loss_iters = {x:0 for x in self.loss_iters}
self.iter_steps = 0
return loss_str
def log_epoch(self):
loss_str = ""
for x in self.loss_sum:
loss_str = loss_str + f"{x}: {self.loss_sum[x]/self.total_steps} "
self.total_steps = 0
self.iter_steps = 0
return loss_str
def get_param_group_finetune(model, base_lr=1e-4):
parameters_classifier = []
parameters_backbone = []
for module_param_name, value in model.named_parameters():
if not value.requires_grad:
continue
if 'classifier' not in module_param_name:
parameters_backbone.append(value)
else:
parameters_classifier.append(value)
return [{"params": parameters_backbone, "lr": base_lr*0.1},
{"params": parameters_classifier, "lr": base_lr}], [0.1, 1.]
| 39.888525 | 102 | 0.616472 | 6,688 | 0.549729 | 0 | 0 | 0 | 0 | 0 | 0 | 3,894 | 0.320072 |
486361edc3e5c1d568dba14a5be4788c38396ea5 | 6,589 | py | Python | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
]
| 4 | 2022-03-08T09:05:13.000Z | 2022-03-16T17:59:43.000Z | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
]
| 64 | 2022-03-08T01:11:40.000Z | 2022-03-31T17:23:49.000Z | spid_cie_oidc/entity/trust_chain_operations.py | peppelinux/spid-cie-oidc-authority | 816636fece10f410f5d6fce85fd79bb409d0c8b8 | [
"Apache-2.0"
]
| 8 | 2022-03-09T12:00:08.000Z | 2022-03-31T13:52:14.000Z | import logging
from django.utils import timezone
from typing import Union
from .exceptions import InvalidTrustchain, TrustchainMissingMetadata
from .models import FetchedEntityStatement, TrustChain
from .statements import EntityConfiguration, get_entity_configurations
from .settings import HTTPC_PARAMS
from .trust_chain import TrustChainBuilder
from .utils import datetime_from_timestamp
logger = logging.getLogger(__name__)
def trust_chain_builder(
subject: str,
trust_anchor: EntityConfiguration,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = []
) -> Union[TrustChainBuilder, bool]:
"""
Trust Chain builder
"""
tc = TrustChainBuilder(
subject,
trust_anchor=trust_anchor,
required_trust_marks=required_trust_marks,
httpc_params=httpc_params
)
tc.start()
if not tc.is_valid:
logger.error(
"The tree of trust cannot be validated for "
f"{tc.subject}: {tc.tree_of_trust}"
)
return False
else:
return tc
def dumps_statements_from_trust_chain_to_db(trust_chain: TrustChainBuilder) -> list:
entity_statements = []
for stat in trust_chain.trust_path:
data = dict(
exp=datetime_from_timestamp(stat.payload["exp"]),
iat=datetime_from_timestamp(stat.payload["iat"]),
statement=stat.payload,
jwt=stat.jwt,
)
fes = FetchedEntityStatement.objects.filter(sub=stat.sub, iss=stat.iss)
if fes:
fes.update(**data)
else:
fes = FetchedEntityStatement.objects.create(
sub=stat.sub, iss=stat.iss, **data
)
entity_statements.append(fes)
if stat.verified_descendant_statements:
for desc_stat_sub in stat.verified_descendant_statements:
payload = stat.verified_descendant_statements[desc_stat_sub]
jwt = stat.verified_descendant_statements_as_jwt[desc_stat_sub]
_data = dict(
exp=datetime_from_timestamp(payload["exp"]),
iat=datetime_from_timestamp(payload["iat"]),
statement=payload,
jwt=jwt,
)
desc_fes = FetchedEntityStatement.objects.filter(
sub=payload["sub"], iss=payload["iss"]
)
if desc_fes:
desc_fes.update(**_data)
else:
desc_fes = FetchedEntityStatement.objects.create(
sub=payload["sub"], iss=payload["iss"], **_data
)
entity_statements.append(desc_fes)
return entity_statements
def get_or_create_trust_chain(
subject: str,
trust_anchor: str,
httpc_params: dict = HTTPC_PARAMS,
required_trust_marks: list = [],
force: bool = False,
) -> Union[TrustChain, None]:
"""
    returns a TrustChain model object, if one is available;
    if available, it is returned;
    if not available, a new one is created;
    if available but expired, the expired one is returned;
    if the force flag is set to True, the trust chain is renewed,
    updated, and the updated one is returned
"""
fetched_trust_anchor = FetchedEntityStatement.objects.filter(
sub=trust_anchor, iss=trust_anchor
)
if not fetched_trust_anchor or fetched_trust_anchor.first().is_expired or force:
jwts = get_entity_configurations([trust_anchor], httpc_params=httpc_params)
ta_conf = EntityConfiguration(jwts[0], httpc_params=httpc_params)
data = dict(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
if not fetched_trust_anchor:
            # trust in the anchor is assumed to be absolute!
# ta_conf.validate_by_itself()
fetched_trust_anchor = FetchedEntityStatement.objects.create(
sub=ta_conf.sub, iss=ta_conf.iss, **data
)
else:
fetched_trust_anchor.update(
exp=datetime_from_timestamp(ta_conf.payload["exp"]),
iat=datetime_from_timestamp(ta_conf.payload["iat"]),
statement=ta_conf.payload,
jwt=ta_conf.jwt,
)
fetched_trust_anchor = fetched_trust_anchor.first()
else:
fetched_trust_anchor = fetched_trust_anchor.first()
ta_conf = fetched_trust_anchor.get_entity_configuration_as_obj()
tc = TrustChain.objects.filter(sub=subject, trust_anchor__sub=trust_anchor).first()
if tc and not tc.is_active:
        # if manually disabled by staff
return None
elif force or not tc or tc.is_expired:
trust_chain = trust_chain_builder(
subject=subject,
trust_anchor=ta_conf,
required_trust_marks=required_trust_marks
)
if not trust_chain:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not found"
)
elif not trust_chain.is_valid:
raise InvalidTrustchain(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} is not valid"
)
elif not trust_chain.final_metadata:
raise TrustchainMissingMetadata(
f"Trust chain for subject {subject} and "
f"trust_anchor {trust_anchor} doesn't have any metadata"
)
dumps_statements_from_trust_chain_to_db(trust_chain)
tc = TrustChain.objects.filter(
sub=subject, trust_anchor__sub=trust_anchor
)
data = dict(
exp=trust_chain.exp_datetime,
processing_start = timezone.localtime(),
chain=trust_chain.serialize(),
metadata=trust_chain.final_metadata,
parties_involved=[i.sub for i in trust_chain.trust_path],
status="valid",
trust_marks=[
{"id": i.id, "trust_mark": i.jwt}
for i in trust_chain.verified_trust_marks
],
is_active=True,
)
if tc:
tc.update(**data)
tc = tc.first()
else:
tc = TrustChain.objects.create(
sub=subject,
trust_anchor=fetched_trust_anchor,
**data,
)
return tc
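# A minimal usage sketch (the URLs are placeholders, not real deployments):
# tc = get_or_create_trust_chain(
#     subject="https://rp.example.org",
#     trust_anchor="https://trust-anchor.example.org",
# )
# if tc:
#     metadata = tc.metadata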
| 32.29902 | 87 | 0.610563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 869 | 0.131886 |
486394bb559615b84fa49567fcdb6a63df1d44d1 | 19,445 | py | Python | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
]
| 1 | 2020-05-14T02:35:09.000Z | 2020-05-14T02:35:09.000Z | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
]
| null | null | null | code.py | FoamyGuy/CircuitPython_CSV_TileMap_Game | 4cf1661dd7db1cecd434e9fba6e07eb375ffc06d | [
"MIT"
]
| null | null | null | import board
import displayio
import adafruit_imageload
from displayio import Palette
from adafruit_pybadger import PyBadger
import time
# Direction constants for comparison
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
# how long to wait between rendering frames
FPS_DELAY = 1/30
# how many tiles can fit on the screen. Tiles are 16x16
SCREEN_HEIGHT_TILES = 8
SCREEN_WIDTH_TILES = 10
# hold the map state as it came out of the csv. Only holds non-entities.
ORIGINAL_MAP = {}
# hold the current map state if/when it changes. Only holds non-entities.
CURRENT_MAP = {}
# dictionary with tuple keys that map to tile type values
# e.x. {(0,0): "left_wall", (1,1): "floor"}
CAMERA_VIEW = {}
# how far offset the camera is from the CURRENT_MAP
# used to determine where things are at in the camera view vs. the MAP
CAMERA_OFFSET_X = 0
CAMERA_OFFSET_Y = 0
# list of sprite objects, one for each entity
ENTITY_SPRITES = []
# Dictionary with tuple keys that map to lists of entity objects.
# Each one has the index of the sprite in the ENTITY_SPRITES list
# and the tile type string
ENTITY_SPRITES_DICT = {}
# list of entities that need to be on the screen currently based on the camera view
NEED_TO_DRAW_ENTITIES = []
# hold the location of the player in tile coordinates
PLAYER_LOC = (0,0)
# return from CURRENT_MAP the tile name of the tile of the given coords
def get_tile(coords):
return CURRENT_MAP[coords[0], coords[1]]
# return from TILES dict the tile object with stats and behavior for the tile at the given coords.
def get_tile_obj(coords):
return TILES[CURRENT_MAP[coords[0], coords[1]]]
# check the can_walk property of the tile at the given coordinates
def is_tile_moveable(tile_coords):
return TILES[CURRENT_MAP[tile_coords[0], tile_coords[1]]]['can_walk']
# behavior function that allows the player to push the entity
def allow_push(to_coords, from_coords, entity_obj):
push_x_offset = 0
push_y_offset = 0
print("inside allow push")
print("%s -> %s" % (from_coords, to_coords))
if to_coords[0] < from_coords[0]:
# moving left
push_x_offset = -1
push_y_offset = 0
elif to_coords[0] > from_coords[0]:
# moving right
push_x_offset = 1
push_y_offset = 0
elif to_coords[1] < from_coords[1]:
# moving up
push_x_offset = 0
push_y_offset = -1
elif to_coords[1] > from_coords[1]:
# moving down
push_x_offset = 0
push_y_offset = 1
# coords where we will be pushing the entity to
push_to_tile_coords = (to_coords[0]+ push_x_offset, to_coords[1]+ push_y_offset)
# check if the entity is allowed to move to there
if is_tile_moveable(push_to_tile_coords):
#print("dict before %s" % ENTITY_SPRITES_DICT)
        # check if there are entity(s) at the tile we are trying to push to.
if push_to_tile_coords in ENTITY_SPRITES_DICT:
            # append the thing we are pushing to the list at the new coordinates in the dictionary
ENTITY_SPRITES_DICT[push_to_tile_coords].append(entity_obj)
else:
# create a list with the thing we are pushing and store it in the dictionary
ENTITY_SPRITES_DICT[push_to_tile_coords] = [entity_obj]
        # remove the thing we are pushing from its old location
ENTITY_SPRITES_DICT[to_coords].remove(entity_obj)
# if there are no entities left in the old location
if len(ENTITY_SPRITES_DICT[to_coords]) == 0:
            # delete the empty list
del ENTITY_SPRITES_DICT[to_coords]
#print("dict after %s" % ENTITY_SPRITES_DICT)
# return true to allow player to move
return True
# if we return false player won't be able to move
return False
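# e.g. pushing right: the player at (2, 5) stepping onto a robot at (3, 5)
# tries to shove it to (4, 5); the push succeeds only if that tile can_walk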
# main dictionary that maps tile type strings to objects.
# each one stores the sprite_sheet index and any necessary
# behavioral stats like can_walk or before_move
TILES = {
# empty strings default to floor and no walk.
"": {
"sprite_index": 7,
"can_walk": False
},
"floor": {
"sprite_index": 7,
"can_walk": True
},
"top_wall": {
"sprite_index": 4,
"can_walk": False
},
"top_right_wall": {
"sprite_index": 5,
"can_walk": False
},
"top_left_wall": {
"sprite_index": 3,
"can_walk": False
},
"bottom_right_wall": {
"sprite_index": 11,
"can_walk": False
},
"bottom_left_wall": {
"sprite_index": 9,
"can_walk": False
},
"right_wall": {
"sprite_index": 8,
"can_walk": False
},
"left_wall": {
"sprite_index": 6,
"can_walk": False
},
"bottom_wall": {
"sprite_index": 10,
"can_walk": False
},
"robot": {
"sprite_index": 1,
"can_walk": True,
"entity": True,
"before_move": allow_push
},
"heart": {
"sprite_index": 2,
"can_walk": True,
"entity": True,
},
"player": {
"sprite_index": 0,
"entity": True,
}
}
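# Example of extending the tile set ("door" and sprite index 12 are hypothetical,
# not part of the shipped sprite sheet or map):
# TILES["door"] = {"sprite_index": 12, "can_walk": True}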
# Badger object for easy button handling
badger = PyBadger()
# display object variable
display = board.DISPLAY
# Load the sprite sheet (bitmap)
sprite_sheet, palette = adafruit_imageload.load("/castle_sprite_sheet.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
# make bright pink be transparent so entities can be drawn on top of map tiles
palette.make_transparent(5)
# Create the castle TileGrid
castle = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width = 10,
height = 8,
tile_width = 16,
tile_height = 16)
# Create a Group to hold the sprites and add it
sprite_group = displayio.Group(max_size=48)
# Create a Group to hold the castle and add it
castle_group = displayio.Group()
castle_group.append(castle)
# Create a Group to hold the sprite and castle
group = displayio.Group()
# Add the sprite and castle to the group
group.append(castle_group)
group.append(sprite_group)
# Open and read raw string from the map csv file
f = open("map.csv", 'r')
map_csv_str = f.read()
f.close()
# split the raw string into lines
map_csv_lines = map_csv_str.replace("\r", "").split("\n")
# set the WIDTH and HEIGHT variables.
# this assumes the map is rectangular.
MAP_HEIGHT = len(map_csv_lines)
MAP_WIDTH = len(map_csv_lines[0].split(","))
#print(TILES.keys())
#print(map_csv_lines)
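# map.csv is a plain grid of tile names, one per cell, one row per line; an
# illustrative 3x3 layout (the shipped map is larger) could look like:
# top_left_wall,top_wall,top_right_wall
# left_wall,player,right_wall
# bottom_left_wall,bottom_wall,bottom_right_wall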
# loop over each line storing index in y variable
for y, line in enumerate(map_csv_lines):
# ignore empty line
if line != "":
# loop over each tile type separated by commas, storing index in x variable
for x, tile_name in enumerate(line.split(",")):
print("%s '%s'" % (len(tile_name), str(tile_name)))
# if the tile exists in our main dictionary
if tile_name in TILES.keys():
# if the tile is an entity
if 'entity' in TILES[tile_name].keys() and TILES[tile_name]['entity']:
# set the map tiles to floor
ORIGINAL_MAP[x,y] = "floor"
CURRENT_MAP[x,y] = "floor"
# if it's the player
if tile_name == "player":
# Create the sprite TileGrid
sprite = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width = 1,
height = 1,
tile_width = 16,
tile_height = 16,
default_tile = TILES[tile_name]['sprite_index'])
# set the position of sprite on screen
sprite.x = x*16
sprite.y = y*16
# set position in x,y tile coords for reference later
PLAYER_LOC = (x,y)
# add sprite to the group
sprite_group.append(sprite)
else: # not the player
# Create the sprite TileGrid
                        entity_sprite = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width = 1,
height = 1,
tile_width = 16,
tile_height = 16,
default_tile = TILES[tile_name]['sprite_index'])
# set the position of sprite on screen
# default to offscreen
                        entity_sprite.x = -16
                        entity_sprite.y = -16
# add the sprite object to ENTITY_SPRITES list
                        ENTITY_SPRITES.append(entity_sprite)
#print("setting entity_sprites_dict[%s,%s]" % (x,y))
# create an entity obj
entity_obj = {
"entity_sprite_index": len(ENTITY_SPRITES) - 1,
"map_tile_name": tile_name
}
# if there are no entities at this location yet
if (x,y) not in ENTITY_SPRITES_DICT:
# create a list and add it to the dictionary at the x,y location
ENTITY_SPRITES_DICT[x, y] = [entity_obj]
else:
# append the entity to the existing list in the dictionary
ENTITY_SPRITES_DICT[x, y].append(entity_obj)
else: # tile is not entity
# set the tile_name into MAP dictionaries
ORIGINAL_MAP[x, y] = tile_name
CURRENT_MAP[x, y] = tile_name
else: # tile type wasn't found in dict
print("tile: %s not found in TILES dict" % tile_name)
# add all entity sprites to the group
for entity in ENTITY_SPRITES:
sprite_group.append(entity)
# Add the Group to the Display
display.show(group)
# variables to store previous value of button state
prev_up = False
prev_down = False
prev_left = False
prev_right = False
# helper function returns true if player is allowed to move given direction
# based on can_walk property of the tiles next to the player
def can_player_move(direction):
if direction == UP:
tile_above_coords = (PLAYER_LOC[0], PLAYER_LOC[1] - 1)
return TILES[CURRENT_MAP[tile_above_coords[0], tile_above_coords[1]]]['can_walk']
if direction == DOWN:
tile_below_coords = (PLAYER_LOC[0], PLAYER_LOC[1] + 1)
return TILES[CURRENT_MAP[tile_below_coords[0], tile_below_coords[1]]]['can_walk']
if direction == LEFT:
tile_left_of_coords = (PLAYER_LOC[0]-1, PLAYER_LOC[1])
return TILES[CURRENT_MAP[tile_left_of_coords[0], tile_left_of_coords[1]]]['can_walk']
if direction == RIGHT:
tile_right_of_coords = (PLAYER_LOC[0] + 1, PLAYER_LOC[1])
return TILES[CURRENT_MAP[tile_right_of_coords[0], tile_right_of_coords[1]]]['can_walk']
# set the appropriate tiles into the CAMERA_VIEW dictionary
# based on given starting coords and size
def set_camera_view(startX, startY, width, height):
global CAMERA_OFFSET_X
global CAMERA_OFFSET_Y
# set the offset variables for use in other parts of the code
CAMERA_OFFSET_X = startX
CAMERA_OFFSET_Y = startY
# loop over the rows and indexes in the desired size section
for y_index, y in enumerate(range(startY, startY+height)):
# loop over columns and indexes in the desired size section
for x_index, x in enumerate(range(startX, startX+width)):
#print("setting camera_view[%s,%s]" % (x_index,y_index))
try:
# set the tile at the current coordinate of the MAP into the CAMERA_VIEW
CAMERA_VIEW[x_index,y_index] = CURRENT_MAP[x,y]
except KeyError:
# if coordinate is out of bounds set it to floor by default
CAMERA_VIEW[x_index,y_index] = "floor"
# draw the current CAMERA_VIEW dictionary and the ENTITY_SPRITES_DICT
def draw_camera_view():
# list that will hold all entities that have been drawn based on their MAP location
# any entities not in this list should get moved off the screen
drew_entities = []
#print(CAMERA_VIEW)
# loop over y tile coordinates
for y in range(0, SCREEN_HEIGHT_TILES):
# loop over x tile coordinates
for x in range(0, SCREEN_WIDTH_TILES):
# tile name at this location
tile_name = CAMERA_VIEW[x,y]
# if tile exists in the main dictionary
if tile_name in TILES.keys():
# if there are entity(s) at this location
if (x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y) in ENTITY_SPRITES_DICT:
# default background for entities is floor
castle[x, y] = TILES["floor"]['sprite_index']
# if it's not the player
if tile_name != "player":
# loop over all entities at this location
for entity_obj_at_tile in ENTITY_SPRITES_DICT[x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y]:
# set appropriate x,y screen coordinates based on tile coordinates
ENTITY_SPRITES[int(entity_obj_at_tile["entity_sprite_index"])].x = x * 16
ENTITY_SPRITES[int(entity_obj_at_tile["entity_sprite_index"])].y = y * 16
# add the index of the entity sprite to the drew_entities list so we know not to hide it later.
drew_entities.append(entity_obj_at_tile["entity_sprite_index"])
else: # no entities at this location
# set the sprite index of this tile into the CASTLE dictionary
castle[x, y] = TILES[tile_name]['sprite_index']
else: # tile type not found in main dictionary
# default to floor tile
castle[x, y] = TILES["floor"]['sprite_index']
# if the player is at this x,y tile coordinate accounting for camera offset
if PLAYER_LOC == ((x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y)):
# set player sprite screen coordinates
sprite.x = x*16
sprite.y = y*16
# loop over all entity sprites
for index in range(0, len(ENTITY_SPRITES)):
# if the sprite wasn't drawn then it's outside the camera view
if index not in drew_entities:
# hide the sprite by moving it off screen
ENTITY_SPRITES[index].x = int(-16)
ENTITY_SPRITES[index].y = int(-16)
# variable to store timestamp of last drawn frame
last_update_time = 0
# variables to store movement offset values
x_offset = 0
y_offset = 0
# main loop
while True:
# auto dim the screen
badger.auto_dim_display(delay=10)
# set the current button values into variables
cur_up = badger.button.up
cur_down = badger.button.down
cur_right = badger.button.right
cur_left = badger.button.left
# check for up button press / release
if not cur_up and prev_up:
if can_player_move(UP):
x_offset = 0
y_offset = - 1
# check for down button press / release
if not cur_down and prev_down:
if can_player_move(DOWN):
x_offset = 0
y_offset = 1
# check for right button press / release
if not cur_right and prev_right:
if can_player_move(RIGHT):
x_offset = 1
y_offset = 0
# check for left button press / release
if not cur_left and prev_left:
if can_player_move(LEFT):
print("can_move left")
x_offset = -1
y_offset = 0
# if any offset is not zero then we need to process player movement
if x_offset != 0 or y_offset != 0:
# variable to store if player is allowed to move
can_move = False
# coordinates the player is moving to
moving_to_coords = (PLAYER_LOC[0] + x_offset, PLAYER_LOC[1] + y_offset)
# tile name of the spot player is moving to
moving_to_tile_name = CURRENT_MAP[moving_to_coords[0], moving_to_coords[1]]
# if there are entity(s) at spot the player is moving to
if moving_to_coords in ENTITY_SPRITES_DICT:
print("found entity(s) where we are moving to")
# loop over all entities at the location player is moving to
for entity_obj in ENTITY_SPRITES_DICT[moving_to_coords]:
print("checking entity %s" % entity_obj["map_tile_name"])
# if the entity has a before_move behavior function
if "before_move" in TILES[entity_obj["map_tile_name"]].keys():
print("calling before_move %s, %s, %s" % (moving_to_coords,PLAYER_LOC,entity_obj))
# call the before_move behavior function act upon it's result
if TILES[entity_obj["map_tile_name"]]['before_move'](moving_to_coords,PLAYER_LOC,entity_obj):
# all the movement if it returned true
can_move = True
else:
# break and don't allow movement if it returned false
                        break
else: # entity does not have a before_move function
# allow movement
can_move = True
if can_move:
# set the player loc variable to the new coords
PLAYER_LOC = moving_to_coords
else: # no entities at the location player is moving to
# set player loc variable to new coords
PLAYER_LOC = moving_to_coords
# reset movement offset variables
y_offset = 0
x_offset = 0
# set previos button values for next iteration
prev_up = cur_up
prev_down = cur_down
prev_right = cur_right
prev_left = cur_left
# current time
now = time.monotonic()
# if it has been long enough based on FPS delay
if now > last_update_time + FPS_DELAY:
# if player is past x tile coordinate 4
if PLAYER_LOC[0] > 4:
# set camera to player location offset by 4
set_camera_view(int(PLAYER_LOC[0]-4),0,10,8)
else:
# set camera to 0,0
set_camera_view(0,0,10,8)
# draw the camera
draw_camera_view()
# store the last update time
last_update_time = now
| 37.038095 | 123 | 0.581846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,688 | 0.395372 |
4866676df99cb56da6528e0c45d5fc2aef3aec92 | 13,162 | py | Python | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
]
| 4 | 2017-09-16T01:23:48.000Z | 2017-09-22T08:02:47.000Z | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
]
| null | null | null | tools/harness/tests/compiler-rt_builtins.py | Harvard-PRINCESS/barrelfish-trunk-mirror | 1c98195d123046d985bb3952a591297c2ef6fdf9 | [
"MIT"
]
| 1 | 2020-03-06T15:48:10.000Z | 2020-03-06T15:48:10.000Z | ##########################################################################
# Copyright (c) 2009, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import tests
from common import TestCommon
from results import PassFailMultiResult
class CompilerRTBuiltinsAbstract(TestCommon):
def get_finish_string(self):
return "usleeptest_done"
def process_data(self, testdir, rawiter):
# the test passed if no error occurred
errors = []
for line in rawiter:
if "error in" in line:
errors.append(line)
if line.startswith("Assertion failed on core"):
errors.append(line)
return PassFailMultiResult(self.name, errors)
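# Each concrete test class below boots one list of compiler-rt Unit binaries and
# appends "usleeptest" as the final module; its "usleeptest_done" output (see
# get_finish_string above) marks the end of the run.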
# lists of tests to run for compiler-rt
vector_fp_tests = [
"compiler-rt/test/builtins/Unit/adddf3vfp_test",
"compiler-rt/test/builtins/Unit/addsf3vfp_test",
"compiler-rt/test/builtins/Unit/divdf3vfp_test",
"compiler-rt/test/builtins/Unit/divsf3vfp_test",
"compiler-rt/test/builtins/Unit/eqdf2vfp_test",
"compiler-rt/test/builtins/Unit/eqsf2vfp_test",
"compiler-rt/test/builtins/Unit/extebdsfdf2vfp_test",
"compiler-rt/test/builtins/Unit/fixdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixsfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunsdfsivfp_test",
"compiler-rt/test/builtins/Unit/fixunssfsivfp_test",
"compiler-rt/test/builtins/Unit/floatsidfvfp_test",
"compiler-rt/test/builtins/Unit/floatsisfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssidfvfp_test",
"compiler-rt/test/builtins/Unit/floatunssisfvfp_test",
"compiler-rt/test/builtins/Unit/gedf2vfp_test",
"compiler-rt/test/builtins/Unit/gesf2vfp_test",
"compiler-rt/test/builtins/Unit/gtdf2vfp_test",
"compiler-rt/test/builtins/Unit/gtsf2vfp_test",
"compiler-rt/test/builtins/Unit/ledf2vfp_test",
"compiler-rt/test/builtins/Unit/lesf2vfp_test",
"compiler-rt/test/builtins/Unit/ltdf2vfp_test",
"compiler-rt/test/builtins/Unit/ltsf2vfp_test",
"compiler-rt/test/builtins/Unit/muldf3vfp_test",
"compiler-rt/test/builtins/Unit/mulsf3vfp_test",
"compiler-rt/test/builtins/Unit/nedf2vfp_test",
"compiler-rt/test/builtins/Unit/negdf2vfp_test",
"compiler-rt/test/builtins/Unit/negsf2vfp_test",
"compiler-rt/test/builtins/Unit/nesf2vfp_test",
"compiler-rt/test/builtins/Unit/subdf3vfp_test",
"compiler-rt/test/builtins/Unit/subsf3vfp_test",
"compiler-rt/test/builtins/Unit/truncdfsf2vfp_test",
"compiler-rt/test/builtins/Unit/unorddf2vfp_test",
"compiler-rt/test/builtins/Unit/unordsf2vfp_test",
]
@tests.add_test
class CompilerRTBuiltinsVfp(CompilerRTBuiltinsAbstract):
name = 'compiler-rt-vfp'
def get_modules(self, build, machine):
modules = super(CompilerRTBuiltinsVfp, self).get_modules(build, machine)
for m in vector_fp_tests:
modules.add_module(m)
modules.add_module("usleeptest", [ "5" ])
return modules
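# fp_tests: the general (non-VFP) builtins suite; commented-out entries are Unit
# tests left out of this run (inline notes record a reason where one is known).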
fp_tests = [
"compiler-rt/test/builtins/Unit/absvdi2_test",
"compiler-rt/test/builtins/Unit/absvsi2_test",
"compiler-rt/test/builtins/Unit/absvti2_test",
"compiler-rt/test/builtins/Unit/addtf3_test",
"compiler-rt/test/builtins/Unit/addvdi3_test",
"compiler-rt/test/builtins/Unit/addvsi3_test",
"compiler-rt/test/builtins/Unit/addvti3_test",
"compiler-rt/test/builtins/Unit/ashldi3_test",
"compiler-rt/test/builtins/Unit/ashlti3_test",
"compiler-rt/test/builtins/Unit/ashrdi3_test",
"compiler-rt/test/builtins/Unit/ashrti3_test",
"compiler-rt/test/builtins/Unit/bswapdi2_test",
"compiler-rt/test/builtins/Unit/bswapsi2_test",
# "compiler-rt/test/builtins/Unit/clear_cache_test",
"compiler-rt/test/builtins/Unit/clzdi2_test",
"compiler-rt/test/builtins/Unit/clzsi2_test",
"compiler-rt/test/builtins/Unit/clzti2_test",
"compiler-rt/test/builtins/Unit/cmpdi2_test",
"compiler-rt/test/builtins/Unit/cmpti2_test",
"compiler-rt/test/builtins/Unit/comparedf2_test",
"compiler-rt/test/builtins/Unit/comparesf2_test",
"compiler-rt/test/builtins/Unit/ctzdi2_test",
"compiler-rt/test/builtins/Unit/ctzsi2_test",
"compiler-rt/test/builtins/Unit/ctzti2_test",
"compiler-rt/test/builtins/Unit/divdc3_test",
"compiler-rt/test/builtins/Unit/divdi3_test",
"compiler-rt/test/builtins/Unit/divmodsi4_test",
"compiler-rt/test/builtins/Unit/divsc3_test",
"compiler-rt/test/builtins/Unit/divsi3_test",
# "compiler-rt/test/builtins/Unit/divtc3_test",
"compiler-rt/test/builtins/Unit/divtf3_test",
"compiler-rt/test/builtins/Unit/divti3_test",
"compiler-rt/test/builtins/Unit/divxc3_test",
# "compiler-rt/test/builtins/Unit/enable_execute_stack_test",
"compiler-rt/test/builtins/Unit/eqtf2_test",
"compiler-rt/test/builtins/Unit/extenddftf2_test",
# "compiler-rt/test/builtins/Unit/extendhfsf2_test",
"compiler-rt/test/builtins/Unit/extendsftf2_test",
"compiler-rt/test/builtins/Unit/ffsdi2_test",
"compiler-rt/test/builtins/Unit/ffsti2_test",
"compiler-rt/test/builtins/Unit/fixdfdi_test",
"compiler-rt/test/builtins/Unit/fixdfti_test",
"compiler-rt/test/builtins/Unit/fixsfdi_test",
"compiler-rt/test/builtins/Unit/fixsfti_test",
"compiler-rt/test/builtins/Unit/fixtfdi_test",
"compiler-rt/test/builtins/Unit/fixtfsi_test",
"compiler-rt/test/builtins/Unit/fixtfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunsdfdi_test",
"compiler-rt/test/builtins/Unit/fixunsdfsi_test",
"compiler-rt/test/builtins/Unit/fixunsdfti_test",
# this errors on 0X1P+64
#"compiler-rt/test/builtins/Unit/fixunssfdi_test",
"compiler-rt/test/builtins/Unit/fixunssfsi_test",
"compiler-rt/test/builtins/Unit/fixunssfti_test",
"compiler-rt/test/builtins/Unit/fixunstfdi_test",
"compiler-rt/test/builtins/Unit/fixunstfsi_test",
"compiler-rt/test/builtins/Unit/fixunstfti_test",
"compiler-rt/test/builtins/Unit/fixunsxfdi_test",
"compiler-rt/test/builtins/Unit/fixunsxfsi_test",
"compiler-rt/test/builtins/Unit/fixunsxfti_test",
"compiler-rt/test/builtins/Unit/fixxfdi_test",
"compiler-rt/test/builtins/Unit/fixxfti_test",
"compiler-rt/test/builtins/Unit/floatdidf_test",
"compiler-rt/test/builtins/Unit/floatdisf_test",
"compiler-rt/test/builtins/Unit/floatditf_test",
"compiler-rt/test/builtins/Unit/floatdixf_test",
"compiler-rt/test/builtins/Unit/floatsitf_test",
"compiler-rt/test/builtins/Unit/floattidf_test",
"compiler-rt/test/builtins/Unit/floattisf_test",
"compiler-rt/test/builtins/Unit/floattixf_test",
"compiler-rt/test/builtins/Unit/floatundidf_test",
"compiler-rt/test/builtins/Unit/floatundisf_test",
"compiler-rt/test/builtins/Unit/floatunditf_test",
"compiler-rt/test/builtins/Unit/floatundixf_test",
"compiler-rt/test/builtins/Unit/floatunsitf_test",
"compiler-rt/test/builtins/Unit/floatuntidf_test",
"compiler-rt/test/builtins/Unit/floatuntisf_test",
"compiler-rt/test/builtins/Unit/floatuntixf_test",
# "compiler-rt/test/builtins/Unit/gcc_personality_test",
"compiler-rt/test/builtins/Unit/getf2_test",
"compiler-rt/test/builtins/Unit/gttf2_test",
"compiler-rt/test/builtins/Unit/letf2_test",
"compiler-rt/test/builtins/Unit/lshrdi3_test",
"compiler-rt/test/builtins/Unit/lshrti3_test",
"compiler-rt/test/builtins/Unit/lttf2_test",
"compiler-rt/test/builtins/Unit/moddi3_test",
"compiler-rt/test/builtins/Unit/modsi3_test",
"compiler-rt/test/builtins/Unit/modti3_test",
"compiler-rt/test/builtins/Unit/muldc3_test",
"compiler-rt/test/builtins/Unit/muldi3_test",
"compiler-rt/test/builtins/Unit/mulodi4_test",
"compiler-rt/test/builtins/Unit/mulosi4_test",
"compiler-rt/test/builtins/Unit/muloti4_test",
"compiler-rt/test/builtins/Unit/mulsc3_test",
"compiler-rt/test/builtins/Unit/multc3_test",
"compiler-rt/test/builtins/Unit/multf3_test",
"compiler-rt/test/builtins/Unit/multi3_test",
"compiler-rt/test/builtins/Unit/mulvdi3_test",
"compiler-rt/test/builtins/Unit/mulvsi3_test",
"compiler-rt/test/builtins/Unit/mulvti3_test",
"compiler-rt/test/builtins/Unit/mulxc3_test",
"compiler-rt/test/builtins/Unit/negdi2_test",
"compiler-rt/test/builtins/Unit/negti2_test",
"compiler-rt/test/builtins/Unit/negvdi2_test",
"compiler-rt/test/builtins/Unit/negvsi2_test",
"compiler-rt/test/builtins/Unit/negvti2_test",
"compiler-rt/test/builtins/Unit/netf2_test",
"compiler-rt/test/builtins/Unit/paritydi2_test",
"compiler-rt/test/builtins/Unit/paritysi2_test",
"compiler-rt/test/builtins/Unit/parityti2_test",
"compiler-rt/test/builtins/Unit/popcountdi2_test",
"compiler-rt/test/builtins/Unit/popcountsi2_test",
"compiler-rt/test/builtins/Unit/popcountti2_test",
"compiler-rt/test/builtins/Unit/powidf2_test",
"compiler-rt/test/builtins/Unit/powisf2_test",
"compiler-rt/test/builtins/Unit/powitf2_test",
"compiler-rt/test/builtins/Unit/powixf2_test",
"compiler-rt/test/builtins/Unit/subtf3_test",
"compiler-rt/test/builtins/Unit/subvdi3_test",
"compiler-rt/test/builtins/Unit/subvsi3_test",
"compiler-rt/test/builtins/Unit/subvti3_test",
# "compiler-rt/test/builtins/Unit/trampoline_setup_test",
# "compiler-rt/test/builtins/Unit/truncdfhf2_test",
"compiler-rt/test/builtins/Unit/truncdfsf2_test",
# "compiler-rt/test/builtins/Unit/truncsfhf2_test",
"compiler-rt/test/builtins/Unit/trunctfdf2_test",
"compiler-rt/test/builtins/Unit/trunctfsf2_test",
"compiler-rt/test/builtins/Unit/ucmpdi2_test",
"compiler-rt/test/builtins/Unit/ucmpti2_test",
"compiler-rt/test/builtins/Unit/udivdi3_test",
"compiler-rt/test/builtins/Unit/udivmoddi4_test",
"compiler-rt/test/builtins/Unit/udivmodsi4_test",
"compiler-rt/test/builtins/Unit/udivmodti4_test",
"compiler-rt/test/builtins/Unit/udivsi3_test",
"compiler-rt/test/builtins/Unit/udivti3_test",
"compiler-rt/test/builtins/Unit/umoddi3_test",
"compiler-rt/test/builtins/Unit/umodsi3_test",
"compiler-rt/test/builtins/Unit/umodti3_test",
"compiler-rt/test/builtins/Unit/unordtf2_test",
]
def get_modules_tpl(ts, self, build, machine):
'''Function template for get_modules() for each compiler-rt test case'''
modules = super(CompilerRTBuiltinsAbstract, self).get_modules(build, machine)
for m in ts:
if machine.name.startswith("panda") and \
(m.endswith("floatdisf_test") or m.endswith("floatdidf_test")):
# Skip failing test on pandaboard
continue
modules.add_module(m)
modules.add_module("usleeptest", [ "5" ])
return modules
def chunker(seq, size):
'''Helper function: this takes a sequence `seq` and splits it up into
`size`-sized chunks, except for the last chunk which is just the <= size
long remainder of the sequence'''
return (seq[pos:pos+size] for pos in xrange(0, len(seq), size))
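# For example (illustrative): list(chunker([1, 2, 3, 4, 5], 2)) yields
# [[1, 2], [3, 4], [5]] -- full chunks first, then the short remainder.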
# generate test-cases with <=CHUNK_SIZE compiler-rt tests each
CHUNK_SIZE=35
# array just to keep the class objects somewhere
compiler_rt_tests_classes = []
for i, ts in enumerate(chunker(fp_tests, CHUNK_SIZE)):
# append new class to our array
compiler_rt_tests_classes.append(
# this is essentially the decorator @tests.add_test
tests.add_test(
# type is the (built-in) base-class for python classes, here we
# construct classes by calling its constructor
# signature of type constructor:
# type(classname, baseclass tuple, dict with methods/attributes)
type('CompilerRTBuiltins%d' % (i+1),
(CompilerRTBuiltinsAbstract,),
{ 'name': 'compiler-rt-fp%d' % (i+1),
# partially bind the get_modules() template to select the
# right set of tests. Note the ts=ts in the lambda
# arguments, this prevents python's default late-binding
# for closure arguments.
'get_modules':
lambda s, b, m, ts=ts: get_modules_tpl(ts, s, b, m)})))
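# Minimal, self-contained sketch (not part of the original harness) of the
# late-binding pitfall that the ts=ts default argument above works around:
#
#   late = [lambda: i for i in range(3)]
#   bound = [lambda i=i: i for i in range(3)]
#   [f() for f in late]   # -> [2, 2, 2]: every closure sees the final i
#   [f() for f in bound]  # -> [0, 1, 2]: defaults are captured per iteration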
| 50.429119 | 81 | 0.677557 | 827 | 0.062832 | 0 | 0 | 366 | 0.027807 | 0 | 0 | 9,667 | 0.734463 |
4866eb2646559988f7b4c029e556146f3b4e3f4a | 4,090 | py | Python | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
]
| null | null | null | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
]
| null | null | null | api_base.py | mpalazzolo/API-Base | b0a7c3ba9bb5add59a43d3dec36354318478e226 | [
"MIT"
]
| null | null | null | import requests
from requests.exceptions import HTTPError
import time
class APIBase:
"""
This class is to be used as a base to build an API library.
    Authorization token generation and endpoint functions must be implemented by subclasses.
"""
def __init__(self, root, proxies=None, requests_session=True, max_retries=10, requests_timeout=None):
"""
Initialize the class
        :param root: Root URL for the API
:param proxies: A dictionary of proxies, if needed
        :param requests_session: Use the requests Session class. Speeds up API calls significantly when set to True
        :param max_retries: Maximum number of times to retry an API call before stopping
:param requests_timeout: Number of seconds requests should wait before timing out
"""
self.proxies = proxies
self.token_str = "" # Encrypted API token. This will need to be set manually or by a method of a subclass
self.root = root
self.max_retries = max_retries
self.requests_timeout = requests_timeout
if requests_session:
self._session = requests.Session()
else:
self._session = requests.api # individual calls, slower
def _auth_headers(self):
"""
Get header for API request
:return: header in dictionary format
"""
if self.token_str:
return {'Authorization': 'Bearer {}'.format(self.token_str)}
else:
return {}
def _call(self, method, url, params):
"""
Make a call to the API
:param method: 'GET', 'POST', 'DELETE', or 'PUT'
:param url: URL of API endpoint
        :param params: API parameters
:return: JSON data from the API
"""
if not url.startswith('http'):
url = self.root + url
headers = self._auth_headers()
headers['Content-Type'] = 'application/json'
r = self._session.request(method, url,
headers=headers,
proxies=self.proxies,
params=params,
timeout=self.requests_timeout)
r.raise_for_status() # Check for error
return r.json()
def _get(self, url, **kwargs):
"""
GET request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
retries = self.max_retries
delay = 1
while retries > 0:
try:
return self._call('GET', url, kwargs)
except HTTPError as e: # Retry for some known issues
retries -= 1
status = e.response.status_code
if status == 429 or (500 <= status < 600):
if retries < 0:
raise
else:
print('retrying ...' + str(delay) + ' secs')
time.sleep(delay + 1)
delay += 1
else:
raise
except Exception as e:
print('exception', str(e))
retries -= 1
if retries >= 0:
print('retrying ...' + str(delay) + 'secs')
time.sleep(delay + 1)
delay += 1
else:
raise
def _post(self, url, **kwargs):
"""
POST request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('POST', url, kwargs)
def _delete(self, url, **kwargs):
"""
DELETE request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('DELETE', url, kwargs)
def _put(self, url, **kwargs):
"""
PUT request from the API
:param url: URL for API endpoint
:return: JSON data from the API
"""
return self._call('PUT', url, kwargs)
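# Minimal usage sketch (illustrative only; "ExampleAPI", its root URL and the
# "/items" endpoint are placeholders, not part of this module): subclass
# APIBase, set token_str if the service needs auth, and wrap each endpoint in
# a method that delegates to the _get/_post/_put/_delete helpers.
#
#   class ExampleAPI(APIBase):
#       def __init__(self):
#           super().__init__(root='https://api.example.com')
#       def list_items(self, **params):
#           return self._get('/items', **params)
#
#   api = ExampleAPI()
#   items = api.list_items(page=1)  # GET https://api.example.com/items?page=1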
| 30.75188 | 114 | 0.526406 | 4,016 | 0.981907 | 0 | 0 | 0 | 0 | 0 | 0 | 1,734 | 0.423961 |
486787d9c3efbc67538ff3c74ea68506a0623fb8 | 1,143 | py | Python | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
]
| 1 | 2021-02-28T21:31:18.000Z | 2021-02-28T21:31:18.000Z | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
]
| null | null | null | plasmasm/arch/X64.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2011-2020 Airbus, [email protected]
containers = { 'ELF': 'X86_64', 'MACHO': 'X86_64' }
try:
from plasmasm.python.compatibility import set
except ImportError:
pass
from plasmasm.arch.I386 import opcodes as opcodes_x86
x64_att_opcodes = set([
'jmpq', 'callq', 'retq', 'popq', 'pushq',
'movq', 'cmpq', 'testq', 'leaq', 'btq', 'bswapq',
'notq', 'orq', 'xorq', 'andq', 'bsfq', 'bslq', 'bsrq',
'rolq', 'rorq', 'sarq', 'salq', 'shrq', 'shlq', 'sbbq',
'negq', 'decq', 'incq', 'adcq', 'addq', 'subq',
'mulq', 'divq', 'imulq', 'idivq', 'shldq', 'shrdq',
'cltq', 'cqto', 'movabsq', 'movsbq', 'movslq', 'movswq',
'insq', 'movsq', 'outsq', 'lodsq', 'stosq', 'cmpsq', 'scasq',
'pextrq', 'pinsrq',
'cvtsi2sdq', 'cvtsi2ssq', 'cvttsd2siq', 'cvttss2siq',
])
suffix = [ 'a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le', 'nb', 'nc', 'ne', 'np', 'ns', 'nz', 'p', 's', ]
x64_att_opcodes.update(set([ 'cmov'+s+'q' for s in suffix ]))
del suffix
x64_att_opcodes.update(opcodes_x86['I386-att'])
opcodes = {
'X64-att': x64_att_opcodes,
}
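# Usage sketch (illustrative): the table maps a syntax family to its set of
# known mnemonics, so callers can test membership directly, e.g.
#   'movabsq' in opcodes['X64-att']   # True, listed above
#   'cmovneq' in opcodes['X64-att']   # True, generated from the suffix list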
| 42.333333 | 112 | 0.551181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.494313 |
4868d79bbf2ff6bbae4f4cb4d9abf9fab912436f | 724 | py | Python | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
]
| 38 | 2020-10-14T09:40:58.000Z | 2022-03-17T10:45:22.000Z | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
]
| 188 | 2020-09-29T09:43:54.000Z | 2022-03-04T08:45:42.000Z | ansiblelater/rules/CheckScmInSrc.py | ankitdobhal/ansible-later | a107cd2821e310fd459a7f9b802d5794f2b96f35 | [
"MIT"
]
| 4 | 2021-02-10T03:35:19.000Z | 2022-01-17T15:54:39.000Z | from ansible.parsing.yaml.objects import AnsibleMapping
from ansiblelater.standard import StandardBase
class CheckScmInSrc(StandardBase):
sid = "ANSIBLE0005"
description = "Use `scm:` key rather than `src: scm+url`"
helptext = "usage of `src: scm+url` not recommended"
version = "0.1"
types = ["rolesfile"]
def check(self, candidate, settings):
roles, errors = self.get_tasks(candidate, settings)
if not errors:
for role in roles:
if isinstance(role, AnsibleMapping):
if "+" in role.get("src"):
errors.append(self.Error(role["__line__"], self.helptext))
return self.Result(candidate.path, errors)
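# Illustrative example (hypothetical URL, not part of the rule): an entry like
#   - src: git+https://example.com/roles/myrole
#     version: master
# is flagged, while the recommended form names the SCM explicitly:
#   - src: https://example.com/roles/myrole
#     scm: git
#     version: master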
| 30.166667 | 82 | 0.627072 | 617 | 0.85221 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.180939 |
486936b454230e71425f5f21ffabf8c3b40a119e | 595 | py | Python | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
]
| 3 | 2021-05-15T08:18:09.000Z | 2021-05-17T04:41:57.000Z | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
]
| null | null | null | DMOJ/CCC/slot machine.py | eddiegz/Personal-C | f7869826216e5c665f8f646502141f0dc680e545 | [
"MIT"
]
| null | null | null | quarter=int(input())
p1=int(input())
p2=int(input())
p3=int(input())
time=0
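# Simulation: Martha plays machines 1, 2 and 3 in rotation, one quarter per
# play. Machine 1 pays 30 quarters on its 35th play, machine 2 pays 60 on its
# 100th, and machine 3 pays 9 on its 10th; p1/p2/p3 count the plays since each
# machine's last payout.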
while quarter>0:
if quarter == 0:
continue
p1+=1
quarter-=1
time+=1
if p1==35:
quarter+=30
p1=0
if quarter == 0:
continue
time+=1
p2+=1
quarter-=1
if p2==100:
p2=0
quarter+=60
if quarter == 0:
continue
p3+=1
time+=1
quarter-=1
if p3==10:
quarter+=9
p3=0
print(f'Martha plays {time} times before going broke.')
| 16.081081 | 56 | 0.438655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.080672 |
4869a5e537b1616b1387d41f76532922834d0c3e | 327 | py | Python | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
]
| null | null | null | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
]
| null | null | null | project/app/migrations/0003_auto_20210125_0924.py | dbinetti/kidsallin | 147491cdfbe812ffde91725193ec16c03083c1da | [
"BSD-3-Clause"
]
| null | null | null | # Generated by Django 3.1.5 on 2021-01-25 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20210124_0610'),
]
operations = [
migrations.RenameModel(
old_name='Parent',
new_name='Account',
),
]
| 18.166667 | 47 | 0.590214 | 242 | 0.740061 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.287462 |
486abe98f15277d75707a2bda0dddf48de43bab7 | 28,203 | py | Python | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
]
| null | null | null | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
]
| null | null | null | cinder/volume/drivers/emc/emc_vmax_provision.py | kazum/cinder | 370b8e60c3166b289c8da924a227dd1bc63f8b8a | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
class EMCVMAXProvision(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
self.protocol = prtcl
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
def delete_volume_from_pool(
self, conn, storageConfigservice, volumeInstanceName, volumeName):
"""Given the volume instance remove it from the pool.
        :param conn: the connection to the ecom server
        :param storageConfigservice: the storage configuration service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :returns: rc - return code
"""
rc, job = conn.InvokeMethod(
'EMCReturnToStoragePool', storageConfigservice,
TheElements=[volumeInstanceName])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc
def create_volume_from_pool(
self, conn, storageConfigService, volumeName,
poolInstanceName, volumeSize):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service
:param volumeName: the volume name (String)
:param poolInstanceName: the pool instance name to create
the dummy volume in
:param volumeSize: volume size (String)
:returns: volumeDict - the volume dict
"""
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, ElementName=volumeName,
InPool=poolInstanceName,
ElementType=self.utils.get_num(THINPROVISIONING, '16'),
Size=self.utils.get_num(volumeSize, '64'),
EMCBindElements=False)
LOG.debug("Create Volume: %(volumename)s Return code: %(rc)lu"
% {'volumename': volumeName,
'rc': rc})
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Find the newly created volume
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
def create_and_get_storage_group(self, conn, controllerConfigService,
storageGroupName, volumeInstanceName):
"""Create a storage group and return it.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
        :param storageGroupName: the storage group name (String)
:param volumeInstanceName: the volume instance name
:returns: foundStorageGroupInstanceName - instance name of the
default storage group
"""
rc, job = conn.InvokeMethod(
'CreateGroup', controllerConfigService, GroupName=storageGroupName,
Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
Members=[volumeInstanceName])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': storageGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, storageGroupName)
return foundStorageGroupInstanceName
def create_storage_group_no_members(
self, conn, controllerConfigService, groupName):
"""Create a new storage group that has no members.
        :param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the proposed group name
:returns: foundStorageGroupInstanceName - the instance Name of
the storage group
"""
rc, job = conn.InvokeMethod(
'CreateGroup', controllerConfigService, GroupName=groupName,
Type=self.utils.get_num(STORAGEGROUPTYPE, '16'),
DeleteWhenBecomesUnassociated=False)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'groupName': groupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup']
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
volpath = associators[0].path
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
def remove_device_from_storage_group(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstanceName, volumeName):
"""Remove a volume from a storage group.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration service
:param storageGroupInstanceName: the instance name of the storage group
:param volumeInstanceName: the instance name of the volume
:param volumeName: the volume name (String)
:returns: rc - the return code of the job
"""
rc, jobDict = conn.InvokeMethod('RemoveMembers',
controllerConfigService,
MaskingGroup=storageGroupInstanceName,
Members=[volumeInstanceName])
if rc != 0L:
rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict)
if rc != 0L:
exceptionMessage = (_(
"Error removing volume %(vol)s. %(error)s")
% {'vol': volumeName, 'error': errorDesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc
def add_members_to_masking_group(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstanceName, volumeName):
"""Add a member to a masking group group.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration service
:param storageGroupInstanceName: the instance name of the storage group
:param volumeInstanceName: the instance name of the volume
:param volumeName: the volume name (String)
"""
rc, job = conn.InvokeMethod(
'AddMembers', controllerConfigService,
MaskingGroup=storageGroupInstanceName,
Members=[volumeInstanceName])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error mapping volume %(vol)s. %(error)s")
% {'vol': volumeName, 'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
def unbind_volume_from_storage_pool(
self, conn, storageConfigService, poolInstanceName,
volumeInstanceName, volumeName):
"""Unbind a volume from a pool and return the unbound volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service
instance name
:param poolInstanceName: the pool instance name
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name
:returns: unboundVolumeInstance - the unbound volume instance
"""
rc, job = conn.InvokeMethod(
'EMCUnBindElement',
storageConfigService,
InPool=poolInstanceName,
TheElement=volumeInstanceName)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error unbinding volume %(vol)s from pool. %(error)s")
% {'vol': volumeName, 'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc, job
def modify_composite_volume(
self, conn, elementCompositionService, theVolumeInstanceName,
inVolumeInstanceName):
"""Given a composite volume add a storage volume to it.
        :param conn: the connection to the ecom server
:param elementCompositionService: the element composition service
:param theVolumeInstanceName: the existing composite volume
:param inVolumeInstanceName: the volume you wish to add to the
composite volume
:returns: rc - return code
:returns: job - job
"""
rc, job = conn.InvokeMethod(
'CreateOrModifyCompositeElement',
elementCompositionService,
TheElement=theVolumeInstanceName,
InElements=[inVolumeInstanceName])
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error adding volume to composite volume. "
"Error is: %(error)s")
% {'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc, job
def create_composite_volume(
self, conn, elementCompositionService, volumeSize, volumeName,
poolInstanceName, compositeType, numMembers):
"""Create a new volume using the auto meta feature.
        :param conn: the connection to the ecom server
:param elementCompositionService: the element composition service
:param volumeSize: the size of the volume
:param volumeName: user friendly name
:param poolInstanceName: the pool to bind the composite volume to
:param compositeType: the proposed composite type of the volume
            e.g. striped/concatenated
        :param numMembers: the number of meta members to make up the composite.
            If it is 1 then a non-composite is created
:returns: rc
:returns: errordesc
"""
newMembers = 2
LOG.debug(
"Parameters for CreateOrModifyCompositeElement: "
"elementCompositionService: %(elementCompositionService)s "
"provisioning: %(provisioning)lu "
"volumeSize: %(volumeSize)s "
"newMembers: %(newMembers)lu "
"poolInstanceName: %(poolInstanceName)s "
"compositeType: %(compositeType)lu "
"numMembers: %(numMembers)s "
% {'elementCompositionService': elementCompositionService,
'provisioning': THINPROVISIONINGCOMPOSITE,
'volumeSize': volumeSize,
'newMembers': newMembers,
'poolInstanceName': poolInstanceName,
'compositeType': compositeType,
'numMembers': numMembers})
rc, job = conn.InvokeMethod(
'CreateOrModifyCompositeElement', elementCompositionService,
ElementName=volumeName,
ElementType=self.utils.get_num(THINPROVISIONINGCOMPOSITE, '16'),
Size=self.utils.get_num(volumeSize, '64'),
ElementSource=self.utils.get_num(newMembers, '16'),
EMCInPools=[poolInstanceName],
CompositeType=self.utils.get_num(compositeType, '16'),
EMCNumberOfMembers=self.utils.get_num(numMembers, '32'))
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Volume: %(volumename)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'volumename': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Find the newly created volume
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
def create_new_composite_volume(
self, conn, elementCompositionService, compositeHeadInstanceName,
compositeMemberInstanceName, compositeType):
"""Creates a new composite volume.
Given a bound composite head and an unbound composite member
create a new composite volume.
        :param conn: the connection to the ecom server
:param elementCompositionService: the element composition service
:param compositeHeadInstanceName: the composite head. This can be bound
:param compositeMemberInstanceName: the composite member.
This must be unbound
:param compositeType: the composite type e.g striped or concatenated
:returns: rc - return code
:returns: errordesc - descriptions of the error
"""
rc, job = conn.InvokeMethod(
'CreateOrModifyCompositeElement', elementCompositionService,
ElementType=self.utils.get_num('2', '16'),
InElements=(
[compositeHeadInstanceName, compositeMemberInstanceName]),
CompositeType=self.utils.get_num(compositeType, '16'))
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Creating new composite Volume Return code: %(rc)lu."
"Error: %(error)s")
% {'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc, job
def _migrate_volume(
self, conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName):
"""Migrate a volume to another pool.
:param conn: the connection to the ecom server
:param storageRelocationServiceInstanceName: the storage relocation
service
:param volumeInstanceName: the volume to be migrated
:param targetPoolInstanceName: the target pool to migrate the volume to
:returns: rc - return code
"""
rc, job = conn.InvokeMethod(
'RelocateStorageVolumesToStoragePool',
storageRelocationServiceInstanceName,
TheElements=[volumeInstanceName],
TargetPool=targetPoolInstanceName)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Migrating volume from one pool to another. "
"Return code: %(rc)lu. Error: %(error)s")
% {'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc
def migrate_volume_to_storage_pool(
self, conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName):
"""Given the storage system name, get the storage relocation service.
:param conn: the connection to the ecom server
:param storageRelocationServiceInstanceName: the storage relocation
service
:param volumeInstanceName: the volume to be migrated
:param targetPoolInstanceName: the target pool to migrate the
volume to.
:returns: rc
"""
LOG.debug(
"Volume instance name is %(volumeInstanceName)s. "
"Pool instance name is : %(targetPoolInstanceName)s. "
% {'volumeInstanceName': volumeInstanceName,
'targetPoolInstanceName': targetPoolInstanceName})
rc = -1
try:
rc = self._migrate_volume(
conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName)
except Exception as ex:
if 'source of a migration session' in six.text_type(ex):
try:
rc = self._terminate_migrate_session(
conn, volumeInstanceName)
except Exception as ex:
LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to terminate migrate session"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
try:
rc = self._migrate_volume(
conn, storageRelocationServiceInstanceName,
volumeInstanceName, targetPoolInstanceName)
except Exception as ex:
LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to migrate volume for the second time"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.error(_LE("Exception: %s") % six.text_type(ex))
exceptionMessage = (_(
"Failed to migrate volume for the first time"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc
def _terminate_migrate_session(self, conn, volumeInstanceName):
"""Given the volume instance terminate a migrate session.
:param conn: the connection to the ecom server
:param volumeInstanceName: the volume to be migrated
:returns: rc
"""
rc, job = conn.InvokeMethod(
'RequestStateChange', volumeInstanceName,
RequestedState=self.utils.get_num(32769, '16'))
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Terminating migrate session. "
"Return code: %(rc)lu. Error: %(error)s")
% {'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc
def create_element_replica(
self, conn, repServiceInstanceName, cloneName,
sourceName, sourceInstance, targetInstance):
"""Make SMI-S call to create replica for source element.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param cloneName: replica name
:param sourceName: source volume name
:param sourceInstance: source volume instance
:param targetInstance: target volume instance
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
if targetInstance is None:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=self.utils.get_num(8, '16'),
SourceElement=sourceInstance.path)
else:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=self.utils.get_num(8, '16'),
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error Create Cloned Volume: "
"Volume: %(cloneName)s Source Volume:"
"%(sourceName)s. Return code: %(rc)lu. "
"Error: %(error)s")
% {'cloneName': cloneName,
'sourceName': sourceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc, job
def delete_clone_relationship(
self, conn, repServiceInstanceName, syncInstanceName,
cloneName, sourceName):
"""Deletes the relationship between the clone and source volume.
        Makes an SMI-S call to break the clone relationship between the clone
        volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param cloneName: replica name
:param sourceName: source volume name
:param sourceInstance: source volume instance
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
'''
8/Detach - Delete the synchronization between two storage objects.
Treat the objects as independent after the synchronization is deleted.
'''
rc, job = conn.InvokeMethod(
'ModifyReplicaSynchronization', repServiceInstanceName,
Operation=self.utils.get_num(8, '16'),
Synchronization=syncInstanceName)
LOG.debug("Break clone relationship: Volume: %(cloneName)s "
"Source Volume: %(sourceName)s Return code: %(rc)lu"
% {'cloneName': cloneName,
'sourceName': sourceName,
'rc': rc})
if rc != 0L:
rc, errordesc = self.utils.wait_for_job_complete(conn, job)
if rc != 0L:
exceptionMessage = (_(
"Error break clone relationship: "
"Clone Volume: %(cloneName)s "
"Source Volume: %(sourceName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'cloneName': cloneName,
'sourceName': sourceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return rc, job
def get_target_endpoints(self, conn, storageHardwareService, hardwareId):
"""Given the hardwareId get the
:param conn: the connection to the ecom server
:param storageHardwareService: the storage HardwareId Service
:param hardwareId: the hardware Id
:returns: rc
        :returns: targetEndpoints
"""
rc, targetEndpoints = conn.InvokeMethod(
'EMCGetTargetEndpoints', storageHardwareService,
HardwareId=hardwareId)
if rc != 0L:
exceptionMessage = (_("Error finding Target WWNs."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return rc, targetEndpoints
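# Usage sketch (illustrative only): callers are expected to hold a pywbem
# connection plus the relevant CIM service instance names, typically obtained
# via EMCVMAXUtils; "conn", "configService" and "poolInstance" below are
# placeholders, not values defined in this module.
#
#   provision = EMCVMAXProvision('iSCSI')
#   volumeDict, rc = provision.create_volume_from_pool(
#       conn, configService, 'vol-1', poolInstance, '1073741824')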
| 42.731818 | 79 | 0.587987 | 27,228 | 0.965429 | 0 | 0 | 0 | 0 | 0 | 0 | 11,856 | 0.420381 |
486b39af5811634dc06771353577ccba06dfa1ca | 9,840 | py | Python | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
]
| 1,351 | 2015-01-03T08:25:40.000Z | 2022-03-31T09:14:08.000Z | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
]
| 7,009 | 2015-01-14T16:22:45.000Z | 2022-03-31T17:18:04.000Z | tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
]
| 901 | 2015-01-11T19:21:08.000Z | 2022-03-18T18:21:33.000Z | '''
Test redirection behavior to invalid addresses
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import re
import os
import socket
import sys
Test.Summary = '''
Test redirection behavior to invalid addresses
'''
Test.ContinueOnFail = False
Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py'))
dns = Test.MakeDNServer('dns')
# This record is used in each test case to get the initial redirect response from the origin that we will handle.
dnsRecords = {'iwillredirect.test': ['127.0.0.1']}
host = socket.gethostname()
ipv4addrs = set()
try:
ipv4addrs = set([ip for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET == family])
except socket.gaierror:
pass
ipv6addrs = set()
try:
ipv6addrs = set(["[{0}]".format(ip.split('%')[0]) for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET6 == family and 'fe80' != ip[0:4]]) # Skip link-local addresses.
except socket.gaierror:
pass
origin = Test.MakeOriginServer('origin', ip='0.0.0.0')
ArbitraryTimestamp = '12345678'
# This is for cases when the content is actually fetched from the invalid address.
request_header = {
'headers': ('GET / HTTP/1.1\r\n'
'Host: *\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
response_header = {
'headers': ('HTTP/1.1 204 No Content\r\n'
'Connection: close\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Map scenarios to trafficserver processes.
trafficservers = {}
data_dirname = 'generated_test_data'
data_path = os.path.join(Test.TestDirectory, data_dirname)
os.makedirs(data_path, exist_ok=True)
def normalizeForAutest(value):
'''
autest uses "test run" names to build file and directory names, so we must transform them in case there are incompatible or
annoying characters.
This means we can also use them in URLs.
'''
if not value:
return None
return re.sub(r'[^a-z0-9-]', '_', value, flags=re.I)
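# For example, normalizeForAutest('10.0.0.1') yields '10_0_0_1', which is safe
# to embed in test-run names, file paths and URLs alike.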
def makeTestCase(redirectTarget, expectedAction, scenario):
'''
Helper method that creates a "meta-test" from which autest generates a test case.
    :param redirectTarget: The target address of a redirect from origin to be handled.
    :param expectedAction: The ActionE member whose expectedStatusLine the response must match.
    :param scenario: Defines the ACL to configure and the addresses to test.
'''
config = ','.join(':'.join(t) for t in sorted((addr.name.lower(), action.name.lower()) for (addr, action) in scenario.items()))
normRedirectTarget = normalizeForAutest(redirectTarget)
normConfig = normalizeForAutest(config)
tr = Test.AddTestRun('With_Config_{0}_Redirect_to_{1}'.format(normConfig, normRedirectTarget))
if trafficservers:
tr.StillRunningAfter = origin
tr.StillRunningAfter = dns
else:
tr.Processes.Default.StartBefore(origin)
tr.Processes.Default.StartBefore(dns)
if config not in trafficservers:
trafficservers[config] = Test.MakeATSProcess('ts_{0}'.format(normConfig), enable_cache=False)
trafficservers[config].Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'http|dns|redirect',
'proxy.config.http.number_of_redirections': 1,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL',
'proxy.config.url_remap.remap_required': 0,
'proxy.config.http.redirect.actions': config,
'proxy.config.http.connect_attempts_timeout': 5,
'proxy.config.http.connect_attempts_max_retries': 0,
})
tr.Processes.Default.StartBefore(trafficservers[config])
else:
tr.StillRunningAfter = trafficservers[config]
testDomain = 'testdomain{0}.test'.format(normRedirectTarget)
# The micro DNS server can't tell us whether it has a record of the domain already, so we use a dictionary to avoid duplicates.
# We remove any surrounding brackets that are common to IPv6 addresses.
if redirectTarget:
dnsRecords[testDomain] = [redirectTarget.strip('[]')]
# A GET request parameterized on the config and on the target.
request_header = {
'headers': ('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: *\r\n\r\n').
format(normConfig, normRedirectTarget),
'timestamp': ArbitraryTimestamp,
'body': ''}
# Returns a redirect to the test domain for the given target & the port number for the TS of the given config.
response_header = {
'headers': ('HTTP/1.1 307 Temporary Redirect\r\n'
'Location: http://{0}:{1}/\r\n'
'Connection: close\r\n\r\n').
format(testDomain, origin.Variables.Port),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Generate the request data file.
command_path = os.path.join(data_path, tr.Name)
with open(command_path, 'w') as f:
f.write(('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: iwillredirect.test:{2}\r\n\r\n').
format(normConfig, normRedirectTarget, origin.Variables.Port))
# Set the command with the appropriate URL.
port = trafficservers[config].Variables.port
dir_path = os.path.join(data_dirname, tr.Name)
tr.Processes.Default.Command = \
(f"bash -o pipefail -c '{sys.executable} tcp_client.py 127.0.0.1 {port} "
f"{dir_path} | head -n 1'")
tr.Processes.Default.ReturnCode = 0
# Generate and set the 'gold file' to check stdout
goldFilePath = os.path.join(data_path, '{0}.gold'.format(tr.Name))
with open(goldFilePath, 'w') as f:
f.write(expectedAction.value['expectedStatusLine'])
tr.Processes.Default.Streams.stdout = goldFilePath
class AddressE(Enum):
'''
Classes of addresses are mapped to example addresses.
'''
Private = ('10.0.0.1', '[fc00::1]')
Loopback = (['127.1.2.3']) # [::1] is omitted here because it is likely overwritten by Self, and there are no others in IPv6.
Multicast = ('224.1.2.3', '[ff42::]')
Linklocal = ('169.254.0.1', '[fe80::]')
Routable = ('72.30.35.10', '[2001:4998:58:1836::10]') # Do not Follow redirects to these in an automated test.
Self = ipv4addrs | ipv6addrs # Addresses of this host.
Default = None # All addresses apply, nothing in particular to test.
class ActionE(Enum):
# Title case because 'return' is a Python keyword.
Return = {'config': 'return', 'expectedStatusLine': 'HTTP/1.1 307 Temporary Redirect\r\n'}
Reject = {'config': 'reject', 'expectedStatusLine': 'HTTP/1.1 403 Forbidden\r\n'}
Follow = {'config': 'follow', 'expectedStatusLine': 'HTTP/1.1 204 No Content\r\n'}
# Added to test failure modes.
Break = {'expectedStatusLine': 'HTTP/1.1 500 Cannot find server.\r\n'}
scenarios = [
{
# Follow to loopback, but alternately reject/return others.
AddressE.Private: ActionE.Reject,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Reject,
AddressE.Linklocal: ActionE.Return,
AddressE.Routable: ActionE.Reject,
AddressE.Self: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Follow to loopback, but alternately reject/return others, flipped from the previous scenario.
AddressE.Private: ActionE.Return,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Return,
AddressE.Linklocal: ActionE.Reject,
AddressE.Routable: ActionE.Return,
AddressE.Self: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return loopback, but reject everything else.
AddressE.Loopback: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Reject loopback, but return everything else.
AddressE.Loopback: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return everything.
AddressE.Default: ActionE.Return,
},
]
for scenario in scenarios:
for addressClass in AddressE:
if not addressClass.value:
# Default has no particular addresses to test.
continue
for address in addressClass.value:
expectedAction = scenario[addressClass] if addressClass in scenario else scenario[AddressE.Default]
makeTestCase(redirectTarget=address, expectedAction=expectedAction, scenario=scenario)
# Test redirects to names that cannot be resolved.
makeTestCase(redirectTarget=None, expectedAction=ActionE.Break, scenario=scenario)
dns.addRecords(records=dnsRecords)
# Make sure this runs only after local files have been created.
Test.Setup.Copy(data_path)
| 38.893281 | 131 | 0.66626 | 1,056 | 0.107317 | 0 | 0 | 0 | 0 | 0 | 0 | 4,502 | 0.45752 |
486d212547e00f7831ca70c40d4c968f71b4de71 | 4,575 | py | Python | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
]
| null | null | null | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
]
| null | null | null | LynkCoHelper/lynco_regist_wrok.py | 21haoshaonian/LynkCoHelper | b4e5d67583190bf09fe44902499c3a99463b4df5 | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
import threading
import time
import base64
from lynkco_app_request import lynkco_app_request
from com.uestcit.api.gateway.sdk.auth.aes import aes as AES
from sms_request import sms_request
import json
import sys
import os
import re
class lynco_regist_wrok(threading.Thread):
"""新开线程处理任务"""
def __init__(self, config):
        # Initialize the thread
threading.Thread.__init__(self)
        # Cache the configuration
self.config = config
self.project_id = self.config['sms_platform']['project_id']
self.max_count = int(self.config['sms_platform']['count'])
self.sms_request = sms_request()
        # Cache the APPKEY (stored base64-encoded, so decode it once)
self.app_key = base64.b64decode(self.config['api_geteway']['app_key']).decode('utf-8')
        # Cache the APPSECRET (stored base64-encoded, so decode it once)
self.app_secret = base64.b64decode(self.config['api_geteway']['app_secret']).decode('utf-8')
        # Cache the AESKEY (stored base64-encoded twice, so decode it twice)
self.aes_key = base64.b64decode(base64.b64decode(self.config['aes_key']).decode('utf-8')).decode('utf-8')
self.AES = AES(self.aes_key)
self.lynkco_app_request = lynkco_app_request(self.app_key, self.app_secret)
def run(self):
"""线程开始的方法"""
print ("开始注册任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
self.token = self.get_token()
if('' == self.token):
return 0
phone_list = []
while len(phone_list) < self.max_count:
phone = self.regist()
if('' == phone):
continue
phone_list.append({ 'username': phone, 'password': 'a123456789' })
with open(sys.path[0] + '/phone_list_' + time.strftime('%Y%m%d%H%M%S') + '.json', 'w') as json_file:
json_file.write(json.dumps(phone_list,ensure_ascii = False))
print ("注册执行完成任务 " + time.strftime('%Y-%m-%d %H:%M:%S'))
def get_token(self):
"""登录获取token"""
sms_username = self.config['sms_platform']['username']
sms_password = self.config['sms_platform']['password']
context = self.sms_request.login(sms_username, sms_password)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户登录失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
token = array[1]
print("短信账户登录成功,token:" + token + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return token
def regist(self):
"""App端操作流程"""
        # Get a phone number
context = self.sms_request.get_phone(self.token, self.project_id)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户获取手机号失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
phone = array[1]
        # Send the registration SMS
response = self.lynkco_app_request.get_vcode_by_regist(phone)
if response['code'] != 'success':
print("发送注册短信失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
        # Try up to 10 times to fetch the SMS content, waiting 3 seconds after each failure
        vcode = ''
        fail_count = 0
while fail_count < 10:
context = self.sms_request.get_phone_msg(self.token, self.project_id, phone)
array = context.split('|')
if(int(array[0]) != 1):
print("短信账户获取验证码内容失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
fail_count += 1
time.sleep(3)
else:
context = array[1]
                # Extract the 6-digit verification code with a regex
pattern = re.compile(r'\d{6}')
result = pattern.findall(context)
if(len(result) != 1):
print("短信账户解析验证码内容失败:" + context + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
else:
vcode = result[0]
print("短信账户获取验证码内容成功:" + vcode + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
break
if('' == vcode):
return ''
        # Submit the registration
password = self.AES.encrypt('a123456789')
response = self.lynkco_app_request.regist(phone, password, vcode)
if response['code'] != 'success':
print("发送注册接口失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return ''
        # Try logging in once
response = self.lynkco_app_request.login(phone, password)
if response['code'] != 'success':
print("尝试接口失败" + response['message'] + " " + time.strftime('%Y-%m-%d %H:%M:%S'))
return phone
return phone | 39.782609 | 113 | 0.551257 | 4,840 | 0.950324 | 0 | 0 | 0 | 0 | 0 | 0 | 1,548 | 0.303947 |
486e93474bb833e6c69bd39f0e367b929c5bddaf | 2,442 | py | Python | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| null | null | null | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| null | null | null | acestream/ACEStream/Core/RequestPolicy.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | #Embedded file name: ACEStream\Core\RequestPolicy.pyo
from ACEStream.Core.simpledefs import *
from ACEStream.Core.exceptions import *
from ACEStream.Core.BitTornado.BT1.MessageID import *
DEBUG = False
MAX_QUERIES_FROM_RANDOM_PEER = 1000
class AbstractRequestPolicy:
def __init__(self):
pass
def allowed(self, permid, messageID):
raise NotYetImplementedException()
class AllowAllRequestPolicy(AbstractRequestPolicy):
def allowed(self, permid, messageID):
return self.allowAllRequestsAllPeers(permid, messageID)
def allowAllRequestsAllPeers(self, permid, messageID):
return True
class CommonRequestPolicy(AbstractRequestPolicy):
def __init__(self, session):
self.session = session
self.friendsdb = session.open_dbhandler(NTFY_FRIENDS)
self.peerdb = session.open_dbhandler(NTFY_PEERS)
AbstractRequestPolicy.__init__(self)
def isFriend(self, permid):
fs = self.friendsdb.getFriendState(permid)
return fs == FS_MUTUAL or fs == FS_I_INVITED
def isSuperPeer(self, permid):
return permid in self.session.lm.superpeer_db.getSuperPeers()
def isCrawler(self, permid):
return permid in self.session.lm.crawler_db.getCrawlers()
def benign_random_peer(self, permid):
if MAX_QUERIES_FROM_RANDOM_PEER > 0:
nqueries = self.get_peer_nqueries(permid)
return nqueries < MAX_QUERIES_FROM_RANDOM_PEER
else:
return True
def get_peer_nqueries(self, permid):
peer = self.peerdb.getPeer(permid)
if peer is None:
return 0
else:
return peer['num_queries']
class AllowFriendsRequestPolicy(CommonRequestPolicy):
def allowed(self, permid, messageID):
if messageID in (CRAWLER_REQUEST, CRAWLER_REPLY):
return self.isCrawler(permid)
else:
return self.allowAllRequestsFromFriends(permid, messageID)
def allowAllRequestsFromFriends(self, permid, messageID):
return self.isFriend(permid)
class FriendsCoopDLOtherRQueryQuotumCrawlerAllowAllRequestPolicy(CommonRequestPolicy):
def allowed(self, permid, messageID):
if messageID == CRAWLER_REQUEST:
return self.isCrawler(permid)
elif messageID == QUERY and not (self.isFriend(permid) or self.benign_random_peer(permid)):
return False
else:
return True
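# Illustrative example (permid/session are placeholders): the session owner
# instantiates one policy and consults it per incoming overlay message, e.g.
#   policy = AllowFriendsRequestPolicy(session)
#   if not policy.allowed(permid, QUERY):
#       pass  # drop the request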
| 30.525 | 99 | 0.700246 | 2,190 | 0.896806 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.027027 |
4871ff697124412845f2fa5d890fac9d6f0735fb | 1,172 | py | Python | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
]
| null | null | null | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
]
| null | null | null | appserver.py | XplosiveX/webfortune | bdcde9e4fc703c05a5520db3a4623103aeb77028 | [
"Apache-2.0"
]
| null | null | null | from flask import Flask, render_template, request, session, redirect, url_for, jsonify, abort
import os
import subprocess
import uuid
app = Flask(__name__)
app.secret_key = str(uuid.uuid4().hex)
intropre = '<pre style="border:black solid 4px; border-radius: 12.5px; background:silver; opacity: 0.65; margin-left:auto; margin-right:auto;height:100%;height:65%;overflow:auto; text-align:center; font-size:16px;">'
@app.route('/')
def index():
    return redirect(url_for('fortunenormal'))  # the endpoint is the view function's name, not the route
@app.route('/fortune/')
def fortunenormal():
fortune = subprocess.check_output(["fortune"]).decode()
prereturn = intropre + fortune + "</pre>"
return prereturn
@app.route('/cowsay/<message>/')
def cowsaynormal(message):
arg_list = ["cowsay"] + message.split()
cow= subprocess.check_output(arg_list).decode()
prereturn = intropre + cow + "</pre>"
return prereturn
@app.route('/cowfortune/')
def cowsayfortune():
outcollect = subprocess.check_output(["fortune"]).decode()
argtodecode = ["cowsay"] + outcollect.split()
fortunecow = subprocess.check_output(argtodecode).decode()
prereturn = intropre + fortunecow + "</pre>"
return prereturn
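# Example requests (illustrative; assumes the app is being served, e.g. via
# `flask run`, and that the `fortune` and `cowsay` binaries are on PATH):
#   GET /            -> redirects to /fortune/
#   GET /fortune/    -> a fortune wrapped in the styled <pre> block
#   GET /cowsay/hi/  -> cowsay rendering of "hi"
#   GET /cowfortune/ -> a fortune piped through cowsay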
| 34.470588 | 216 | 0.705631 | 0 | 0 | 0 | 0 | 750 | 0.639932 | 0 | 0 | 320 | 0.273038 |
4872759cf120b5248551d5a5595288fb0852c2a9 | 1,356 | py | Python | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
]
| 340 | 2015-06-05T12:32:26.000Z | 2022-03-30T18:41:30.000Z | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
]
| 179 | 2015-05-26T00:35:07.000Z | 2022-03-18T17:16:37.000Z | auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 511b016ac9853c7f4ee66769be7ad315c5585735 | [
"MIT"
]
| 151 | 2015-01-27T11:49:01.000Z | 2022-03-03T14:26:09.000Z | import unittest
import mock
from ...management.stats import Stats
class TestStats(unittest.TestCase):
def test_init_with_optionals(self):
t = Stats(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.stats.RestClient')
def test_active_users(self, mock_rc):
mock_instance = mock_rc.return_value
s = Stats(domain='domain', token='jwttoken')
s.active_users()
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/active-users',
)
@mock.patch('auth0.v3.management.stats.RestClient')
def test_daily_stats(self, mock_rc):
mock_instance = mock_rc.return_value
s = Stats(domain='domain', token='jwttoken')
s.daily_stats()
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/daily',
params={'from': None, 'to': None},
)
s.daily_stats(from_date='12341212', to_date='56785656')
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/daily',
params={'from': '12341212', 'to': '56785656'},
)
| 31.534884 | 86 | 0.640855 | 1,287 | 0.949115 | 0 | 0 | 928 | 0.684366 | 0 | 0 | 316 | 0.233038 |
4874bbd204b2fd95be7d58eace358a2bc329365d | 2,038 | py | Python | JavPy/utils/common.py | generaljun/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
]
| 1 | 2020-07-30T08:48:17.000Z | 2020-07-30T08:48:17.000Z | JavPy/utils/common.py | liqiang0330/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
]
| null | null | null | JavPy/utils/common.py | liqiang0330/JavPy | e2b5488631c0979c643a2f86ba4cd8bb1709e2f8 | [
"Apache-2.0"
]
| 2 | 2020-07-30T06:30:23.000Z | 2020-07-30T08:48:19.000Z | import datetime
import functools
import re
from typing import Iterable
version = "0.6"
def noexcept(lambda_expression, default=None, return_exception=False):
try:
res = lambda_expression()
return res if not return_exception else (res, None)
except Exception as ex:
return default if not return_exception else (default, ex)
def cache(func):
__cache = dict()
@functools.wraps(func)
def _wrapped(*args, **kwargs):
key = str(args) + "///" + str(kwargs)
if key in __cache:
if datetime.datetime.now() - __cache[key][0] < datetime.timedelta(hours=1):
return __cache[key][1]
res = func(*args, **kwargs)
if res:
__cache[key] = (datetime.datetime.now(), res)
return res
return _wrapped
_class_name_pattern = re.compile(r"\.(.+?)\s")
def get_func_full_name(func):
try:
return func.__module__ + "." + func.__qualname__
except AttributeError:
try:
return (
func.__module__
+ re.search(_class_name_pattern, func.im_class).group(1)
+ "."
+ func.__name__
)
except AttributeError:
return ""
def assign(origin, new):
for k in new.__slots__:
if k.startswith("__"):
k = k[2:]
v = new.__getattribute__(k)
if v:
origin.__setattr__(k, v)
return origin
def conclude(objects: Iterable):
if objects is None:
return None
objects = list(filter(lambda x: x, objects))
if len(objects) == 0:
return None
if len(objects) == 1:
return objects[0]
return functools.reduce(assign, objects)
def urlencode(string, encoding):
from urllib.parse import quote
return quote(string.encode(encoding))
def urldecode(string, encoding):
from urllib.parse import unquote
return unquote(string, encoding)
def get_code_from_title(title):
return re.search(r"\w+-?\d+", title).group(0)
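
# --- Illustrative usage (an added sketch, not part of the original module) ---
if __name__ == "__main__":
    @cache
    def slow_add(a, b):
        return a + b

    slow_add(1, 2)  # computed and stored under str(args)/str(kwargs)
    slow_add(1, 2)  # served from the one-hour cache

    print(get_code_from_title("ABC-123 1080p"))  # -> "ABC-123"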
| 22.898876 | 87 | 0.598135 | 0 | 0 | 0 | 0 | 390 | 0.191364 | 0 | 0 | 45 | 0.02208 |
4875786274d1dcdef100393c55e236d7510c92a2 | 10,457 | py | Python | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
]
| 5 | 2020-09-26T10:16:17.000Z | 2022-01-06T14:31:54.000Z | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
]
| null | null | null | Foundry_Manager_v2.py | MrVauxs/Foundry-Selection-Menu | 13f9164595c3c11fe01e5d44cd35bcc79b6a34df | [
"MIT"
]
| 1 | 2020-09-07T23:36:17.000Z | 2020-09-07T23:36:17.000Z | import requests
from bottle import route, run, template,ServerAdapter,redirect
import subprocess
from html.parser import HTMLParser
import threading
import time
ssl_cert=None #XYZ - 'fullchain.pem'
ssl_key=None #XYZ - 'privkey.pem'
world_mapping={"URL_Path":["world-folder","Name To Be Shown"], "URL_Path2":["world-folder-2","Name To Be Shown Two: Electric Boogaloo"]} #XYZ - Add as many entries as the selection page can comfortably list.
foundry_base="http://blank.com" #XYZ
foundry_port=30000 #XYZ
foundry_url=foundry_base+":"+str(foundry_port)
foundry_directory=r"C:\Program Files\FoundryVTT" #XYZ - The directory has to point to /resources/app (raw string keeps the backslashes literal)
idle_logout=300 #XYZ- Seconds - time to shut down foundry if at login screen and 0 users
##Populate this automatically from module configuration, can probably get pictures etc but who has time?
class SSLWrapper(ServerAdapter):
def __init__(self, ssl_certfile = None, ssl_keyfile = None, host='0.0.0.0', port=8080):
self._ssl_certfile = ssl_certfile
self._ssl_keyfile = ssl_keyfile
super().__init__(host, port)
def run(self, handler):
from cheroot.ssl.builtin import BuiltinSSLAdapter
from cheroot import wsgi
server = wsgi.Server((self.host, self.port), handler)
self.srv = server
        if self._ssl_certfile is not None:  # only enable TLS when a certificate was supplied
            server.ssl_adapter = BuiltinSSLAdapter(self._ssl_certfile, self._ssl_keyfile)
try:
server.start()
finally:
server.stop()
def shutdown(self):
self.srv.stop()
class AwfulScrape_nPlayers(HTMLParser):
#This is why javascript was invented
def __init__(self):
super().__init__()
        self.in_label=False #We are searching for a "Current Players:" label
        self.in_header=False #only ever written by handle_endtag; kept for symmetry
        self.previous_label_players=False #If we found it, grab the first input field
        self.nPlayers=None #Stays None if nothing is found, so callers fail loudly
def handle_starttag(self, tag, attrs):
if tag == "label":
self.in_label=True
if (tag == "input") and self.previous_label_players:
self.nPlayers=int(dict(attrs)["value"])
self.previous_label_players=False
def handle_endtag(self, tag):
if tag == "label":
self.in_label=False
if tag == "header":
self.in_header=False
def handle_data(self, data):
if self.in_label:
if "Current Players" in data:
self.previous_label_players=True
else:
self.previous_label_players=False
## A bunch of threading stuff
class monitorPlayers(object):
def __init__(self, foundry_proccess):
self.foundry_proccess = foundry_proccess
thread = threading.Thread(target=self.run, args=())
thread.daemon = False
thread.start()
def run(self):
#Keep checking number of players
#If it's been 0 for 5 minutes, return to setup
zero_players=False
while True:
n_players=get_logged_in_players(timeout=30.) #Returns "None" if in setup etc so it's safe
if (n_players == 0) and zero_players:
self.foundry_proccess.send_signal(2)
self.foundry_proccess.send_signal(2) ##I think I need to send this twice?
self.foundry_proccess.wait()
break
time.sleep(idle_logout) #Wait five minutes
if n_players == 0:
zero_players=True
else:
zero_players=False
server.start()
class runServer(object):
def __init__(self):
self.server=SSLWrapper(ssl_certfile = ssl_cert, ssl_keyfile = ssl_key,port=foundry_port)
thread = threading.Thread(target=self.run, args=([self.server]))
thread.daemon = False
thread.start()
def run(self,server):
        run(server=server) ##This isn't a cruel practical joke - the second run refers to bottle.run (I'll remove the ugliness in the future)
class bottleManager: #this is basically just a global variable
def __init__(self):
self.bottle_server=runServer()
def shutdown(self):
self.bottle_server.server.shutdown()
self.bottle_server = None
def start(self):
self.bottle_server=runServer()
class startFoundryWorld(object):
def __init__(self, world):
self.world = world
thread = threading.Thread(target=self.run, args=([world]))
thread.daemon = False
thread.start()
def run(self,world):
server.shutdown()
        process_obj= subprocess.Popen(["node","main.js","--port=30000", r"--dataPath=C:\Users\XYZ\AppData\Local\FoundryVTT\Data","--world=%WORLD%".replace("%WORLD%",world)],cwd=foundry_directory) #XYZ - The --dataPath MUST direct to the FoundryVTT data folder (where worlds reside); the raw string stops \U from being parsed as a unicode escape
        time.sleep(12) #time is already imported at module level
monitorPlayers(process_obj)
def get_logged_in_players(timeout=0.1):
r=requests.get(foundry_url+"/join",timeout=timeout)
par=AwfulScrape_nPlayers()
par.feed(r.text)
return par.nPlayers
def _get_world_url(item):
return "<p> > <a href='/"+item[0]+"' >" + item[1][1]+"</a> </p>"
@route('/')
@route('/<world>')
def index(world=None):
if (world == "join") or (world is None):
return """<!DOCTYPE html>
<html>
<title>Foundry World Select</title>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Signika">
<style>
body,h1 {font-family: "Signika", sans-serif}
body, html {height: 100%;
background-color: #3f403f;}
.bgimg {
min-height: 100%;
background-position: center;
background-size: cover;
}
</style>
<body>
<div class="bgimg w3-display-container w3-animate-opacity w3-text-white">
<div class="w3-display-topleft w3-padding-large w3-xlarge">
    Welcome to Dlivitz &amp; Vauxs Foundry Selection Screen!
</div>
<div class="w3-display-middle">
<h1 class="w3-jumbo w3-animate-top"> <strong> """+"".join([_get_world_url(x) for x in world_mapping.items()]) +""" </strong></h1>
<hr class="w3-border-grey" style="margin:auto;width:40%">
<p class="w3-large w3-center">This will start your selected world and you will be able to login.</p>
</div>
</div>
</body>
</html>
"""
requested_world_path,requested_world = world_mapping.get(world,[None,None])
if requested_world is None:
return template('<h1>Cannot find world <b> {{world}} </b></h1>',world=world)
startFoundryWorld(requested_world_path)
return """<!DOCTYPE html>
<html>
<title>Foundry World Select</title>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Signika">
<style>
body,h1 {font-family: "Signika", sans-serif}
body, html {height: 100%;
background-color: #3f403f;}
.bgimg {
min-height: 100%;
background-position: center;
background-size: cover;
}
@keyframes blink {
/**
* At the start of the animation the dot
* has an opacity of .2
*/
0% {
opacity: .2;
}
/**
* At 20% the dot is fully visible and
* then fades out slowly
*/
20% {
opacity: 1;
}
/**
* Until it reaches an opacity of .2 and
* the animation can start again
*/
100% {
opacity: .2;
}
}
.saving span {
/**
* Use the blink animation, which is defined above
*/
animation-name: blink;
/**
* The animation should take 1.4 seconds
*/
animation-duration: 1.4s;
/**
* It will repeat itself forever
*/
animation-iteration-count: infinite;
/**
* This makes sure that the starting style (opacity: .2)
* of the animation is applied before the animation starts.
* Otherwise we would see a short flash or would have
* to set the default styling of the dots to the same
* as the animation. Same applies for the ending styles.
*/
animation-fill-mode: both;
}
.saving span:nth-child(2) {
/**
* Starts the animation of the third dot
* with a delay of .2s, otherwise all dots
* would animate at the same time
*/
animation-delay: .2s;
}
.saving span:nth-child(3) {
/**
* Starts the animation of the third dot
* with a delay of .4s, otherwise all dots
* would animate at the same time
*/
animation-delay: .4s;
}
</style>
<body>
<div class="bgimg w3-display-container w3-animate-opacity w3-text-white">
<div class="w3-display-topleft w3-padding-large w3-xlarge">
Enjoy your game!
</div>
<div class="w3-display-middle">
<h1 class="w3-jumbo w3-animate-top"> <strong><p class="saving">Loading <span>.</span><span>.</span><span>.</span></p> </strong></h1>
</div>
</div>
</body>
<script>
var timer = setTimeout(function() {
window.location='"""+foundry_url+"""'
}, 12000);
</script>
</html>
""" #XYZ - Edit the scripts 12000 milisecond timer depending on your machine.
# This value determines how long the page waits before refreshing and hopefully redirecting the user to the Foundry login page
# (if it's too fast, the page will break and you will have to refresh until Foundry is turned on, too long... you just waste time.)
server=bottleManager()
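
# With the mapping above, visiting http://<host>:30000/URL_Path (an added
# note; URL_Path stands for a key from world_mapping) shuts the selector
# down, boots Foundry with the matching world folder, and then redirects
# the browser back to the Foundry login page.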
| 33.516026 | 280 | 0.589557 | 4,274 | 0.408721 | 0 | 0 | 4,983 | 0.476523 | 0 | 0 | 5,874 | 0.561729 |
48769d3fe736152c54bf8b09ad3360ea09bd2080 | 1,181 | py | Python | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
]
| null | null | null | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
]
| null | null | null | scripts/12865.py | JihoChoi/BOJ | 08974a9db8ebaa299ace242e951cac53ab55fc4d | [
"MIT"
]
| null | null | null |
"""
TAG: 0-1 Knapsack Problem, Dynamic Programming (DP), O(nW)
References:
- https://www.geeksforgeeks.org/0-1-knapsack-problem-dp-10/
weights and values of n items, capacity -> max value
"""
N, W = map(int, input().split()) # number of items, capacity
weights = []
values = []
for i in range(N):
w, v = map(int, input().split())
weights.append(w)
values.append(v)
def knapsack(W, weights, values, n):
dp = [[0 for x in range(W+1)] for x in range(n+1)]
for i in range(n+1):
for w in range(W+1):
if i == 0 or w == 0:
dp[i][w] = 0
elif weights[i-1] <= w:
dp[i][w] = max(values[i-1] + dp[i-1][w - weights[i-1]], dp[i-1][w])
else:
dp[i][w] = dp[i-1][w]
return dp[n][W]
print(knapsack(W, weights, values, N))
# Naive
"""
def knapsack(W, weights, values, n):
if n == 0 or W == 0: # base
return 0
if (weights[n-1] > W):
return knapsack(W, weights, values, n-1)
else:
return max(
values[n-1] + knapsack(W - weights[n-1], weights, values, n-1),
knapsack(W, weights, values, n-1)
)
"""
| 21.87037 | 83 | 0.516511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.478408 |
4876b544334a9fdacdb07da711e4c0eb80787e3b | 339 | py | Python | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
]
| 1 | 2021-02-24T11:45:49.000Z | 2021-02-24T11:45:49.000Z | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
]
| null | null | null | tests/test_parse.py | fphammerle/duplitab | 8dcea2dbcb7f44405cdff24e24f598d338bdcea0 | [
"MIT"
]
| null | null | null | import pytest
import datetime
import duplitab
@pytest.mark.parametrize(('duplicity_timestamp', 'expected'), [
['Tue Oct 11 11:02:01 2016', datetime.datetime(2016, 10, 11, 11, 2, 1)],
])
def test_parse_duplicity_timestamp(duplicity_timestamp, expected):
assert expected == duplitab._parse_duplicity_timestamp(duplicity_timestamp)
| 30.818182 | 79 | 0.769912 | 0 | 0 | 0 | 0 | 290 | 0.855457 | 0 | 0 | 57 | 0.168142 |
4877da686c21f0b39bdbcc627ce13221392e0654 | 5,597 | py | Python | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
]
| 248 | 2019-02-17T05:31:22.000Z | 2022-03-30T04:57:20.000Z | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
]
| 130 | 2019-02-10T19:35:55.000Z | 2022-03-31T10:58:39.000Z | cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
]
| 70 | 2019-02-15T19:04:29.000Z | 2022-03-27T12:58:50.000Z | # Author: True Price <[email protected]>
# A core-attachment based method to detect protein complexes in PPI networks
# Wu, Li, Kwoh, Ng (2009)
# http://www.biomedcentral.com/1471-2105/10/169
from collections import defaultdict
from itertools import combinations
import functools
# return average degree and density for a graph
def __graph_stats(graph):
avg_deg = sum(len(n) for n in graph.values()) / float(len(graph))
density = avg_deg / (len(graph) - 1)
return avg_deg, density
# return core nodes, given a graph and its average degree
__get_core_nodes = lambda g, avg: set(v for v, n in g.items() if len(n) >= avg)
# return NA score
__NA_score = lambda a, b: float(len(a & b) ** 2) / (len(a) * len(b))
def __core_removal(graph, density_threshold):
if len(graph) == 1: # need at least two nodes in the graph...
return [graph]
avg_deg, density = __graph_stats(graph)
if density >= density_threshold:
return [graph]
else:
# find and remove core nodes; create connected subcomponents
core_nodes = __get_core_nodes(graph, avg_deg)
result = []
subgraphs = []
for v, n in graph.items():
if v in core_nodes:
continue
n = n - core_nodes # note that we're reassigning n
for s in subgraphs:
if not n.isdisjoint(s):
s |= n
break
else:
subgraphs.append(n | {v})
# connected subcomponent joining
i = 0
while i < len(subgraphs) - 1:
j = i + 1
while j < len(subgraphs):
if not subgraphs[i].isdisjoint(subgraphs[j]):
subgraphs[i] |= subgraphs[j]
subgraphs.pop(j)
else:
j += 1
i += 1
# recursive core removal
for s in subgraphs:
tresults = __core_removal(
dict((v, graph[v] & s) for v in s), density_threshold
)
for tc in tresults:
nodes = set()
for v, n in tc.items():
nodes.add(v)
n |= graph[v] & core_nodes
for c in core_nodes:
tc[c] = graph[c] & (nodes | core_nodes)
result += tresults
return result
def co_ach(g, density_threshold=0.7, affinity_threshold=0.225, closeness_threshold=0.5):
# read protein-protein pairs
data = defaultdict(set)
for a, b in g.edges():
data[a].add(b)
data[b].add(a)
# step 1: find preliminary cores
SC = [] # currently-detected preliminary cores
count = 0
for vertex, neighbors in data.items():
# build neighborhood graph
vertices = {vertex} | neighbors
size1_neighbors = set()
graph = {}
for v in vertices:
n = data[v] & vertices
if len(n) > 1: # ignore size-1 vertices
graph[v] = n
else:
size1_neighbors.add(v)
if len(graph) < 2: # not enough connections in this graph
continue
graph[vertex] -= size1_neighbors
# get core graph
avg_deg, density = __graph_stats(graph)
core_nodes = __get_core_nodes(graph, avg_deg)
vertices = set(graph.keys())
for v in vertices - core_nodes:
del graph[v]
for n in graph.values():
n &= core_nodes
if len(graph) < 2: # not enough connections in this graph
continue
graph_nodes = set(graph)
# inner loop
for sg in __core_removal(graph, density_threshold):
while True:
_, density = __graph_stats(sg)
# if density threshold met, stop; else, remove min degree node
if density >= density_threshold:
break
w = min(sg.items(), key=lambda k: len(k[1]))[0]
del sg[w]
for n in sg.values():
n.discard(w)
sg_nodes = set(sg)
while graph_nodes - sg_nodes:
w = max(graph_nodes - sg_nodes, key=lambda v: len(graph[v] & sg_nodes))
new_sg = sg.copy()
for v, n in new_sg.items():
if w in graph[v]:
n.add(w)
new_sg[w] = graph[w] & sg_nodes
_, density = __graph_stats(new_sg)
if density < density_threshold:
break
sg = new_sg
sg_nodes.add(w)
# redundancy filtering
max_sim = -1
for i in range(len(SC)):
sim = __NA_score(set(SC[i]), sg_nodes)
if sim > max_sim:
max_sim = sim
index = i
if max_sim < affinity_threshold:
SC.append(sg)
else:
_, density_i = __graph_stats(SC[index])
if density * len(sg) > density_i * len(SC[index]):
SC[index] = sg
# step 2: adding peripheral proteins
clusters = set()
for core in SC:
nodes = frozenset(core)
neighbors = (
functools.reduce(lambda x, y: x | y, (data[v] for v in nodes)) - nodes
)
neighbors -= set(
v
for v in neighbors
if float(len(data[v] & nodes)) / len(nodes) <= closeness_threshold
)
clusters.add(nodes | neighbors)
return [list(c) for c in clusters]
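
# --- Minimal smoke test (an added sketch, not part of the original module).
# co_ach only relies on g.edges() yielding (u, v) pairs, so any
# networkx-style graph object works here.
if __name__ == "__main__":
    class _TinyGraph:
        def edges(self):
            return [("a", "b"), ("b", "c"), ("a", "c"), ("c", "d")]

    print(co_ach(_TinyGraph()))  # -> one dense complex, e.g. [['a', 'b', 'c']]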
| 33.315476 | 88 | 0.517063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 870 | 0.15544 |
48782a7be4c6875b12d933fb4c7555216fa0e180 | 2,745 | py | Python | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
]
| 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
]
| null | null | null | appengine/findit/waterfall/test/revert_and_notify_culprit_pipeline_test.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.constants import DEFAULT_QUEUE
from common.waterfall import failure_type
from gae_libs.pipeline_wrapper import pipeline_handlers
from waterfall import create_revert_cl_pipeline
from waterfall.create_revert_cl_pipeline import CreateRevertCLPipeline
from waterfall.revert_and_notify_culprit_pipeline import (
RevertAndNotifyCulpritPipeline)
from waterfall.send_notification_for_culprit_pipeline import (
SendNotificationForCulpritPipeline)
from waterfall.test import wf_testcase
class RevertAndNotifyCulpritPipelineTest(wf_testcase.WaterfallTestCase):
app_module = pipeline_handlers._APP
def testSendNotificationForTestCulprit(self):
master_name = 'm'
builder_name = 'b'
build_number = 124
repo_name = 'chromium'
revision = 'r1'
culprits = {
'r1': {
'repo_name': repo_name,
'revision': revision,
}
}
heuristic_cls = [[repo_name, revision]]
try_job_type = failure_type.TEST
self.MockPipeline(SendNotificationForCulpritPipeline,
None,
expected_args=[master_name, builder_name, build_number,
repo_name, revision, True])
pipeline = RevertAndNotifyCulpritPipeline(
master_name, builder_name, build_number, culprits,
heuristic_cls, try_job_type)
pipeline.start(queue_name=DEFAULT_QUEUE)
self.execute_queued_tasks()
def testSendNotificationToConfirmRevert(self):
master_name = 'm'
builder_name = 'b'
build_number = 124
repo_name = 'chromium'
revision = 'r1'
culprits = {
'r1': {
'repo_name': repo_name,
'revision': revision,
}
}
heuristic_cls = [[repo_name, revision]]
try_job_type = failure_type.COMPILE
self.MockPipeline(CreateRevertCLPipeline,
create_revert_cl_pipeline.CREATED_BY_SHERIFF,
expected_args=[master_name, builder_name, build_number,
repo_name, revision])
self.MockPipeline(SendNotificationForCulpritPipeline,
None,
expected_args=[
master_name, builder_name, build_number, repo_name,
revision, True,
create_revert_cl_pipeline.CREATED_BY_SHERIFF])
pipeline = RevertAndNotifyCulpritPipeline(
master_name, builder_name, build_number, culprits,
heuristic_cls, try_job_type)
pipeline.start(queue_name=DEFAULT_QUEUE)
self.execute_queued_tasks() | 36.118421 | 77 | 0.676867 | 2,083 | 0.758834 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.09071 |
4878680107622e5788667d3e8c86e78f88548e8c | 2,368 | py | Python | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
]
| 1 | 2020-12-04T18:07:54.000Z | 2020-12-04T18:07:54.000Z | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
]
| 1 | 2019-11-15T22:05:22.000Z | 2019-12-01T03:41:14.000Z | labs/lab2/lm_model/models/LSTM.py | luyuliu/CSE-5194 | 52970106c21b30e64d4cf1df26bec09929494060 | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
class LSTM(nn.Module):
def __init__(self, embedding_matrix, embedding_dim, vocab_size, hidden_dim, dropout, num_layers, bidirectional, output_dim):
"""
Args:
embedding_matrix: Pre-trained word embeddings matrix
embedding_dim: Embedding dimension of the word embeddings
vocab_size: Dimension of the vocabulary
            hidden_dim: Dimension of the hidden states
dropout: Dropout probability
num_layers: Number of layers of the LSTM
            bidirectional: Whether the LSTM is bidirectional
output_dim: Number of output classes (Subtask A: 2 = (OFF, NOT))
"""
super(LSTM, self).__init__()
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.bidirectional = bidirectional
#Word embeddings
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.word_embeddings.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32), requires_grad=False)
#Dropout
self.dropout = dropout
#LSTM layer(s)
if(self.bidirectional):
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2 , num_layers, dropout=self.dropout, bidirectional=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers, dropout=self.dropout)
#Linear layer
self.output = nn.Linear(in_features=hidden_dim, out_features=output_dim)
def forward(self, X):
#Word embeddings
embedded = self.word_embeddings(X)
embedded = embedded.permute(1,0,2)
#Batch size
batch_size = X.size(0)
#Initial hidden state
if(self.bidirectional):
h0 = Variable(torch.zeros(2*self.num_layers, batch_size, self.hidden_dim // 2))
c0 = Variable(torch.zeros(2*self.num_layers, batch_size, self.hidden_dim // 2))
else:
h0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_dim))
c0 = Variable(torch.zeros(self.num_layers, batch_size, self.hidden_dim))
#Forward state
output, (hidden_state, cell_state) = self.lstm(embedded, (h0, c0))
x = self.output(output[-1])
return x
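
# --- Illustrative construction (an added sketch; every shape below is an
# assumption, not something fixed by the original module) ---
if __name__ == "__main__":
    vocab_size, embedding_dim = 100, 50
    emb = np.random.rand(vocab_size, embedding_dim)  # stand-in for pre-trained vectors
    model = LSTM(emb, embedding_dim, vocab_size, hidden_dim=64, dropout=0.0,
                 num_layers=1, bidirectional=False, output_dim=2)
    x = torch.randint(0, vocab_size, (8, 20))  # batch of 8 sequences, length 20
    print(model(x).shape)  # -> torch.Size([8, 2])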
| 34.823529 | 128 | 0.649916 | 2,239 | 0.945524 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.252534 |
4879e9953736c5be4e8f8c3cffbf391ebb052c79 | 229 | py | Python | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
]
| 9 | 2021-03-02T12:16:24.000Z | 2021-03-26T11:06:08.000Z | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
]
| 65 | 2021-03-02T04:57:47.000Z | 2021-04-02T19:31:30.000Z | Day 17/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
]
| 94 | 2021-03-02T04:42:28.000Z | 2021-06-28T10:38:20.000Z | # To print fibonacci series upto a given number n.
first = 0
second = 1
n = int(input())
print("Fibbonacci Series:")
for i in range(0,n):
print(first, end=", ")
    first, second = second, first + second  # advance the pair without shadowing built-in next()
| 19.083333 | 50 | 0.628821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.323144 |
487a8dc97f3ca00e23834b0ac346fc97195d5b14 | 3,578 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
]
| 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
]
| 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day84-PerfectCRM/PerfectCRM/kingadmin/permissions.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
]
| 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | from django.core.urlresolvers import resolve
from django.shortcuts import render,redirect,HttpResponse
from kingadmin.permission_list import perm_dic
from django.conf import settings
def perm_check(*args,**kwargs):
request = args[0]
resolve_url_obj = resolve(request.path)
    current_url_name = resolve_url_obj.url_name # url_name of the current URL
print('---perm:',request.user,request.user.is_authenticated(),current_url_name)
#match_flag = False
match_results = [None,]
match_key = None
if request.user.is_authenticated() is False:
return redirect(settings.LOGIN_URL)
for permission_key,permission_val in perm_dic.items():
per_url_name = permission_val[0]
per_method = permission_val[1]
perm_args = permission_val[2]
perm_kwargs = permission_val[3]
perm_hook_func = permission_val[4] if len(permission_val)>4 else None
if per_url_name == current_url_name: #matches current request url
if per_method == request.method: #matches request method
# if not perm_args: #if no args defined in perm dic, then set this request to passed perm
            #Match the required args one by one and check that each is present.
args_matched = False #for args only
for item in perm_args:
request_method_func = getattr(request,per_method) #request.GET/POST
                    if request_method_func.get(item,None):# the request dict carries this arg
args_matched = True
else:
print("arg not match......")
args_matched = False
                        break # one arg failed to match, so judge False and leave the loop
                else:#for-else: runs when the loop finishes without a break (e.g. the arg list is empty)
args_matched = True
                #Match the args that must carry specific values
kwargs_matched = False
for k,v in perm_kwargs.items():
request_method_func = getattr(request, per_method)
                    arg_val = request_method_func.get(k, None) # the request dict carries this arg
print("perm kwargs check:",arg_val,type(arg_val),v,type(v))
                    if arg_val == str(v): #the specific arg and its value matched, e.g. the request must carry user_id=3
kwargs_matched = True
else:
kwargs_matched = False
                        break # one arg failed to match, so judge False and leave the loop
else:
kwargs_matched = True
                #Now evaluate the custom permission hook function
perm_hook_matched = False
if perm_hook_func:
perm_hook_matched = perm_hook_func(request)
match_results = [args_matched,kwargs_matched,perm_hook_matched]
print("--->match_results ", match_results)
                if all(match_results): #everything matched
match_key = permission_key
break
if all(match_results):
app_name, *per_name = match_key.split('_')
print("--->matched ",match_results,match_key)
print(app_name, *per_name)
perm_obj = '%s.%s' % (app_name,match_key)
print("perm str:",perm_obj)
        if request.user.has_perm(perm_obj):
            print('Current user has this permission')
            return True
        else:
            print('Current user does not have this permission')
            return False
    else:
        print("No permission entry matched; current user has no permission")
def check_permission(func):
def inner(*args,**kwargs):
if not perm_check(*args,**kwargs):
request = args[0]
return render(request,'kingadmin/page_403.html')
return func(*args,**kwargs)
return inner
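
# Illustrative use of the decorator (an added sketch; the view name is
# hypothetical):
#
#   @check_permission
#   def customer_list(request):
#       ...
#
# When perm_check fails, the decorated view renders kingadmin/page_403.html
# instead of executing.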
| 38.473118 | 106 | 0.579374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.243182 |
487a9c7212eb09b59a26433079cfd900f9387fb7 | 7,894 | py | Python | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
]
| null | null | null | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
]
| null | null | null | wserver_qdk/tests/main_test.py | PunchyArchy/wserver_qdk | cd29785710cb9f21efb2fc35fa395b1f693b854e | [
"MIT"
]
| null | null | null | """ Тесты основного класса. """
import unittest
from wserver_qdk.main import WServerQDK
from wserver_qdk import tools
import uuid
class MainTest(unittest.TestCase):
""" Test Case """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.qdk = WServerQDK('192.168.100.118', 8888, login='Test1',
password='jupiter3')
self.qdk.make_connection()
result = self.qdk.make_auth()
result = self.qdk.get_data()
print('auth_result', result)
# self.qdk.make_connection()
def test_set_act(self):
self.qdk.set_act(auto_id=102150, gross=5000, tare=3000, cargo=2000,
time_in='2021.08.24 13:44:13',
time_out='2021.08.24 14:33:31',
carrier_id=507970, trash_cat_id=36,
trash_type_id=12,
polygon_id=9, operator=22, ex_id=127)
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
def test_get_auto_id(self):
self.qdk.get_auto_id(car_number='В060ХА702')
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
self.qdk.get_auto_id(car_number='0101010101')
response = self.qdk.get_data()
self.assertTrue(response['status'] and not response['info'])
def test_get_carrier_id(self):
""" Вернуть ID перевозчика """
self.qdk.get_carrier_id(carrier_name='test_company_1')
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
self.qdk.get_carrier_id(carrier_name='0')
response = self.qdk.get_data()
self.assertTrue(response['status'] and not response['info'])
def test_set_photo(self):
photo_obj = tools.encode_photo('test_act_photo.png')
self.qdk.set_photo(record_id=784663, photo_obj=str(photo_obj),
photo_type=1)
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
def test_set_operator(self):
self.qdk.set_operator('FULLNAME', 'someLogin', 'somePassword', 9)
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
def test_set_auto(self):
random_car_num = str(uuid.uuid4())[:9]
self.qdk.set_auto(car_number=random_car_num, polygon=9,
id_type='tails')
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info'], int))
self.qdk.set_auto(car_number=random_car_num, polygon=9,
id_type='tails')
response = self.qdk.get_data()
self.assertTrue(not response['info']['status'])
def test_set_carrier(self):
self.qdk.set_carrier('test_carrier_n', inn='123', kpp='456',
polygon=9,
status=True, active=True, ex_id=None)
response = self.qdk.get_data()
self.assertTrue(response['status'])
def test_set_operator_notes(self):
self.qdk.set_operator_notes(record=784663,
note='TEST_COMM_FROM_QDK',
note_type=1)
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info']['info'], int))
def test_set_trash_cat(self):
random_name = str(uuid.uuid4())[:10]
self.qdk.set_trash_cat(name=random_name, polygon=9, active=False)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'] and
isinstance(response['info']['info'], int))
self.qdk.set_trash_cat(name=random_name, polygon=9, active=False)
response = self.qdk.get_data()
self.assertTrue(not response['info']['status'])
def test_set_trash_type(self):
random_name = str(uuid.uuid4())[:10]
self.qdk.set_trash_type(name=random_name, category=None, polygon=9)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'] and
isinstance(response['info']['info'], int))
def test_get_rfid_num(self):
self.qdk.get_rfid_id(rfid='FFFF000160')
response = self.qdk.get_data()
self.assertTrue(response['status'] and
isinstance(response['info'], int))
self.qdk.get_rfid_id(rfid='a00240sf')
response = self.qdk.get_data()
self.assertTrue(not response['info'])
def test_update_trash_cat(self):
self.qdk.update_trash_cat(cat_id=4, name='Прочее_Изм')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_cat(cat_id=4, active=False)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_cat(cat_id=4, active=True)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_cat(cat_id=4, polygon=0)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_cat(cat_id=4, name='Прочее', active=True)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
def test_update_trash_type(self):
self.qdk.update_trash_type(type_id=3, polygon=0)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_type(type_id=3, new_name='Пэт_изм')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_type(type_id=3, new_cat_id=35)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_type(type_id=3, active=False)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_trash_type(type_id=3, new_name='Пэт', new_cat_id=4,
active=True)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
def test_update_auto(self):
self.qdk.update_auto(auto_id=623481, new_car_num='В2')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_auto(auto_id=623481, new_id_type='rfid')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_auto(auto_id=623481, active=False)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_auto(auto_id=623481, new_rg_weight=100)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
self.qdk.update_auto(auto_id=623481, new_car_num='ТЕСТ1337',
new_id_type='tails', active=True,
new_rg_weight=0)
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
def test_update_company(self):
self.qdk.update_company(company_id=507994, name='test_company_izm')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
def test_update_operator(self):
self.qdk.update_operator(22, full_name='Гульнара ФО')
response = self.qdk.get_data()
self.assertTrue(response['info']['status'])
| 43.373626 | 75 | 0.60375 | 7,820 | 0.980687 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.13218 |
487b2742f256d49f88ac22c9264f7095601ac607 | 1,643 | py | Python | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
]
| 1 | 2020-08-25T04:16:07.000Z | 2020-08-25T04:16:07.000Z | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
]
| null | null | null | notes/SparkDifferentialJoin.py | ketanpurohit0/experimental | 02a7d5403037fe1fcc107ddf92e3b5a0748957d6 | [
"MIT"
]
| null | null | null | import SparkHelper as sh
sparkSession = sh.getSpark()
sparkSession.sparkContext.setLogLevel("ERROR")
# URL
url = sh.getUrl('postgres','postgres','foobar_secret')
q1 = "SELECT * FROM foo_left"
q2 = "SELECT * FROM foo_right"
df1 = sh.getQueryDataFrame(sparkSession, url, q1)
df2 = sh.getQueryDataFrame(sparkSession, url, q2)
# Do an outer join
dfj = df1.join(df2, df1.name_left == df2.name_right, 'full_outer')
dfj.show()
# The outer join now contains both matched rows and misses,
# so extract the misses from each side.
left_cols_only = [x for x in dfj.columns if 'left' in x]
df1miss = dfj.select(left_cols_only).filter("name_right is null")
right_cols_only = [x for x in dfj.columns if 'right' in x]
df2miss = dfj.select(right_cols_only).filter("name_left is null")
df1miss.show()
df2miss.show()
# Remove the misses from the joined frame (keep only the matched records).
dfj = dfj.filter('name_left is not null and name_right is not null')
dfj.show()
# Now 'normalise' name on both sides of the misses
from pyspark.sql.functions import regexp_replace, col
df1miss = df1miss.withColumn('name_left', regexp_replace( col('name_left'), '(_[0-9]*_|_[0-9]*$)','@CX@'))
df2miss = df2miss.withColumn('name_right', regexp_replace( col('name_right'), '(_[0-9]*_|_[0-9]*$)','@CX@'))
df1miss.show()
df2miss.show()
# Attempt join again on the misses subset, this time with additional columns
# as the keys
dfj2 = df1miss.join(df2miss, [df1miss.name_left == df2miss.name_right, df1miss.uid_left == df2miss.uid_right], 'full_outer')
dfj2.show()
# Take a union
dfj3 = dfj.union(dfj2)
dfj3.show()
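
# Optional sanity check (an added sketch, assuming the join keys are unique):
# every input row should surface exactly once on its own side of the result.
# assert dfj3.filter('name_left is not null').count() == df1.count()
# assert dfj3.filter('name_right is not null').count() == df2.count()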
sparkSession.stop()
| 37.340909 | 125 | 0.715764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.404747 |
487b34c0ca841971125a8b74fe964b7cec0a6a76 | 13,227 | py | Python | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
]
| 2 | 2019-03-04T02:11:04.000Z | 2019-04-18T11:19:45.000Z | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
]
| 1 | 2019-03-15T08:37:53.000Z | 2019-03-15T08:37:53.000Z | py_proto/modules/drivers/camera/proto/camera_conf_pb2.py | yujianyi/fusion_localization | c0057e29cbf690d6260f021080fd951c1a6b6baa | [
"Apache-2.0"
]
| 1 | 2019-03-04T02:11:09.000Z | 2019-03-04T02:11:09.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/drivers/camera/proto/camera_conf.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/drivers/camera/proto/camera_conf.proto',
package='apollo.drivers.camera',
syntax='proto2',
serialized_pb=_b('\n.modules/drivers/camera/proto/camera_conf.proto\x12\x15\x61pollo.drivers.camera\"\xe3\x05\n\nCameraConf\x12\x10\n\x05index\x18\x01 \x02(\r:\x01\x30\x12\x18\n\x08position\x18\x02 \x01(\t:\x06\x63\x65ntre\x12\x14\n\ntopic_name\x18\x03 \x01(\t:\x00\x12!\n\x0cvideo_device\x18\x04 \x01(\t:\x0b/dev/video0\x12 \n\x0b\x63\x61mera_name\x18\x05 \x01(\t:\x0bhead_camera\x12\x19\n\x0f\x63\x61mera_info_url\x18\x06 \x01(\t:\x00\x12\x19\n\x0f\x63\x61mera_frame_id\x18\x07 \x01(\t:\x00\x12\x1c\n\x0e\x63\x61mera_timeout\x18\x08 \x01(\r:\x04\x31\x30\x30\x30\x12\x1c\n\rspin_interval\x18\t \x01(\x01:\x05\x30.005\x12\x17\n\tio_method\x18\n \x01(\t:\x04mmap\x12\x1b\n\x0cpixel_format\x18\x0b \x01(\t:\x05mjpeg\x12\x18\n\x0bimage_width\x18\x0c \x01(\r:\x03\x36\x34\x30\x12\x19\n\x0cimage_height\x18\r \x01(\r:\x03\x34\x38\x30\x12\x16\n\nframe_rate\x18\x0e \x01(\x04:\x02\x33\x30\x12\x16\n\nbrightness\x18\x0f \x01(\x05:\x02-1\x12\x14\n\x08\x63ontrast\x18\x10 \x01(\x05:\x02-1\x12\x16\n\nsaturation\x18\x11 \x01(\x05:\x02-1\x12\x15\n\tsharpness\x18\x12 \x01(\x05:\x02-1\x12\x10\n\x04gain\x18\x13 \x01(\x05:\x02-1\x12\x1a\n\x0c\x61utoexposure\x18\x14 \x01(\x08:\x04true\x12\x15\n\x08\x65xposure\x18\x15 \x01(\r:\x03\x31\x30\x30\x12\x18\n\tautofocus\x18\x16 \x01(\x08:\x05\x66\x61lse\x12\x11\n\x05\x66ocus\x18\x17 \x01(\x05:\x02-1\x12 \n\x12\x61uto_white_balance\x18\x18 \x01(\x08:\x04true\x12\x1b\n\rwhite_balance\x18\x19 \x01(\x05:\x04\x34\x30\x30\x30\x12\x1b\n\x10trigger_internal\x18\x1a \x01(\r:\x01\x30\x12\x17\n\x0btrigger_fps\x18\x1b \x01(\r:\x02\x33\x30\x12\x15\n\nerror_code\x18\x1c \x01(\r:\x01\x30')
)
_CAMERACONF = _descriptor.Descriptor(
name='CameraConf',
full_name='apollo.drivers.camera.CameraConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='apollo.drivers.camera.CameraConf.index', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='apollo.drivers.camera.CameraConf.position', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("centre").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topic_name', full_name='apollo.drivers.camera.CameraConf.topic_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='video_device', full_name='apollo.drivers.camera.CameraConf.video_device', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("/dev/video0").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_name', full_name='apollo.drivers.camera.CameraConf.camera_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("head_camera").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_info_url', full_name='apollo.drivers.camera.CameraConf.camera_info_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_frame_id', full_name='apollo.drivers.camera.CameraConf.camera_frame_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_timeout', full_name='apollo.drivers.camera.CameraConf.camera_timeout', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spin_interval', full_name='apollo.drivers.camera.CameraConf.spin_interval', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=True, default_value=float(0.005),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='io_method', full_name='apollo.drivers.camera.CameraConf.io_method', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("mmap").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pixel_format', full_name='apollo.drivers.camera.CameraConf.pixel_format', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("mjpeg").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_width', full_name='apollo.drivers.camera.CameraConf.image_width', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=640,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_height', full_name='apollo.drivers.camera.CameraConf.image_height', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=480,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='frame_rate', full_name='apollo.drivers.camera.CameraConf.frame_rate', index=13,
number=14, type=4, cpp_type=4, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='brightness', full_name='apollo.drivers.camera.CameraConf.brightness', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contrast', full_name='apollo.drivers.camera.CameraConf.contrast', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='saturation', full_name='apollo.drivers.camera.CameraConf.saturation', index=16,
number=17, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sharpness', full_name='apollo.drivers.camera.CameraConf.sharpness', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gain', full_name='apollo.drivers.camera.CameraConf.gain', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='autoexposure', full_name='apollo.drivers.camera.CameraConf.autoexposure', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exposure', full_name='apollo.drivers.camera.CameraConf.exposure', index=20,
number=21, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='autofocus', full_name='apollo.drivers.camera.CameraConf.autofocus', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='focus', full_name='apollo.drivers.camera.CameraConf.focus', index=22,
number=23, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='auto_white_balance', full_name='apollo.drivers.camera.CameraConf.auto_white_balance', index=23,
number=24, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='white_balance', full_name='apollo.drivers.camera.CameraConf.white_balance', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trigger_internal', full_name='apollo.drivers.camera.CameraConf.trigger_internal', index=25,
number=26, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trigger_fps', full_name='apollo.drivers.camera.CameraConf.trigger_fps', index=26,
number=27, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=30,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_code', full_name='apollo.drivers.camera.CameraConf.error_code', index=27,
number=28, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=813,
)
DESCRIPTOR.message_types_by_name['CameraConf'] = _CAMERACONF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CameraConf = _reflection.GeneratedProtocolMessageType('CameraConf', (_message.Message,), dict(
DESCRIPTOR = _CAMERACONF,
__module__ = 'modules.drivers.camera.proto.camera_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.drivers.camera.CameraConf)
))
_sym_db.RegisterMessage(CameraConf)
# @@protoc_insertion_point(module_scope)
| 51.069498 | 1,613 | 0.730476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,810 | 0.288047 |